diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..6de78c5b113f8304841840022b72c4cc26c76451 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,9 @@ +# [Choice] Ubuntu version (use jammy or bionic on local arm64/Apple Silicon): jammy, focal, bionic +ARG VARIANT="bullseye" +FROM mcr.microsoft.com/vscode/devcontainers/base:0-${VARIANT} +# FROM ubuntu:20.04 +# Options for setup script +# ARG INSTALL_ZSH="true" +# ARG UPGRADE_PACKAGES="true" +ADD sources.list /etc/apt/ +RUN apt-get update && apt-get -y install tree vim tmux python3-pip gcc cmake build-essential git gdb \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000000000000000000000000000000000..7b68e371c9aaa750baa87b8e2e7e5904b99b845a --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,34 @@ +{ + "name": "Ubuntu", + "build": { + "dockerfile": "Dockerfile", + // Update 'VARIANT' to pick an Ubuntu version: jammy / ubuntu-22.04, focal / ubuntu-20.04, bionic /ubuntu-18.04 + // Use ubuntu-22.04 or ubuntu-18.04 on local arm64/Apple Silicon. + "args": { "VARIANT": "ubuntu-20.04" } + }, + "runArgs": [ + "--cap-add=SYS_PTRACE", + "--security-opt", + "seccomp=unconfined" + ], + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "uname -a", + + // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "root", + "extensions": [ + "ms-vscode.cpptools", + "ms-vscode.cmake-tools", + "austin.code-gnu-global", + "visualstudioexptteam.vscodeintel", + "eamodio.gitlens", + "matepek.vscode-catch2-test-adapter", + "spmeesseman.vscode-taskexplorer", + "cschlosser.doxdocgen", + "urosvujosevic.explorer-manager" + ] + +} diff --git a/.devcontainer/sources.list b/.devcontainer/sources.list new file mode 100644 index 0000000000000000000000000000000000000000..c48fbd9ac3789453f0a19bfbb1cf3af0083c224b --- /dev/null +++ b/.devcontainer/sources.list @@ -0,0 +1,10 @@ +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal universe +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates universe +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal multiverse +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates multiverse +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security universe +deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security multiverse \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0eba25231f2a4961dd31f847458850a455788a58..d1f1dc4dedb31da5b41a1046d7a4419881746071 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,8 @@ build/ cmake-build-debug/ cmake-build-release/ cscope.out +cscope.files +tags .DS_Store debug/ release/ diff --git a/Jenkinsfile2 b/Jenkinsfile2 index e03994b975181139ea62c62ee74357ac3863592b..6549f714a1cb13877b751a8e948e24691fbf45f0 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,7 +7,8 @@ def sync_source() { sh ''' hostname date - ''' + env + ''' 
sh ''' cd ${WKC} [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" @@ -57,6 +58,7 @@ def sync_source() { [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git clean -dfx + git log -5 ''' script { if (env.CHANGE_TARGET == 'master') { @@ -90,6 +92,7 @@ def sync_source() { cd ${WK} git pull >/dev/null git clean -dfx + git log -5 ''' script { if (env.CHANGE_URL =~ /\/TDengine\//) { @@ -98,16 +101,13 @@ def sync_source() { cd ${WKC} git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - - if [ ! -d src/connector/python/.github ]; then - rm -rf src/connector/python/* || : - rm -rf src/connector/python/.* || : - git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" - else - cd src/connector/python || echo "src/connector/python not exist" - git pull || : - cd ${WKC} - fi + git log -5 + ''' + sh ''' + cd ${WKC} + rm -rf src/connector/python + mkdir -p src/connector/python + git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" ''' } else if (env.CHANGE_URL =~ /\/TDinternal\//) { sh ''' @@ -115,16 +115,13 @@ def sync_source() { cd ${WK} git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - - if [ ! -d community/src/connector/python/.github ]; then - rm -rf community/src/connector/python/* || : - rm -rf community/src/connector/python/.* || : - git clone --depth 1 https://github.com/taosdata/taos-connector-python community/src/connector/python || echo "failed to clone python connector" - else - cd community/src/connector/python || echo "community/src/connector/python not exist" - git pull || : - cd ${WK} - fi + git log -5 + ''' + sh ''' + cd ${WKC} + rm -rf src/connector/python + mkdir -p src/connector/python + git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" ''' } else { sh ''' @@ -136,18 +133,8 @@ def sync_source() { cd ${WKC} git submodule update --init --recursive ''' - sh ''' - cd ${WKC} - git branch - git log -5 - ''' - sh ''' - cd ${WK} - git branch - git log -5 - ''' } -def pre_test() { +def pre_test_arm64() { sync_source() sh ''' cd ${WK} @@ -160,6 +147,14 @@ def pre_test() { ''' return 1 } +def pre_test() { + sync_source() + sh ''' + cd ${WKC}/tests/parallel_test + ./container_build.sh -w ${WKDIR} -t 8 >/dev/null + ''' + return 1 +} def pre_test_mac() { sync_source() sh ''' @@ -173,27 +168,164 @@ def pre_test_mac() { ''' return 1 } +def pre_test_win(){ + bat ''' + hostname + ipconfig + set + date /t + time /t + taskkill /f /t /im python.exe + taskkill /f /t /im bash.exe + taskkill /f /t /im taosd.exe + rd /s /Q %WIN_INTERNAL_ROOT%\\debug || echo "no debug folder" + echo "clean environment done" + exit 0 + ''' + bat ''' + cd %WIN_INTERNAL_ROOT% + git reset --hard + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git reset --hard + ''' + script { + if (env.CHANGE_TARGET == 'master') { + bat ''' + cd %WIN_INTERNAL_ROOT% + git checkout master + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git checkout master + ''' + } else if (env.CHANGE_TARGET == '2.0') { + bat ''' + cd %WIN_INTERNAL_ROOT% + git checkout 2.0 + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git checkout 2.0 + ''' + } else if 
(env.CHANGE_TARGET == '2.4') { + bat ''' + cd %WIN_INTERNAL_ROOT% + git checkout 2.4 + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git checkout 2.4 + ''' + } else if (env.CHANGE_TARGET == '2.6') { + bat ''' + cd %WIN_INTERNAL_ROOT% + git checkout 2.6 + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git checkout 2.6 + ''' + } else { + bat ''' + cd %WIN_INTERNAL_ROOT% + git checkout develop + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git checkout develop + ''' + } + } + bat ''' + cd %WIN_INTERNAL_ROOT% + git pull + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git remote prune origin + git pull + ''' + bat ''' + cd %WIN_INTERNAL_ROOT% + git branch + git log -5 + ''' + bat ''' + cd %WIN_COMMUNITY_ROOT% + git branch + git log -5 + ''' + script { + if (env.CHANGE_URL =~ /\/TDengine\//) { + bat ''' + echo "match /TDengine/ repository" + cd %WIN_COMMUNITY_ROOT% + git fetch origin +refs/pull/%CHANGE_ID%/merge + git checkout -qf FETCH_HEAD + git log -5 + ''' + } else if (env.CHANGE_URL =~ /\/TDinternal\//) { + bat ''' + echo "match /TDinternal/ repository" + cd %WIN_INTERNAL_ROOT% + git fetch origin +refs/pull/%CHANGE_ID%/merge + git checkout -qf FETCH_HEAD + git log -5 + ''' + } else { + bat ''' + echo "unmatched repository %CHANGE_URL%" + ''' + } + } + bat ''' + cd %WIN_COMMUNITY_ROOT% + git submodule update --init --recursive + ''' + /*bat ''' + cd %WIN_CONNECTOR_ROOT% + git branch + git reset --hard + git pull + ''' + bat ''' + cd %WIN_CONNECTOR_ROOT% + git log -5 + '''*/ +} +def pre_test_build_win() { + bat ''' + echo "building ..." + time /t + cd %WIN_INTERNAL_ROOT% + mkdir debug + cd debug + time /t + call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64 + set CL=/MP8 + echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake" + time /t + cmake ..
-G "NMake Makefiles JOM" || exit 7 + echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6" + time /t + jom -j 6 || exit 8 + time /t + ''' + return 1 +} pipeline { - agent {label " dispatcher "} + agent none options { skipDefaultCheckout() } environment{ + WKDIR = '/var/data/jenkins/workspace' WK = '/var/data/jenkins/workspace/TDinternal' WKC = '/var/data/jenkins/workspace/TDinternal/community' LOGDIR = '/var/data/jenkins/workspace/log' } stages { - stage ('pre_build') { - steps { - sh ''' - date - pwd - env - hostname - ''' - } - } - stage ('Parallel build stage') { - //only build pr + stage('run test') { options { skipDefaultCheckout() } when { allOf { @@ -202,103 +334,104 @@ pipeline { } } parallel { - stage ('dispatcher sync source') { - steps { - timeout(time: 20, unit: 'MINUTES') { - sync_source() - script { - sh ''' - echo "dispatcher ready" - date - ''' - } - } - } - } - stage ('build worker01') { - agent {label " worker01 "} + stage ('build arm64') { + agent {label " worker07_arm64 || worker09_arm64 "} steps { timeout(time: 20, unit: 'MINUTES') { - pre_test() + pre_test_arm64() script { sh ''' - echo "worker01 build done" + echo "arm64 build done" date ''' } } } } - stage ('build worker02') { - agent {label " worker02 "} + stage ('build Mac') { + agent {label " Mac_catalina "} steps { timeout(time: 20, unit: 'MINUTES') { - pre_test() + pre_test_mac() script { sh ''' - echo "worker02 build done" + echo "Mac build done" date ''' } } } } - } - } - stage('run test') { - options { skipDefaultCheckout() } - when { - allOf { - changeRequest() - not { expression { env.CHANGE_BRANCH =~ /docs\// }} - } - } - parallel { - stage ('build worker07_arm64') { - agent {label " worker07_arm64 "} - steps { - timeout(time: 20, unit: 'MINUTES') { - pre_test() - script { - sh ''' - echo "worker07_arm64 build done" - date - ''' - } - } + stage('build win') { + agent {label " windows10_05 || windows10_06 "} + environment{ + WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal" + WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community" + WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test" + WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python" } - } - stage ('build Mac_catalina ') { - agent {label " Mac_catalina "} steps { timeout(time: 20, unit: 'MINUTES') { - pre_test_mac() - script { - sh ''' - echo "Mac_catalina build done" - date - ''' - } + pre_test_win() + pre_test_build_win() } } } stage('run cases') { + agent {label " worker01 || worker02 "} steps { sh ''' date + pwd hostname ''' - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 20, unit: 'MINUTES') { + timeout(time: 15, unit: 'MINUTES') { + pre_test() + script { sh ''' + echo "Linux build done" date - cd ${WKC}/tests/parallel_test - time ./run.sh -m m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} - date - hostname ''' } } + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 60, unit: 'MINUTES') { + script { + def extra_param = "" + def log_server_file = "/home/log_server.json" + def timeout_cmd = "" + if (fileExists(log_server_file)) { + def log_server_enabled = sh ( + script: 'jq .enabled ' + log_server_file, + returnStdout: true + ).trim() + def timeout_param = sh ( + script: 'jq .timeout ' + log_server_file, + returnStdout: true + ).trim() + if (timeout_param != "null" && timeout_param != "0") { + timeout_cmd = "timeout " + timeout_param + } + if (log_server_enabled 
== "1") { + def log_server = sh ( + script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"', + returnStdout: true + ).trim() + if (log_server != "null" && log_server != "") { + extra_param = "-w " + log_server + } + } + } + sh ''' + date + cd ${WKC}/tests/parallel_test + ''' + timeout_cmd + ''' time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} ''' + extra_param + ''' + date + hostname + ''' + } + } + } } } } diff --git a/deps/TSZ/sz/src/CompressElement.c b/deps/TSZ/sz/src/CompressElement.c index b71ff9638eae603dd1ccad6e9f49a5c8add1ab0e..a215a3aebc87b83be9da867efa29f1ff4be271cd 100644 --- a/deps/TSZ/sz/src/CompressElement.c +++ b/deps/TSZ/sz/src/CompressElement.c @@ -7,7 +7,9 @@ * See COPYRIGHT in top-level directory. */ #ifndef WINDOWS +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) #pragma GCC diagnostic push +#endif #pragma GCC diagnostic ignored "-Wchar-subscripts" #endif @@ -233,5 +235,7 @@ INLINE void updateLossyCompElement_Float(unsigned char* diffBytes, unsigned char } #ifndef WINDOWS +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) #pragma GCC diagnostic pop -#endif \ No newline at end of file +#endif +#endif diff --git a/deps/TSZ/sz/src/Huffman.c b/deps/TSZ/sz/src/Huffman.c index 9868f3c0cb2b8f063226092adb13141dddc4b068..6db8b15d1f32bea41f444f68831efc064a80f111 100644 --- a/deps/TSZ/sz/src/Huffman.c +++ b/deps/TSZ/sz/src/Huffman.c @@ -117,7 +117,7 @@ node qremove(HuffmanTree* huffmanTree) /** * @out1 should be set to 0. * @out2 should be 0 as well. - * @index: the index of the byte + * @idx: the idx of the byte * */ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, unsigned long out2) { @@ -136,8 +136,8 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u huffmanTree->cout[n->c] = (unsigned char)len; return; } - int index = len >> 6; //=len/64 - if(index == 0) + int idx = len >> 6; //=len/64 + if(idx == 0) { out1 = out1 << 1; out1 = out1 | 0; @@ -164,13 +164,13 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u * */ void init(HuffmanTree* huffmanTree, int *s, size_t length) { - size_t i, index; + size_t i, idx; size_t *freq = (size_t *)malloc(huffmanTree->allNodes*sizeof(size_t)); memset(freq, 0, huffmanTree->allNodes*sizeof(size_t)); for(i = 0;i < length;i++) { - index = s[i]; - freq[index]++; + idx = s[i]; + freq[idx]++; } for (i = 0; i < huffmanTree->allNodes; i++) diff --git a/deps/TSZ/sz/src/TightDataPointStorageD.c b/deps/TSZ/sz/src/TightDataPointStorageD.c index 469a1bdce96aba395f92bc09ef345d72c3934064..e1a2af9c04283b3bcb65212f6896c2dd99f2d17a 100644 --- a/deps/TSZ/sz/src/TightDataPointStorageD.c +++ b/deps/TSZ/sz/src/TightDataPointStorageD.c @@ -25,9 +25,9 @@ void new_TightDataPointStorageD_Empty(TightDataPointStorageD **this) int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params) { new_TightDataPointStorageD_Empty(this); - size_t i, index = 0; - unsigned char version = flatBytes[index++]; //3 - unsigned char sameRByte = flatBytes[index++]; //1 + size_t i, idx = 0; + unsigned char version = flatBytes[idx++]; //3 + unsigned char sameRByte = flatBytes[idx++]; //1 // parse data format switch (version) @@ -46,15 +46,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi pde_params->accelerate_pw_rel_compression = (sameRByte & 0x08) >> 3; int errorBoundMode = SZ_ABS; - 
convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe); + convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe); - index += MetaDataByteLength_double; + idx += MetaDataByteLength_double; int isRegression = (sameRByte >> 7) & 0x01; unsigned char dsLengthBytes[8]; for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - dsLengthBytes[i] = flatBytes[index++]; + dsLengthBytes[i] = flatBytes[idx++]; (*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE); if((*this)->isLossless==1) @@ -65,7 +65,7 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi else if(same==1) { (*this)->allSameData = 1; - (*this)->exactMidBytes = &(flatBytes[index]); + (*this)->exactMidBytes = &(flatBytes[idx]); return errorBoundMode; } else @@ -74,42 +74,42 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi if(isRegression == 1) { (*this)->raBytes_size = flatBytesLength - 3 - 1 - MetaDataByteLength_double - pde_exe->SZ_SIZE_TYPE; - (*this)->raBytes = &(flatBytes[index]); + (*this)->raBytes = &(flatBytes[idx]); return errorBoundMode; } unsigned char byteBuf[8]; for (i = 0; i < 4; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4 pde_params->maxRangeRadius = max_quant_intervals/2; for (i = 0; i < 4; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4 for (i = 0; i < 8; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->medianValue = bytesToDouble(byteBuf);//8 - (*this)->reqLength = flatBytes[index++]; //1 + (*this)->reqLength = flatBytes[idx++]; //1 for (i = 0; i < 8; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->realPrecision = bytesToDouble(byteBuf);//8 for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE); for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST size_t logicLeadNumBitsNum = (*this)->exactDataNum * 2; @@ -122,12 +122,12 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi (*this)->leadNumArray_size = (logicLeadNumBitsNum >> 3) + 1; } - (*this)->typeArray = &flatBytes[index]; + (*this)->typeArray = &flatBytes[idx]; //retrieve the number of states (i.e., stateNum) (*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum (*this)->stateNum = ((*this)->allNodes+1)/2; - index+=(*this)->typeArray_size; + idx+=(*this)->typeArray_size; // todo need check length @@ -135,15 +135,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - (*this)->leadNumArray_size - (*this)->exactMidBytes_size - (*this)->typeArray_size; - (*this)->leadNumArray = &flatBytes[index]; + (*this)->leadNumArray = &flatBytes[idx]; - index+=(*this)->leadNumArray_size; + idx+=(*this)->leadNumArray_size; - (*this)->exactMidBytes = &flatBytes[index]; + (*this)->exactMidBytes = &flatBytes[idx]; - 
index+=(*this)->exactMidBytes_size; + idx+=(*this)->exactMidBytes_size; - (*this)->residualMidBits = &flatBytes[index]; + (*this)->residualMidBits = &flatBytes[idx]; return errorBoundMode; diff --git a/deps/TSZ/sz/src/TightDataPointStorageF.c b/deps/TSZ/sz/src/TightDataPointStorageF.c index cb1d79b8273d225c240548cebba3f776c7b0345c..16d524b9b44cd51db2af881b83a6a73b97f246f4 100644 --- a/deps/TSZ/sz/src/TightDataPointStorageF.c +++ b/deps/TSZ/sz/src/TightDataPointStorageF.c @@ -25,15 +25,15 @@ void new_TightDataPointStorageF_Empty(TightDataPointStorageF **this) int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params) { new_TightDataPointStorageF_Empty(this); - size_t i, index = 0; + size_t i, idx = 0; // // parse tdps // // 1 version(1) - unsigned char version = flatBytes[index++]; //1 - unsigned char sameRByte = flatBytes[index++]; //1 + unsigned char version = flatBytes[idx++]; //1 + unsigned char sameRByte = flatBytes[idx++]; //1 // parse data format switch (version) @@ -51,12 +51,12 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi pde_exe->SZ_SIZE_TYPE = ((sameRByte & 0x40)>>6)==1?8:4; //0100,0000 int errorBoundMode = SZ_ABS; // 3 meta(2) - convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe); - index += MetaDataByteLength; + convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe); + idx += MetaDataByteLength; // 4 element count(4) unsigned char dsLengthBytes[8]; for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - dsLengthBytes[i] = flatBytes[index++]; + dsLengthBytes[i] = flatBytes[idx++]; (*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);// 4 or 8 if((*this)->isLossless==1) { @@ -66,7 +66,7 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi else if(same==1) { (*this)->allSameData = 1; - (*this)->exactMidBytes = &(flatBytes[index]); + (*this)->exactMidBytes = &(flatBytes[idx]); return errorBoundMode; } else @@ -76,40 +76,40 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi if(isRegression == 1) { (*this)->raBytes_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE; - (*this)->raBytes = &(flatBytes[index]); + (*this)->raBytes = &(flatBytes[idx]); return errorBoundMode; } // 5 quant intervals(4) unsigned char byteBuf[8]; for (i = 0; i < 4; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4 pde_params->maxRangeRadius = max_quant_intervals/2; // 6 intervals for (i = 0; i < 4; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4 // 7 median for (i = 0; i < 4; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->medianValue = bytesToFloat(byteBuf); //4 // 8 reqLength - (*this)->reqLength = flatBytes[index++]; //1 + (*this)->reqLength = flatBytes[idx++]; //1 // 9 realPrecision(8) for (i = 0; i < 8; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->realPrecision = bytesToDouble(byteBuf);//8 // 10 typeArray_size for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// 4 // 11 exactNum for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = 
flatBytes[idx++]; (*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST // 12 mid size for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) - byteBuf[i] = flatBytes[index++]; + byteBuf[i] = flatBytes[idx++]; (*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// STqq // calc leadNumArray_size @@ -124,20 +124,20 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi } // 13 typeArray - (*this)->typeArray = &flatBytes[index]; + (*this)->typeArray = &flatBytes[idx]; //retrieve the number of states (i.e., stateNum) (*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum (*this)->stateNum = ((*this)->allNodes+1)/2; - index+=(*this)->typeArray_size; + idx+=(*this)->typeArray_size; // 14 leadNumArray - (*this)->leadNumArray = &flatBytes[index]; - index += (*this)->leadNumArray_size; + (*this)->leadNumArray = &flatBytes[idx]; + idx += (*this)->leadNumArray_size; // 15 exactMidBytes - (*this)->exactMidBytes = &flatBytes[index]; - index+=(*this)->exactMidBytes_size; + (*this)->exactMidBytes = &flatBytes[idx]; + idx+=(*this)->exactMidBytes_size; // 16 residualMidBits - (*this)->residualMidBits = &flatBytes[index]; + (*this)->residualMidBits = &flatBytes[idx]; // calc residualMidBits_size (*this)->residualMidBits_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE - 4 - 4 - 4 - 1 - 8 diff --git a/deps/cJson/src/cJSON.c b/deps/cJson/src/cJSON.c index ff93e8730d4e9b378efaa5c9039eb886e3a30e97..53698b5baf6513aa502379272d9fe51585459110 100644 --- a/deps/cJson/src/cJSON.c +++ b/deps/cJson/src/cJSON.c @@ -1683,7 +1683,7 @@ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) return (int)size; } -static cJSON* get_array_item(const cJSON *array, size_t index) +static cJSON* get_array_item(const cJSON *array, size_t idx) { cJSON *current_child = NULL; @@ -1693,23 +1693,23 @@ static cJSON* get_array_item(const cJSON *array, size_t index) } current_child = array->child; - while ((current_child != NULL) && (index > 0)) + while ((current_child != NULL) && (idx > 0)) { - index--; + idx--; current_child = current_child->next; } return current_child; } -CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int idx) { - if (index < 0) + if (idx < 0) { return NULL; } - return get_array_item(array, (size_t)index); + return get_array_item(array, (size_t)idx); } static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) diff --git a/deps/lua/src/ldump.c b/deps/lua/src/ldump.c index f08277d3ac440a48a3f771113729f39f492e57cc..4b205914887a78da7f20d0725d00ba4c8841b206 100644 --- a/deps/lua/src/ldump.c +++ b/deps/lua/src/ldump.c @@ -60,7 +60,7 @@ static void DumpVector(const void* b, int n, size_t size, DumpState* D) static void DumpString(const TString* s, DumpState* D) { - if (s==NULL || getstr(s)==NULL) + if (s==NULL) { size_t size=0; DumpVar(size,D); diff --git a/docs-cn/02-intro.md b/docs-cn/02-intro.md deleted file mode 100644 index 673c2e96b65814fc1cd572d54f948793ed6fa521..0000000000000000000000000000000000000000 --- a/docs-cn/02-intro.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: 产品简介 -toc_max_heading_level: 2 ---- - -TDengine 是一款高性能、分布式、支持 SQL 的时序数据库 (Database),其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库 (Database) 功能外,TDengine 
还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。 - -本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。 - -## 主要功能 - -TDengine的主要功能如下: - -1. 高速数据写入,除 [SQL 写入](/develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](/reference/schemaless/),支持 [InfluxDB LINE 协议](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json)等协议写入; -2. 第三方数据采集工具 [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQ](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入; -3. 支持[各种查询](/develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等 -4. 支持[用户自定义函数](/develop/udf) -5. 支持[缓存](/develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis -6. 支持[连续查询](/develop/continuous-query)(Continuous Query) -7. 支持[数据订阅](/develop/subscribe),而且可以指定过滤条件 -8. 支持[集群](/cluster/),可以通过多节点进行水平扩展,并通过多副本实现高可靠 -9. 提供[命令行程序](/reference/taos-shell),便于管理集群,检查系统状态,做即席查询 -10. 提供多种数据的[导入](/operation/import)、[导出](/operation/export) -11. 支持对[TDengine 集群本身的监控](/operation/monitor) -12. 提供 [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) 等多种编程语言的[连接器](/reference/connector/) -13. 支持 [REST 接口](/reference/rest-api/) -14. 支持与[ Grafana 无缝集成](/third-party/grafana) -15. 支持与 Google Data Studio 无缝集成 - -更多细小的功能,请阅读整个文档。 - -## 竞争优势 - -由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点: - -- **[高性能](https://www.taosdata.com/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,而且存储空间也大为节省。 - -- **[分布式](https://www.taosdata.com/scalable)**:通过原生分布式的设计,TDengine 提供了水平扩展的能力,只需要增加节点就能获得更强的数据处理能力,同时通过多副本机制保证了系统的高可用。 - -- **[支持 SQL](https://www.taosdata.com/sql-support)**:TDengine 采用 SQL 作为数据查询语言,减少学习和迁移成本,同时提供 SQL 扩展来处理时序数据特有的分析,而且支持方便灵活的 schemaless 数据写入。 - -- **All in One**:将数据库、消息队列、缓存、流式计算等功能融合一起,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低应用开发和维护成本。 - -- **零管理**:安装、集群几秒搞定,无任何依赖,不用分库分表,系统运行状态监测能与 Grafana 或其他运维工具无缝集成。 - -- **零学习成本**:采用 SQL 查询语言,支持 C/C++、Python、Java、Go、Rust、Node.js、C#、Lua(社区贡献)、PHP(社区贡献) 等多种编程语言,与 MySQL 相似,零学习成本。 - -- **无缝集成**:不用一行代码,即可与 Telegraf、Grafana、Prometheus、EMQX、HiveMQ、StatsD、collectd、icinga、TCollector、Matlab、R 等第三方工具无缝集成。 - -- **互动 Console**: 通过命令行 console,不用编程,执行 SQL 语句就能做即席查询、各种数据库的操作、管理以及集群的维护. - -采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面: - -1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低 -2. 因为采用 SQL 接口,能与众多第三放软件无缝集成,学习迁移成本大幅下降 -3. 因为其 All In One 的特性,系统复杂度降低,能降研发成本 -4. 因为运维维护简单,运营维护成本能大幅降低 - -## 技术生态 - -在整个时序大数据平台中,TDengine 在其中扮演的角色如下: - -
-
-![TDengine Database 技术生态图](eco_system.webp)
-
-图 1. TDengine 技术生态图
- -上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。 - -## 总体适用场景 - -作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。 - -### 数据源特点和需求 - -从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。 - -| 数据源特点和需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| ---------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------------- | -| 总体数据量巨大 | | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。 | -| 数据输入速度偶尔或者持续巨大 | | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。 | -| 数据源数目巨大 | | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。 | - -### 系统架构要求 - -| 系统架构要求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| ---------------------- | ------ | -------- | -------- | ----------------------------------------------------------------------------------------------------- | -| 要求简单可靠的系统架构 | | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。 | -| 要求容错和高可靠 | | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。 | -| 标准化规范 | | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。 | - -### 系统功能需求 - -| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- | -| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 | -| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 | - -### 系统性能需求 - -| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ | -| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 | -| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 | -| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 | - -### 系统维护需求 - -| 系统维护需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| ---------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- | -| 要求系统可靠运行 | | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。 | -| 要求运维学习成本可控 | | | √ | 同上。 | -| 要求市场有大量人才储备 | √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。 | - -## 与其他数据库的对比测试 - -- [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) -- [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) -- [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) -- [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html) -- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html) -- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) diff --git a/docs-cn/04-concept/index.md b/docs-cn/04-concept/index.md deleted file mode 100644 index 8e97d4a2f43537c1229c8e8ea092ddfc1257dde7..0000000000000000000000000000000000000000 --- a/docs-cn/04-concept/index.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: 数据模型和基本概念 ---- - -为了便于解释基本概念,便于撰写示例程序,整个 TDengine 
文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格: - -
-| Device ID | Time Stamp    | current | voltage | phase | location                | groupId |
-| --------- | ------------- | ------- | ------- | ----- | ----------------------- | ------- |
-| d1001     | 1538548685000 | 10.3    | 219     | 0.31  | California.SanFrancisco | 2       |
-| d1002     | 1538548684000 | 10.2    | 220     | 0.23  | California.SanFrancisco | 3       |
-| d1003     | 1538548686500 | 11.5    | 221     | 0.35  | California.LosAngeles   | 3       |
-| d1004     | 1538548685500 | 13.4    | 223     | 0.29  | California.LosAngeles   | 2       |
-| d1001     | 1538548695000 | 12.6    | 218     | 0.33  | California.SanFrancisco | 2       |
-| d1004     | 1538548696600 | 11.8    | 221     | 0.28  | California.LosAngeles   | 2       |
-| d1002     | 1538548696650 | 10.3    | 218     | 0.25  | California.SanFrancisco | 3       |
-| d1001     | 1538548696800 | 12.3    | 221     | 0.31  | California.SanFrancisco | 2       |
-
-表 1:智能电表数据示例(current、voltage、phase 为采集量,location 和 groupId 为标签)
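> Editor's sketch (not part of the deleted file): one way the sample data in Table 1 maps onto TDengine SQL, following the one-table-per-data-collection-point design explained below. Table, column, and tag names come from Table 1; the exact types and statements are illustrative assumptions.

```sql
-- Hypothetical super table for the smart-meter type:
-- collected metrics become columns, static attributes become tags.
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);

-- One child table per data collection point, created from the super table template.
CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2);

-- Rows go into the child table; the first column must be the timestamp.
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);

-- A query on the super table aggregates across all child tables matching the tag filter.
SELECT AVG(current) FROM meters WHERE groupId = 2;
```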
- -每一条记录都有设备 ID,时间戳,采集的物理量以及每个设备相关的静态标签。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。 - -## 采集量 (Metric) - -采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。 - -## 标签 (Label/Tag) - -标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。 - -## 数据采集点 (Data Collection Point) - -数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。 - -## 表 (Table) - -因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。 - -为充分利用其数据的时序性和其他数据特点,TDengine 采取**一个数据采集点一张表**的策略,要求对每个数据采集点单独建表(比如有一千万个智能电表,就需创建一千万张表,上述表格中的 d1001,d1002,d1003,d1004 都需单独建表),用来存储这个数据采集点所采集的时序数据。这种设计有几大优点: - -1. 由于不同数据采集点产生数据的过程完全独立,每个数据采集点的数据源是唯一的,一张表也就只有一个写入者,这样就可采用无锁方式来写,写入速度就能大幅提升。 -2. 对于一个数据采集点而言,其产生的数据是按照时间排序的,因此写的操作可用追加的方式实现,进一步大幅提高数据写入速度。 -3. 一个数据采集点的数据是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。 -4. 一个数据块内部,采用列式存储,对于不同数据类型,采用不同压缩算法,而且由于一个数据采集点的采集量的变化是缓慢的,压缩率更高。 - -如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。** - -TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 current,voltage,phase),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。 - -对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。 - -## 超级表 (STable) - -由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。 - -超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。 - -在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。 - -## 子表 (Subtable) - -当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于: - -1. 子表就是表,因此所有正常表的SQL操作都可以在子表上执行。 -2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。 -3. 子表一定属于一张超级表,但普通表不属于任何超级表 -4. 普通表无法转为子表,子表也无法转为普通表。 - -超级表与与基于超级表建立的子表之间的关系表现在: - -1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。 -2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。 -3. 
超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。 - -查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。 - -TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。 - -## 库 (database) - -库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。 - -一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。 - -## FQDN & End Point - -FQDN (fully qualified domain name, 完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。 - -TDengine 集群的每个节点是由 End Point 来唯一标识的,End Point 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。 - -TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 diff --git a/docs-cn/05-get-started/_pkg_install.mdx b/docs-cn/05-get-started/_pkg_install.mdx deleted file mode 100644 index 83c987af8bcf24a9593105b680d32a0421344d5f..0000000000000000000000000000000000000000 --- a/docs-cn/05-get-started/_pkg_install.mdx +++ /dev/null @@ -1,17 +0,0 @@ -import PkgList from "/components/PkgList"; - -TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。 - -为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 - -在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。 - -发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载: - - - -具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。 - -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads) - -查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases) diff --git a/docs-cn/05-get-started/index.md b/docs-cn/05-get-started/index.md deleted file mode 100644 index 878d7f020245fbff383308c281fbc3fa28ba5f6c..0000000000000000000000000000000000000000 --- a/docs-cn/05-get-started/index.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: 立即开始 -description: '从 Docker,安装包或使用 apt-get 快速安装 TDengine, 通过命令行程序TAOS CLI和工具 taosdemo 快速体验 TDengine 功能' ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import PkgInstall from "./\_pkg_install.mdx"; -import AptGetInstall from "./\_apt_get_install.mdx"; - -## 安装 - -TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件,目前 2.X 版服务端 taosd 和 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。应用驱动 taosc 与 TDengine CLI 可以在 Windows 或 Linux 上安装和运行。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。但在 2.4 之前的版本中没有 taosAdapter,RESTful 接口是由 taosd 内置的 HTTP 服务提供的。 - -TDengine 支持 X64/ARM64/MIPS64/Alpha64 硬件平台,后续将支持 ARM32、RISC-V 等 CPU 架构。 - - - -如果已经安装了 docker, 只需执行下面的命令。 - -```shell -docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -``` - -确定该容器已经启动并且在正常运行 - -```shell -docker ps -``` - -进入该容器并执行 bash - -```shell -docker exec -it bash -``` - -然后就可以执行相关的 Linux 命令操作和访问 TDengine - -详细操作方法请参照 [通过 Docker 快速体验 TDengine](/train-faq/docker)。 - -:::info -从 2.4.0.10 开始,除 taosd 以外,Docker 镜像还包含:taos、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码。启动 Docker 
容器时,将同时启动 taosAdapter 和 taosd,实现对 RESTful 的支持。 - -::: - - - - - - - - - - -如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. - -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/cn/all-downloads/)。 - - - - -## 启动 - -安装后,请使用 `systemctl` 命令来启动 TDengine 的服务进程。 - -```bash -systemctl start taosd -``` - -检查服务是否正常工作: - -```bash -systemctl status taosd -``` - -如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。 - -:::info - -- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。 -- 为更好的获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting,将其设为 0,就可将其关闭。 -- TDengine 采用 FQDN(一般就是 hostname)作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 FQDN,在 TDengine CLI 或应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。 -- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。 - -TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 Linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包: - -```bash -which systemctl -``` - -如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。 - -:::note - -## TDengine 命令行 (CLI) - -为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。 - -```bash -taos -``` - -如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下: - -```cmd -taos> -``` - -在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例: - -```sql -create database demo; -use demo; -create table t (ts timestamp, speed int); -insert into t values ('2019-07-15 00:00:00', 10); -insert into t values ('2019-07-15 01:00:00', 20); -select * from t; - ts | speed | -======================================== - 2019-07-15 00:00:00.000 | 10 | - 2019-07-15 01:00:00.000 | 20 | -Query OK, 2 row(s) in set (0.003128s) -``` - -除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TAOS CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../reference/taos-shell/) - -## 使用 taosBenchmark 体验写入速度 - -启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 `taosdemo`): - -```bash -taosBenchmark -``` - -该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。 - -这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 - -taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。 - -## 使用 TDengine CLI 体验查询速度 - -使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。 - -查询超级表下记录总条数: - -```sql -taos> select count(*) from test.meters; -``` - -查询 1 亿条记录的平均值、最大值、最小值等: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.meters; -``` - -查询 location="California.SanFrancisco" 的记录总条数: - -```sql -taos> select count(*) from test.meters where location="California.SanFrancisco"; -``` - -查询 groupId=10 的所有记录的平均值、最大值、最小值等: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; -``` - -对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); -``` diff --git a/docs-cn/07-develop/01-connect/_connect_c.mdx 
b/docs-cn/07-develop/01-connect/_connect_c.mdx deleted file mode 100644 index 9cd8669561195b49e8428ed490ad97bb5653ae6a..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c title="原生连接" -{{#include docs-examples/c/connect_example.c}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_cs.mdx b/docs-cn/07-develop/01-connect/_connect_cs.mdx deleted file mode 100644 index 821820e8fe1d87a35e01943530179eeb6e0f48be..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_cs.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```csharp title="原生连接" -{{#include docs-examples/csharp/ConnectExample.cs}} -``` - -:::info -C# 连接器目前只支持原生连接。 - -::: diff --git a/docs-cn/07-develop/01-connect/_connect_go.mdx b/docs-cn/07-develop/01-connect/_connect_go.mdx deleted file mode 100644 index 478768caaacc8aceb9a3f5a85f008dde00125eb7..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_go.mdx +++ /dev/null @@ -1,17 +0,0 @@ -#### 使用数据库访问统一接口 - -```go title="原生连接" -{{#include docs-examples/go/connect/cgoexample/main.go}} -``` - -```go title="REST 连接" -{{#include docs-examples/go/connect/restexample/main.go}} -``` - -#### 使用高级封装 - -也可以使用 driver-go 的 af 包建立连接。这个模块封装了 TDengine 的高级功能, 如:参数绑定、订阅等。 - -```go title="使用 af 包建立原生连接" -{{#include docs-examples/go/connect/afconn/main.go}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_java.mdx b/docs-cn/07-develop/01-connect/_connect_java.mdx deleted file mode 100644 index 635f39ceb28ffc3fd0b0d8edb057d9aa01c593de..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_java.mdx +++ /dev/null @@ -1,15 +0,0 @@ -```java title="原生连接" -{{#include docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java}} -``` - -```java title="REST 连接" -{{#include docs-examples/java/src/main/java/com/taos/example/RESTConnectExample.java:main}} -``` - -使用 REST 连接时,如果查询数据量比较大,还可开启批量拉取功能。 - -```java title="开启批量拉取功能" {4} -{{#include docs-examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}} -``` - -更多连接参数配置,参考[Java 连接器](/reference/connector/java) diff --git a/docs-cn/07-develop/01-connect/_connect_node.mdx b/docs-cn/07-develop/01-connect/_connect_node.mdx deleted file mode 100644 index 199a6e3faa88fcb295379309a250990bf97fa973..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_node.mdx +++ /dev/null @@ -1,7 +0,0 @@ -```js title="原生连接" -{{#include docs-examples/node/nativeexample/connect.js}} -``` - -```js title="REST 连接" -{{#include docs-examples/node/restexample/connect.js}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_php.mdx b/docs-cn/07-develop/01-connect/_connect_php.mdx deleted file mode 100644 index 2431df2a722659ae6e5962a955fba139be3e5f67..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_php.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```php title="原生连接" -{{#include docs-examples/php/connect.php}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_python.mdx b/docs-cn/07-develop/01-connect/_connect_python.mdx deleted file mode 100644 index c0043c752e14bcc38f97c1046f2852a3f7fa2b7b..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```python title="原生连接" -{{#include docs-examples/python/connect_example.py}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_r.mdx b/docs-cn/07-develop/01-connect/_connect_r.mdx deleted file mode 100644 
index 8aab6121a66b38540bf1b8ebf5b48a513282ac7a..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_r.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```r title="原生连接" -{{#include docs-examples/R/connect_native.r:demo}} -``` diff --git a/docs-cn/07-develop/01-connect/_connect_rust.mdx b/docs-cn/07-develop/01-connect/_connect_rust.mdx deleted file mode 100644 index 9e64724c178ba2c72e14fc9878bf9c3237bb50e7..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/_connect_rust.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```rust title="原生连接/REST 连接" -{{#include docs-examples/rust/nativeexample/examples/connect.rs}} -``` - -:::note -对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "rest" 特性,那么只有 RESTful 的实现会被编译进来。 - -::: diff --git a/docs-cn/07-develop/01-connect/index.md b/docs-cn/07-develop/01-connect/index.md deleted file mode 100644 index b1857b973932b4f9cfd1564b709dd79f26701951..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/01-connect/index.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -title: 建立连接 -description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。" ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import ConnJava from "./_connect_java.mdx"; -import ConnGo from "./_connect_go.mdx"; -import ConnRust from "./_connect_rust.mdx"; -import ConnNode from "./_connect_node.mdx"; -import ConnPythonNative from "./_connect_python.mdx"; -import ConnCSNative from "./_connect_cs.mdx"; -import ConnC from "./_connect_c.mdx"; -import ConnR from "./_connect_r.mdx"; -import ConnPHP from "./_connect_php.mdx"; -import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx"; -import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx"; -import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx"; -import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx"; - -TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C#、Rust、Lua(社区贡献)和 PHP (社区贡献)的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 - -## 连接器建立连接的方式 - -连接器建立连接的方式,TDengine 提供两种: - -1. 通过 taosAdapter 组件提供的 REST API 建立与 taosd 的连接,这种连接方式下文中简称“REST 连接” -2. 通过客户端驱动程序 taosc 直接与服务端程序 taosd 建立连接,这种连接方式下文中简称“原生连接”。 - -无论使用何种方式建立连接,连接器都提供了相同或相似的 API 操作数据库,都可以执行 SQL 语句,只是初始化连接的方式稍有不同,用户在使用上不会感到什么差别。 - -关键不同点在于: - -1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。 -2. 
使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。 - -## 安装客户端驱动 taosc - -如果选择原生连接,而且应用程序不在 TDengine 同一台服务器上运行,你需要先安装客户端驱动,否则可以跳过此一步。为避免客户端驱动和服务端不兼容,请使用一致的版本。 - -### 安装步骤 - - - - - - - - - - -### 安装验证 - -以上安装和配置完成后,并确认 TDengine 服务已经正常启动运行,此时可以执行安装包里带有的 TDengine 命令行程序 taos 进行登录。 - - - - - - - - - - -## 安装连接器 - - - - -如果使用 maven 管理项目,只需在 pom.xml 中加入以下依赖。 - -```xml - - com.taosdata.jdbc - taos-jdbcdriver - 2.0.38 - -``` - - - - -使用 `pip` 从 PyPI 安装: - -``` -pip install taospy -``` - -从 Git URL 安装: - -``` -pip install git+https://github.com/taosdata/taos-connector-python.git -``` - - - - -编辑 `go.mod` 添加 `driver-go` 依赖即可。 - -```go-mod title=go.mod -module goexample - -go 1.17 - -require github.com/taosdata/driver-go/v2 develop -``` - -:::note -driver-go 使用 cgo 封装了 taosc 的 API。cgo 需要使用 gcc 编译 C 的源码。因此需要确保你的系统上有 gcc。 - -::: - - - - -编辑 `Cargo.toml` 添加 `libtaos` 依赖即可。 - -```toml title=Cargo.toml -[dependencies] -libtaos = { version = "0.4.2"} -``` - -:::info -Rust 连接器通过不同的特性区分不同的连接方式。如果要建立 REST 连接,需要开启 `rest` 特性: - -```toml -libtaos = { version = "*", features = ["rest"] } -``` - -::: - - - - -Node.js 连接器通过不同的包提供不同的连接方式。 - -1. 安装 Node.js 原生连接器 - - ``` - npm i td2.0-connector - ``` - -:::note -推荐 Node 版本大于等于 `node-v12.8.0` 小于 `node-v13.0.0` -::: - -2. 安装 Node.js REST 连接器 - - ``` - npm i td2.0-rest-connector - ``` - - - - -编辑项目配置文件中添加 [TDengine.Connector](https://www.nuget.org/packages/TDengine.Connector/) 的引用即可: - -```xml title=csharp.csproj {12} - - - - Exe - net6.0 - enable - enable - TDengineExample.AsyncQueryExample - - - - - - - -``` - -也可通过 dotnet 命令添加: - -``` -dotnet add package TDengine.Connector -``` - -:::note -以下示例代码,均基于 dotnet6.0,如果使用其它版本,可能需要做适当调整。 - -::: - - - - -1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/)。 -2. 安装 R 的依赖包`RJDBC`: - -```R -install.packages("RJDBC") -``` - - - - -如果已经安装了 TDengine 服务端软件或 TDengine 客户端驱动 taosc, 那么已经安装了 C 连接器,无需额外操作。 -
- -
- - -**下载代码并解压:** - -```shell -curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ -&& mkdir php-tdengine \ -&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 -``` - -> 版本 `v1.0.2` 只是示例,可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases) 中查看可用版本。 - -**非 Swoole 环境:** - -```shell -phpize && ./configure && make -j && make install -``` - -**手动指定 TDengine 目录:** - -```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install -``` - -> `--with-tdengine-dir=` 后跟上 TDengine 目录。 -> 适用于默认找不到的情况,或者 macOS 系统用户。 - -**Swoole 环境:** - -```shell -phpize && ./configure --enable-swoole && make -j && make install -``` - -**启用扩展:** - -方法一:在 `php.ini` 中加入 `extension=tdengine` - -方法二:运行带参数 `php -d extension=tdengine test.php` - - -
- -## 建立连接 - -在执行这一步之前,请确保有一个正在运行的,且可以访问到的 TDengine,而且服务端的 FQDN 配置正确。以下示例代码,都假设 TDengine 安装在本机,且 FQDN(默认 localhost) 和 serverPort(默认 6030) 都使用默认配置。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -:::tip -如果建立连接失败,大部分情况下是 FQDN 或防火墙的配置不正确,详细的排查方法请看[《常见问题及反馈》](https://docs.taosdata.com/train-faq/faq)中的“遇到错误 Unable to establish connection, 我怎么办?” - -::: diff --git a/docs-cn/07-develop/03-insert-data/_c_line.mdx b/docs-cn/07-develop/03-insert-data/_c_line.mdx deleted file mode 100644 index 5ef2e9af774c54e9f090357286f83d2280c2ab11..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_c_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/line_example.c:main}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_c_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_c_opts_json.mdx deleted file mode 100644 index 22ad2e0122797248a372734aac0f3a16a1356530..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_c_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/json_protocol_example.c:main}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_c_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_c_opts_telnet.mdx deleted file mode 100644 index 508d7bc98a149f49766bcd0a474ffe226cbe30bb..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_c_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/telnet_line_example.c:main}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_c_sql.mdx b/docs-cn/07-develop/03-insert-data/_c_sql.mdx deleted file mode 100644 index f4153fd2c427677a338d0c377663d0335f2672f0..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_c_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/insert_example.c}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_c_stmt.mdx b/docs-cn/07-develop/03-insert-data/_c_stmt.mdx deleted file mode 100644 index 01ac067519a2bd224e313fd70169722ba5f20413..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_c_stmt.mdx +++ /dev/null @@ -1,6 +0,0 @@ -```c title=一次绑定一行 -{{#include docs-examples/c/stmt_example.c}} -``` -```c title=一次绑定多行 72:117 -{{#include docs-examples/c/multi_bind_example.c}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_cs_line.mdx b/docs-cn/07-develop/03-insert-data/_cs_line.mdx deleted file mode 100644 index 9c275ee3d7c7a1e52fbb34dbae922004543ee3ce..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_cs_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/InfluxDBLineExample.cs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_cs_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_cs_opts_json.mdx deleted file mode 100644 index 3d538b8506b298241faecd8098f89571359135c9..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_cs_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/OptsJsonExample.cs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_cs_opts_telnet.mdx deleted file mode 100644 index c53bf3d7233115351e5af03b7d9e6318aa4a0da6..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_cs_opts_telnet.mdx +++ /dev/null @@ -1,3 
+0,0 @@ -```csharp -{{#include docs-examples/csharp/OptsTelnetExample.cs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_cs_sql.mdx b/docs-cn/07-develop/03-insert-data/_cs_sql.mdx deleted file mode 100644 index c7688bfbe77a1135424d829fe9b29fbb1bc93ae2..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_cs_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/SQLInsertExample.cs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_cs_stmt.mdx b/docs-cn/07-develop/03-insert-data/_cs_stmt.mdx deleted file mode 100644 index 97c3b910ffeb9e0c88fc143a02014115e819c147..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_cs_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/StmtInsertExample.cs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_go_line.mdx b/docs-cn/07-develop/03-insert-data/_go_line.mdx deleted file mode 100644 index cd225945b70e28bef2ca7fdaf0d9be0ad7ffc18c..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_go_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/line/main.go}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_go_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_go_opts_json.mdx deleted file mode 100644 index 0c0d3e5b6330e046988cdd02234285ec67e92f01..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_go_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/json/main.go}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_go_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_go_opts_telnet.mdx deleted file mode 100644 index d5ca40cc146e62412476289853e8e2739e0e9e4b..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_go_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/telnet/main.go}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_go_sql.mdx b/docs-cn/07-develop/03-insert-data/_go_sql.mdx deleted file mode 100644 index 613a65add1741eb763a4b24e65d180d05f7d670f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_go_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/sql/main.go}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_go_stmt.mdx b/docs-cn/07-develop/03-insert-data/_go_stmt.mdx deleted file mode 100644 index 7bb6792d6df5b250850bd0a0021ecceba994aa09..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_go_stmt.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```go -{{#include docs-examples/go/insert/stmt/main.go}} -``` - -:::tip -driver-go 的模块 `github.com/taosdata/driver-go/v2/wrapper` 是 C 接口的底层封装。使用这个模块也可以实现参数绑定写入。 - -::: diff --git a/docs-cn/07-develop/03-insert-data/_java_line.mdx b/docs-cn/07-develop/03-insert-data/_java_line.mdx deleted file mode 100644 index 2e59a5d4701b2a2ab04ec5711845dc5c80067a1e..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_java_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_java_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_java_opts_json.mdx deleted file mode 100644 index 826a1a07d9405cb193849f9d21e5444f68517914..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_java_opts_json.mdx +++ /dev/null @@ -1,3 
+0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_java_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_java_opts_telnet.mdx deleted file mode 100644 index 954dcc1a482a150dea0b190e1e0593adbfbde796..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_java_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_java_sql.mdx b/docs-cn/07-develop/03-insert-data/_java_sql.mdx deleted file mode 100644 index a863378defe43b1f22c1f98087a34f053a7d6619..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_java_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java:insert}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_java_stmt.mdx b/docs-cn/07-develop/03-insert-data/_java_stmt.mdx deleted file mode 100644 index 54443e535fa84bdf8dc9161ed4ad00f50b26266c..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_java_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_js_line.mdx b/docs-cn/07-develop/03-insert-data/_js_line.mdx deleted file mode 100644 index 172c9bc17b8cff8b2620720b235a9c8e69bd4197..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_js_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/influxdb_line_example.js}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_js_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_js_opts_json.mdx deleted file mode 100644 index 20ac9ec91e8dc6675828b16d7da0acb09afd3b5f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_js_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/opentsdb_json_example.js}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_js_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_js_opts_telnet.mdx deleted file mode 100644 index c3c8c40bd642f4f443de88e3db006ad50724d514..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_js_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/opentsdb_telnet_example.js}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_js_sql.mdx b/docs-cn/07-develop/03-insert-data/_js_sql.mdx deleted file mode 100644 index f5e17c76892a57a94192a95451b508b1c176c984..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_js_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/insert_example.js}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_js_stmt.mdx b/docs-cn/07-develop/03-insert-data/_js_stmt.mdx deleted file mode 100644 index 17a6c9785c7dc1e3c3fa6a59982913f1f139f9c2..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_js_stmt.mdx +++ /dev/null @@ -1,12 +0,0 @@ -```js title=一次绑定一行 -{{#include docs-examples/node/nativeexample/param_bind_example.js}} -``` - -```js title=一次绑定多行 -{{#include docs-examples/node/nativeexample/multi_bind_example.js:insertData}} -``` - -:::info -一次绑定一行效率不如一次绑定多行,但支持非 INSERT 
语句。一次绑定多行效率更高,但仅支持 INSERT 语句。 - -::: diff --git a/docs-cn/07-develop/03-insert-data/_php_sql.mdx b/docs-cn/07-develop/03-insert-data/_php_sql.mdx deleted file mode 100644 index 42d6a548479d526e7ecdba12807cf9cafb911ee5..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_php_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```php -{{#include docs-examples/php/insert.php}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_php_stmt.mdx b/docs-cn/07-develop/03-insert-data/_php_stmt.mdx deleted file mode 100644 index c1ba4ed3b160514fafb50886d799fc27e60927ed..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_php_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```php -{{#include docs-examples/php/insert_stmt.php}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_py_line.mdx b/docs-cn/07-develop/03-insert-data/_py_line.mdx deleted file mode 100644 index d3bb1ebb3403b53fa43bfc9d5d1a0de9764d7583..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_py_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/line_protocol_example.py}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_py_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_py_opts_json.mdx deleted file mode 100644 index cfbfe13ccfdb4f3f34b77300812863fdf70d0f59..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_py_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/json_protocol_example.py}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_py_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_py_opts_telnet.mdx deleted file mode 100644 index 14bc65a7a3da815abadf7f25c8deffeac666c8d7..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_py_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/telnet_line_protocol_example.py}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_py_sql.mdx b/docs-cn/07-develop/03-insert-data/_py_sql.mdx deleted file mode 100644 index c0e15b8ec115b9244d50a47c9eafec04bcfdd70c..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_py_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/native_insert_example.py}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_py_stmt.mdx b/docs-cn/07-develop/03-insert-data/_py_stmt.mdx deleted file mode 100644 index 8241ea86bc64ac64d842dc0a6cddc0eae0399503..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_py_stmt.mdx +++ /dev/null @@ -1,12 +0,0 @@ -```py title=一次绑定一行 -{{#include docs-examples/python/bind_param_example.py}} -``` - -```py title=一次绑定多行 -{{#include docs-examples/python/multi_bind_example.py:bind_batch}} -``` - -:::info -一次绑定一行效率不如一次绑定多行,但支持非 INSERT 语句。一次绑定多行效率更高,但仅支持 INSERT 语句。 - -::: \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_rust_line.mdx b/docs-cn/07-develop/03-insert-data/_rust_line.mdx deleted file mode 100644 index 696ddb7b854751b8dee01047066f97f74212933f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_rust_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_rust_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_rust_opts_json.mdx deleted file mode 100644 index 
97d9052dacd1894cc7548a59951ecfaad9caee87..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_rust_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_rust_opts_telnet.mdx deleted file mode 100644 index 14021f43d8aff30c35dc30c5d278d4e51f375024..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_rust_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_rust_sql.mdx b/docs-cn/07-develop/03-insert-data/_rust_sql.mdx deleted file mode 100644 index 8e8013e4ad734efcc262ea2f750b82210a538e49..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_rust_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/restexample/examples/insert_example.rs}} -``` diff --git a/docs-cn/07-develop/03-insert-data/_rust_stmt.mdx b/docs-cn/07-develop/03-insert-data/_rust_stmt.mdx deleted file mode 100644 index 590a7a0e717426ed0235331c49dfc578bc55b2f7..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/03-insert-data/_rust_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/nativeexample/examples/stmt_example.rs}} -``` diff --git a/docs-cn/07-develop/04-query-data/_c.mdx b/docs-cn/07-develop/04-query-data/_c.mdx deleted file mode 100644 index 76c9067e2f6af19465cf7c52c3e9b48bb868547d..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/query_example.c}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/04-query-data/_c_async.mdx b/docs-cn/07-develop/04-query-data/_c_async.mdx deleted file mode 100644 index 09f3d3b3ff6d6644f837642ef41db459ba7c5753..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_c_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/async_query_example.c:demo}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/04-query-data/_cs.mdx b/docs-cn/07-develop/04-query-data/_cs.mdx deleted file mode 100644 index 2ab52feb564eff0fe251bc9900ea2539171e5dba..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_cs.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/QueryExample.cs}} -``` diff --git a/docs-cn/07-develop/04-query-data/_cs_async.mdx b/docs-cn/07-develop/04-query-data/_cs_async.mdx deleted file mode 100644 index f868994b303e62016b5e2f9304275135855c6ae5..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_cs_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/AsyncQueryExample.cs}} -``` diff --git a/docs-cn/07-develop/04-query-data/_go.mdx b/docs-cn/07-develop/04-query-data/_go.mdx deleted file mode 100644 index 417c12315c06517e2f3de850ac9a379b7714b519..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_go.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/query/sync/main.go}} -``` diff --git a/docs-cn/07-develop/04-query-data/_go_async.mdx b/docs-cn/07-develop/04-query-data/_go_async.mdx deleted file mode 100644 index 
72fff411b980a0dcbdcaf4274722c63e0351db6f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_go_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/query/async/main.go}} -``` diff --git a/docs-cn/07-develop/04-query-data/_java.mdx b/docs-cn/07-develop/04-query-data/_java.mdx deleted file mode 100644 index 519b9266144486231caf3ee593e973d438941ee4..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_java.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java}} -``` diff --git a/docs-cn/07-develop/04-query-data/_js.mdx b/docs-cn/07-develop/04-query-data/_js.mdx deleted file mode 100644 index c5e4c4f3fc20d3940a2bc6e13e6a5dea8a15ff13..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_js.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/query_example.js}} -``` diff --git a/docs-cn/07-develop/04-query-data/_js_async.mdx b/docs-cn/07-develop/04-query-data/_js_async.mdx deleted file mode 100644 index c65d54ed12f6c4bbeb333e0de0ba9ca4638bff84..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_js_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/async_query_example.js}} -``` diff --git a/docs-cn/07-develop/04-query-data/_php.mdx b/docs-cn/07-develop/04-query-data/_php.mdx deleted file mode 100644 index 6264bd99f534fbd800f1f349d93ac69b31c77397..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_php.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/php/query.php}} -``` diff --git a/docs-cn/07-develop/04-query-data/_py.mdx b/docs-cn/07-develop/04-query-data/_py.mdx deleted file mode 100644 index 6a1bacdd3ef91e9484c1d87d6a22de8b128e2144..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_py.mdx +++ /dev/null @@ -1,11 +0,0 @@ -通过迭代逐行获取查询结果。 - -```py -{{#include docs-examples/python/query_example.py:iter}} -``` - -一次获取所有查询结果,并把每一行转化为一个字典返回。 - -```py -{{#include docs-examples/python/query_example.py:fetch_all}} -``` diff --git a/docs-cn/07-develop/04-query-data/_py_async.mdx b/docs-cn/07-develop/04-query-data/_py_async.mdx deleted file mode 100644 index 2399a50df645804788036e17bf223c53482d4eaf..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_py_async.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```py -{{#include docs-examples/python/async_query_example.py}} -``` - -:::note -这个示例程序,目前在 Windows 系统上还无法运行 - -::: diff --git a/docs-cn/07-develop/04-query-data/_rust.mdx b/docs-cn/07-develop/04-query-data/_rust.mdx deleted file mode 100644 index 742d70fd025ff44b573eedf78441c9d73defad45..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/_rust.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/restexample/examples/query_example.rs}} -``` diff --git a/docs-cn/07-develop/04-query-data/index.mdx b/docs-cn/07-develop/04-query-data/index.mdx deleted file mode 100644 index 824f36ef2f98aac227bdcaf2016d7be0a2e59328..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/04-query-data/index.mdx +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: 查询数据 -description: "主要查询功能,通过连接器执行同步查询和异步查询" ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import JavaQuery from "./_java.mdx"; -import PyQuery from "./_py.mdx"; -import GoQuery from "./_go.mdx"; -import 
RustQuery from "./_rust.mdx"; -import NodeQuery from "./_js.mdx"; -import CsQuery from "./_cs.mdx"; -import CQuery from "./_c.mdx"; -import PhpQuery from "./_php.mdx"; -import PyAsync from "./_py_async.mdx"; -import NodeAsync from "./_js_async.mdx"; -import CsAsync from "./_cs_async.mdx"; -import CAsync from "./_c_async.mdx"; - -## 主要查询功能 - -TDengine 采用 SQL 作为查询语言。应用程序可以通过 REST API 或连接器发送 SQL 语句,用户还可以通过 TDengine 命令行工具 taos 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能: - -- 单列、多列数据查询 -- 标签和数值的多种过滤条件:>, <, =, <\>, like 等 -- 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset) -- 数值列及聚合结果的四则运算 -- 时间戳对齐的连接查询(Join Query: 隐式连接)操作 -- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff 等 - -例如:在命令行工具 taos 中,从表 d1001 中查询出 voltage > 215 的记录,按时间降序排列,仅仅输出 2 条。 - -```sql -taos> select * from d1001 where voltage > 215 order by ts desc limit 2; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | -Query OK, 2 row(s) in set (0.001100s) -``` - -为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。TDengine 还支持连续查询。 - -具体的查询语法请看 [TAOS SQL 的数据查询](/taos-sql/select) 章节。 - -## 多表聚合查询 - -物联网场景中,往往同一个类型的数据采集点有多个。TDengine 采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时 TDengine 使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine 提供了一高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。 - -### 示例一 - -在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。 - -``` -taos> SELECT AVG(voltage) FROM meters GROUP BY location; - avg(voltage) | location | -============================================================= - 222.000000000 | California.LosAngeles | - 219.200000000 | California.SanFrancisco | -Query OK, 2 row(s) in set (0.002136s) -``` - -### 示例二 - -在 TAOS shell, 查找 groupId 为 2 的所有智能电表过去 24 小时的记录条数,电流的最大值。 - -``` -taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - cunt(*) | max(current) | -================================== - 5 | 13.4 | -Query OK, 1 row(s) in set (0.002136s) -``` - -TDengine 仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 - -## 降采样查询、插值 - -物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每 10 秒钟求和 - -``` -taos> SELECT sum(current) FROM d1001 INTERVAL(10s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:00.000 | 10.300000191 | - 2018-10-03 14:38:10.000 | 24.900000572 | -Query OK, 2 row(s) in set (0.000883s) -``` - -降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和 - -``` -taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.000 | 10.199999809 | - 2018-10-03 14:38:05.000 | 32.900000572 | - 2018-10-03 14:38:06.000 | 11.500000000 | - 2018-10-03 14:38:15.000 | 12.600000381 | - 2018-10-03 14:38:16.000 | 36.000000000 | -Query OK, 5 row(s) in set (0.001538s) -``` - -降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始 - -``` -taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.500 | 11.189999809 | - 2018-10-03 14:38:05.500 | 31.900000572 | - 2018-10-03 
14:38:06.500 | 11.600000000 | - 2018-10-03 14:38:15.500 | 12.300000381 | - 2018-10-03 14:38:16.500 | 35.000000000 | -Query OK, 5 row(s) in set (0.001521s) -``` - -物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如 FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用 TDengine 的降采样操作就轻松解决。 - -如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。 - -语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](/taos-sql/interval) 章节。 - -## 示例代码 - -### 查询数据 - -在 [SQL 写入](/develop/insert-data/sql-writing) 一章,我们创建了 power 数据库,并向 meters 表写入了一些数据,以下示例代码展示如何查询这个表的数据。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - -:::note - -1. 无论是使用 REST 连接还是原生连接的连接器,以上示例代码都能正常工作。 -2. 唯一需要注意的是:由于 REST 接口无状态, 不能使用 `use db` 语句来切换数据库。 - -::: - -### 异步查询 - -除同步查询 API 之外,TDengine 还提供性能更高的异步调用 API 处理数据插入、查询操作。在软硬件环境相同的情况下,异步 API 处理数据插入的速度比同步 API 快 2-4 倍。异步 API 采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步 API 在网络延迟严重的情况下,优点尤为突出。 - -需要注意的是,只有使用原生连接的连接器,才能使用异步查询功能。 - - - - - - - - - - - - diff --git a/docs-cn/07-develop/07-subscribe.mdx b/docs-cn/07-develop/07-subscribe.mdx deleted file mode 100644 index 0f531e07c9dce7dbb03bacebf8e5cbefae82671f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/07-subscribe.mdx +++ /dev/null @@ -1,254 +0,0 @@ ---- -sidebar_label: 数据订阅 -description: "轻量级的数据订阅与推送服务。连续写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" -title: 数据订阅 ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - -基于数据天然的时间序列特性,TDengine 的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine 在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine 中每一张表均可视为一个标准的消息队列。 - -TDengine 内嵌支持轻量级的消息订阅与推送服务。使用系统提供的 API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 - -TDengine 的订阅与推送服务的状态是由客户端维持,TDengine 服务端并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 - -TDengine 的 API 中,与订阅相关的主要有以下三个: - -```c -taos_subscribe -taos_consume -taos_unsubscribe -``` - -这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”),完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c) 找到。 - -如果我们希望当某个电表的电流超过一定限制(比如 10A)后能得到通知并进行一些处理, 有两种方法:一是分别对每张子表进行查询,每次查询后记录最后一条数据的时间戳,后续只查询这个时间戳之后的数据: - -```sql -select * from D1001 where ts > {last_timestamp1} and current > 10; -select * from D1002 where ts > {last_timestamp2} and current > 10; -... 
-``` - -这确实可行,但随着电表数量的增加,查询数量也会增加,客户端和服务端的性能都会受到影响,当电表数增长到一定的程度,系统就无法承受了。 - -另一种方法是对超级表进行查询。这样,无论有多少电表,都只需一次查询: - -```sql -select * from meters where ts > {last_timestamp} and current > 10; -``` - -但是,如何选择 `last_timestamp` 就成了一个新的问题。因为,一方面数据的产生时间(也就是数据时间戳)和数据入库的时间一般并不相同,有时偏差还很大;另一方面,不同电表的数据到达 TDengine 的时间也会有差异。所以,如果我们在查询中使用最慢的那台电表的数据的时间戳作为 `last_timestamp`,就可能重复读入其它电表的数据;如果使用最快的电表的时间戳,其它电表的数据就可能被漏掉。 - -TDengine 的订阅功能为上面这个问题提供了一个彻底的解决方案。 - -首先是使用 `taos_subscribe` 创建订阅: - -```c -TAOS_SUB* tsub = NULL; -if (async) { -  // create an asynchronized subscription, the callback function will be called every 1s -  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); -} else { -  // create an synchronized subscription, need to call 'taos_consume' manually -  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); -} -``` - -TDengine 中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数 `async` 的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用 `taos_consume` 来拉取数据,而异步则由 API 在内部的另一个线程中调用 `taos_consume`,然后把拉取到的数据交给回调函数 `subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。) - -参数 `taos` 是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在 API 的内部线程中被调用,而 TDengine 的部分 API 不是线程安全的。 - -参数 `sql` 是查询语句,可以在其中使用 where 子句指定过滤条件。在我们的例子中,如果只想订阅电流超过 10A 时的数据,可以这样写: - -```sql -select * from meters where current > 10; -``` - -注意,这里没有指定起始时间,所以会读到所有时间的数据。如果只想从一天前的数据开始订阅,而不需要更早的历史数据,可以再加上一个时间条件: - -```sql -select * from meters where ts > now - 1d and current > 10; -``` - -订阅的 `topic` 实际上是它的名字,因为订阅功能是在客户端 API 中实现的,所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。 - -如果名为 `topic` 的订阅不存在,参数 `restart` 没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个 `topic` 时,`restart` 就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果 `restart` 是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且 `restart` 是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。 - -`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用 `taos_consume` 的时间间隔小于此时间,`taos_consume` 会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。 - -`taos_subscribe` 的倒数第二个参数用于用户程序向回调函数传递附加参数,订阅 API 不对其做任何处理,只原样传递给回调函数。此参数在同步模式下无意义。 - -订阅创建以后,就可以消费其数据了,同步模式下,示例代码是下面的 else 部分: - -```c -if (async) { -  getchar(); -} else while(1) { -  TAOS_RES* res = taos_consume(tsub); -  if (res == NULL) { -    printf("failed to consume data."); -    break; -  } else { -    print_result(res, blockFetch); -    getchar(); -  } -} -``` - -这里是一个 **while** 循环,用户每按一次回车键就调用一次 `taos_consume`,而 `taos_consume` 的返回值是查询到的结果集,与 `taos_use_result` 完全相同,例子中使用这个结果集的代码是函数 `print_result`: - -```c -void print_result(TAOS_RES* res, int blockFetch) { -  TAOS_ROW row = NULL; -  int num_fields = taos_num_fields(res); -  TAOS_FIELD* fields = taos_fetch_fields(res); -  int nRows = 0; -  if (blockFetch) { -    nRows = taos_fetch_block(res, &row); -    for (int i = 0; i < nRows; i++) { -      char temp[256]; -      taos_print_row(temp, row + i, fields, num_fields); -      puts(temp); -    } -  } else { -    while ((row = taos_fetch_row(res))) { -      char temp[256]; -      taos_print_row(temp, row, fields, num_fields); -      puts(temp); -      nRows++; -    } -  } -  printf("%d rows consumed.\n", nRows); -} -``` - -其中的 `taos_print_row` 用于处理订阅到数据,在我们的例子中,它会打印出所有符合条件的记录。而异步模式下,消费订阅到的数据则显得更为简单: - -```c -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { -  print_result(res, *(int*)param); -} -``` - -当要结束一次数据订阅时,需要调用 `taos_unsubscribe`: - -```c -taos_unsubscribe(tsub, keep); -``` - -其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 
_{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 - -代码介绍完毕,我们来看一下实际的运行效果。假设: - -- 示例代码已经下载到本地 -- TDengine 也已经在同一台机器上安装好 -- 示例所需的数据库、超级表、子表已经全部创建好 - -则可以在示例代码所在目录执行以下命令来编译并启动示例程序: - -```bash -make -./subscribe -sql='select * from meters where current > 10;' -``` - -示例程序启动后,打开另一个终端窗口,启动 TDengine CLI 向 **D1001** 插入一条电流为 12A 的数据: - -```sql -$ taos -> use test; -> insert into D1001 values(now, 12, 220, 1); -``` - -这时,因为电流超过了 10A,您应该可以看到示例程序将它输出到了屏幕上。您可以继续插入一些数据观察示例程序的输出。 - -## 示例程序 - -下面的示例程序展示的是如何使用连接器订阅所有电流超过 10A 的记录。 - -### 准备数据 - -``` -# create database "power" -taos> create database power; -# use "power" as the database in following operations -taos> use power; -# create super table "meters" -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); -# create tables using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("California.SanFrancisco", 2); -taos> create table d1002 using meters tags ("California.LosAngeles", 2); -# insert some rows -taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); -taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# filter out the rows in which current is bigger than 10A -taos> select * from meters where current > 10; - ts | current | voltage | phase | location | groupid | -=========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | -Query OK, 5 row(s) in set (0.004896s) -``` - -### 示例代码 - - - - - - - - - {/* - - */} - - - - {/* - - - - - */} - - - - - -### 运行示例程序 - -示例程序会先消费符合查询条件的所有历史数据: - -```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -``` - -接着,使用 TDengine CLI 向表中新增一条数据: - -``` -# taos -taos> use power; -taos> insert into d1001 values(now, 12.4, 220, 1); -``` - -因为这条数据的电流大于 10A,示例程序会将其消费: - -``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 -``` diff --git a/docs-cn/07-develop/09-udf.md b/docs-cn/07-develop/09-udf.md deleted file mode 100644 index 09681650db32200e60c0fdb787d3e455dd339d85..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/09-udf.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -sidebar_label: 用户定义函数 -title: UDF(用户定义函数) -description: "支持用户编码的聚合函数和标量函数,在查询中嵌入并使用用户定义函数,拓展查询的能力和功能。" --- - -在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF
功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。 UDF 通常以数据表中的一列数据作为输入,同时支持以嵌套子查询的结果作为输入。 - -从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。 - -用户可以通过 UDF 实现两类函数:标量函数和聚合函数。 - -## 用 C/C++ 语言来定义 UDF - -### 标量函数 - -用户可以按照下列函数模板定义自己的标量计算函数 - - `void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` - - 其中 udfNormalFunc 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算,其参数项是固定的,用于按照约束完成与引擎之间的数据交换。 - -- udfNormalFunc 中各参数的具体含义是: - - data:输入数据。 - - itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](/reference/rest-api/)。例如 4 用于表示 INT 型。 - - ibytes:输入数据中每个值会占用的字节数。 - - numOfRows:输入数据的总行数。 - - ts:主键时间戳在输入中的列数据(只读)。 - - dataOutput:输出数据的缓冲区,缓冲区大小为用户指定的输出类型大小 \* numOfRows。 - - interBuf:中间计算结果的缓冲区,大小为用户在创建 UDF 时指定的 BUFSIZE 大小。通常用于计算中间结果与最终结果不一致时使用,由引擎负责分配与释放。 - - tsOutput:主键时间戳在输出时的列数据,如果非空可用于输出结果对应的时间戳。 - - numOfOutput:输出结果的个数(行数)。 - - otype:输出数据的类型。取值含义与 itype 参数一致。 - - obytes:输出数据中每个值占用的字节数。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 - - [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现,也即上面定义的 udfNormalFunc 函数的一个具体实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。 - -### 聚合函数 - -用户可以按照如下函数模板定义自己的聚合函数。 - -`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` - -其中 udfMergeFunc 是函数名的占位符(在 abs_max 示例中,其实际的函数名是 `abs_max_merge`),以上述模板实现的函数用于对计算中间结果进行聚合,只有针对超级表的聚合查询才需要调用该函数。其中各参数的具体含义是: - - - data:udfNormalFunc 的输出数据数组,如果使用了 interBuf 那么 data 就是 interBuf 的数组。 - - numOfRows:data 中数据的行数。 - - dataOutput:输出数据的缓冲区,大小等于一条最终结果的大小。如果此时输出还不是最终结果,可以选择输出到 interBuf 中即 data 中。 - - numOfOutput:输出结果的个数(行数)。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 - -[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。 - -其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`)来生成每个子表的中间结果,再将子表的中间结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成超级表的最终聚合结果或中间结果。聚合查询最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把超级表的中间结果处理为最终结果,最终结果只能含 0 或 1 条结果数据。 - -其他典型场景,如协方差的计算,也可通过定义聚合 UDF 的方式实现。 - -### 最终计算 - -用户可以按下面的函数模板实现自己的函数对计算结果进行最终计算,通常用于有 interBuf 使用的场景。 - -`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` - -其中 udfFinalizeFunc 是函数名的占位符,其中各参数的具体含义是: - - dataOutput:输出数据的缓冲区。 - - interBuf:中间计算结果缓冲区,可作为输入。 - - numOfOutput:输出数据的个数,对聚合函数来说只能是 0 或者 1。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 - -## UDF 实现方式的规则总结 - -三类 UDF 函数:udfNormalFunc、udfMergeFunc、udfFinalizeFunc,其函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名,也即 udfNormalFunc 函数不需要在实际函数名后添加后缀;而 udfMergeFunc 的函数名要加上后缀 `_merge`、udfFinalizeFunc 的函数名要加上后缀 `_finalize`,这是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。 - -根据 UDF 函数类型的不同,用户所要实现的功能函数也不同: - -- 标量函数:UDF 中需实现 udfNormalFunc。 -- 聚合函数:UDF 中需实现 udfNormalFunc、udfMergeFunc(对超级表查询)、udfFinalizeFunc。 - -:::note -如果对应的函数不需要具体的功能,也需要实现一个空函数。 - -::: - -## 编译 UDF - -用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为动态链接库,之后才能载入 TDengine 系统。 - -例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,以 Linux 为例可以执行如下指令编译得到动态链接库文件: - -```bash -gcc -g -O0 -fPIC -shared add_one.c -o add_one.so -``` - -这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。 - -## 在系统中管理和使用 UDF - -### 创建 UDF - -用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 - -在创建 UDF
时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外, UDF 支持输入与输出类型不一致,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 - -- 创建标量函数 -```sql -CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; -``` - - - ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 - - 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: - - ```sql - CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; - ``` - -- 创建聚合函数: -```sql -CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; -``` - - - ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 - - 关于中间计算结果的使用,可以参考示例程序[demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) - - 例如,如下语句可以把 demo.so 创建为系统中可用的 UDF: - - ```sql - CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; - ``` - -### 管理 UDF - -- 删除指定名称的用户定义函数: -``` -DROP FUNCTION ids(X); -``` - -- ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 -```sql -DROP FUNCTION add_one; -``` -- 显示系统中当前可用的所有 UDF: -```sql -SHOW FUNCTIONS; -``` - -### 调用 UDF - -在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: -```sql -SELECT X(c) FROM table/stable; -``` - -表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 - -## UDF 的一些使用限制 - -在当前版本下,使用 UDF 存在如下这些限制: - -1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统; -2. UDF 不能与系统内建的 SQL 函数混合使用,暂不支持在一条 SQL 语句中使用多个不同名的 UDF ; -3. UDF 只支持以单个数据列作为输入; -4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中; -5. 无法通过 RESTful 接口来创建 UDF; -6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。 - -## 示例代码 - -### 标量函数示例 [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) - -
-add_one.c - -```c -{{#include tests/script/sh/add_one.c}} -``` - -
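为便于对照上文的参数说明,这里补充一个按 udfNormalFunc 模板实现的最小标量 UDF 示意(函数名 my_add_one 为假设名称,仅处理 INT 类型输入、未处理 NULL 值;完整、权威的实现请以 add_one.c 为准):

```c
typedef struct SUdfInit SUdfInit; /* 引擎侧的状态控制结构,这里仅作前置声明(示意) */

/* 标量函数示意:对 INT 输入列逐行 +1,输出行数与输入行数相同 */
void my_add_one(char* data, short itype, short ibytes, int numOfRows,
                long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
                int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
  if (itype != 4) { /* 4 表示 INT 型(见上文 itype 参数说明) */
    *numOfOutput = 0;
    return;
  }
  for (int i = 0; i < numOfRows; ++i) {
    ((int*)dataOutput)[i] = ((int*)data)[i] + 1; /* 逐行计算并写入输出缓冲区 */
  }
  *numOfOutput = numOfRows; /* 告知引擎输出了多少行结果 */
}
```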
- -### 向量函数示例 [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) - -
-abs_max.c - -```c -{{#include tests/script/sh/abs_max.c}} -``` - -
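同样地,下面给出一个按 udfMergeFunc 模板实现的聚合步骤示意(假设每条中间结果是一个 int,即各行数据块上绝对值的最大值;函数名 my_abs_max_merge 为假设名称,完整实现请以 abs_max.c 为准):

```c
#include <stdint.h>

typedef struct SUdfInit SUdfInit; /* 示意用前置声明 */

/* 聚合步骤示意:对各子表/数据块产生的中间结果再取一次最大值 */
void my_abs_max_merge(char* data, int32_t numOfRows, char* dataOutput,
                      int32_t* numOfOutput, SUdfInit* buf) {
  if (numOfRows <= 0) { /* 没有中间结果时不产生输出 */
    *numOfOutput = 0;
    return;
  }
  int m = ((int*)data)[0];
  for (int32_t i = 1; i < numOfRows; ++i) {
    if (((int*)data)[i] > m) m = ((int*)data)[i]; /* 中间结果已是绝对值,直接取最大 */
  }
  *(int*)dataOutput = m;
  *numOfOutput = 1; /* 聚合输出最多一条 */
}
```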
- -### 使用中间计算结果示例 [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) - -
-demo.c - -```c -{{#include tests/script/sh/demo.c}} -``` - -
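作为补充,下面是一个按 udfFinalizeFunc 模板实现的最终计算示意(假设 interBuf 中保存的中间结果就是一个 int;函数名 my_abs_max_finalize 为假设名称,涉及 interBuf 的实际用法请以 demo.c、abs_max.c 为准):

```c
typedef struct SUdfInit SUdfInit; /* 示意用前置声明 */

/* 最终计算示意:把 interBuf 中的中间结果转写为最终输出 */
void my_abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput,
                         SUdfInit* buf) {
  *(int*)dataOutput = *(int*)interBuf; /* 中间结果即最终结果 */
  *numOfOutput = 1;                    /* 聚合函数最终结果只能含 0 或 1 条 */
}
```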
diff --git a/docs-cn/07-develop/_sub_c.mdx b/docs-cn/07-develop/_sub_c.mdx deleted file mode 100644 index 95fef0042d0a277f9136e6e6f8c15558487232f9..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/subscribe_demo.c}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_cs.mdx b/docs-cn/07-develop/_sub_cs.mdx deleted file mode 100644 index 80934aa4d014a076896dce7f41e520f06ffd735d..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_cs.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/SubscribeDemo.cs}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_go.mdx b/docs-cn/07-develop/_sub_go.mdx deleted file mode 100644 index cd908fc12c3a35f49ca108ee56c3951c5388a95f..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_go.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/sub/main.go}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_java.mdx b/docs-cn/07-develop/_sub_java.mdx deleted file mode 100644 index 1ee0cb1a21e35f6760f8680e2ba6dedee92201cd..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_java.mdx +++ /dev/null @@ -1,7 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} -``` -:::note -目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 - -::: \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_node.mdx b/docs-cn/07-develop/_sub_node.mdx deleted file mode 100644 index c93ad627ce9a77ca71a014b41d571089e6c1727b..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_node.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/subscribe_demo.js}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_python.mdx b/docs-cn/07-develop/_sub_python.mdx deleted file mode 100644 index b817deeba6e283a3ba16fee0d580d3823c999536..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/subscribe_demo.py}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/_sub_rust.mdx b/docs-cn/07-develop/_sub_rust.mdx deleted file mode 100644 index 4750cf7a3b871db48c9e5a26b22ab4b8a03f11be..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/_sub_rust.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/nativeexample/examples/subscribe_demo.rs}} -``` \ No newline at end of file diff --git a/docs-cn/07-develop/index.md b/docs-cn/07-develop/index.md deleted file mode 100644 index 0393a87ab2ae7a5b08eea75d7a0bea95614b8131..0000000000000000000000000000000000000000 --- a/docs-cn/07-develop/index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: 开发指南 --- - -开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做: -1. 确定应用到TDengine的连接方式。无论你使用何种编程语言,你总可以使用REST接口,但也可以使用每种编程语言独有的连接器方便地进行连接。 -2. 根据自己的应用场景,确定数据模型。根据数据特征,决定建立一个还是多个库;分清静态标签、采集量,建立正确的超级表,建立子表。 -3. 决定插入数据的方式。TDengine支持使用标准的SQL写入,但同时也支持schemaless模式写入,这样不用手工建表,可以将数据直接写入。 -4. 根据业务要求,看需要撰写哪些SQL查询语句。 -5. 如果你要基于时序数据做实时的统计分析,包括各种监测看板,那么建议你采用TDengine的连续查询功能,而不用上线Spark, Flink等复杂的流式计算系统。 -6. 如果你的应用有模块需要消费插入的数据,希望有新的数据插入时,就能获取通知,那么建议你采用TDengine提供的数据订阅功能,而无需专门部署Kafka或其他消息队列软件。 -7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。 -8.
如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。 - -本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能、每种支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](/reference/connector/)。如果还希望将TDengine与第三方系统集成起来,比如Grafana,请参考[第三方工具](/third-party/)。 - -如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose),在GitHub上直接递交issue。 - -```mdx-code-block -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - - -``` diff --git a/docs-cn/12-taos-sql/02-database.md b/docs-cn/12-taos-sql/02-database.md deleted file mode 100644 index 566fec324148fede8d897869656b83e657569f59..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/02-database.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -sidebar_label: 数据库管理 -title: 数据库管理 -description: "创建、删除数据库,查看、修改数据库参数" --- - -## 创建数据库 - -``` -CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; -``` - -:::info -1. KEEP 是该数据库的数据保留多长天数,缺省是 3650 天(10 年),数据库会自动删除超过时限的数据; -2. UPDATE 标志数据库支持更新相同时间戳数据;(从 2.1.7.0 版本开始此参数支持设为 2,表示允许部分列更新,也即更新数据行时未被设置的列会保留原值。)(从 2.0.8.0 版本开始支持此参数。注意此参数不能通过 `ALTER DATABASE` 指令进行修改。) - 1. UPDATE 设为 0 时,表示不允许更新数据,后发送的相同时间戳的数据会被直接丢弃; - 2. UPDATE 设为 1 时,表示更新全部列数据,即如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL; - 3. UPDATE 设为 2 时,表示支持更新部分列数据,即如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值; - 4. 更多关于 UPDATE 参数的用法,请参考[FAQ](/train-faq/faq)。 -3. 数据库名最大长度为 33; -4. 一条 SQL 语句的最大长度为 65480 个字符; -5. 创建数据库时可用的参数有: - - cache: [详细说明](/reference/config/#cache) - - blocks: [详细说明](/reference/config/#blocks) - - days: [详细说明](/reference/config/#days) - - keep: [详细说明](/reference/config/#keep) - - minRows: [详细说明](/reference/config/#minrows) - - maxRows: [详细说明](/reference/config/#maxrows) - - wal: [详细说明](/reference/config/#wallevel) - - fsync: [详细说明](/reference/config/#fsync) - - update: [详细说明](/reference/config/#update) - - cacheLast: [详细说明](/reference/config/#cachelast) - - replica: [详细说明](/reference/config/#replica) - - quorum: [详细说明](/reference/config/#quorum) - - maxVgroupsPerDb: [详细说明](/reference/config/#maxvgroupsperdb) - - comp: [详细说明](/reference/config/#comp) - - precision: [详细说明](/reference/config/#precision) -6. 请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置,`create database` 的参数中明确指定的会覆盖配置文件中的设置。 - -::: - -### 创建数据库示例 - -创建时间精度为纳秒的数据库,保留 1 年数据: - -```sql -CREATE DATABASE test PRECISION 'ns' KEEP 365; -``` - -## 显示系统当前参数 - -``` -SHOW VARIABLES; -``` - -## 使用数据库 - -``` -USE db_name; -``` - -使用/切换数据库(在 REST 连接方式下无效)。 - -## 删除数据库 - -``` -DROP DATABASE [IF EXISTS] db_name; -``` - -删除数据库。指定 Database 所包含的全部数据表将被删除,谨慎使用!
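
结合本节各条语句,下面给出一个数据库从创建、使用到删除的最小操作序列示意(库名 demo 仅为举例),可在 TDengine CLI 中逐条执行:

```sql
CREATE DATABASE IF NOT EXISTS demo KEEP 365 UPDATE 1;
USE demo;
DROP DATABASE IF EXISTS demo;
```

其中 KEEP 365 表示数据保留 365 天,UPDATE 1 表示允许更新全部列数据;USE 在 REST 连接方式下无效,DROP DATABASE 会删除库中全部数据表,请谨慎使用。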
- -## 修改数据库参数 - -``` -ALTER DATABASE db_name COMP 2; -``` - -COMP 参数是指修改数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。0 表示不压缩,1 表示一阶段压缩,2 表示两阶段压缩。 - -``` -ALTER DATABASE db_name REPLICA 2; -``` - -REPLICA 参数是指修改数据库副本数,取值范围 [1, 3]。在集群中使用,副本数必须小于或等于 DNODE 的数目。 - -``` -ALTER DATABASE db_name KEEP 365; -``` - -KEEP 参数是指修改数据文件保存的天数,缺省值为 3650,取值范围 [days, 365000],必须大于或等于 days 参数值。 - -``` -ALTER DATABASE db_name QUORUM 2; -``` - -QUORUM 参数是指数据写入成功所需要的确认数,取值范围 [1, 2]。对于异步复制,quorum 设为 1,具有 master 角色的虚拟节点自己确认即可。对于同步复制,quorum 设为 2。原则上,Quorum >= 1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 - -``` -ALTER DATABASE db_name BLOCKS 100; -``` - -BLOCKS 参数是每个 VNODE (TSDB) 中有多少 cache 大小的内存块,因此一个 VNODE 使用的内存大小粗略为(cache \* blocks)。取值范围 [3, 1000]。 - -``` -ALTER DATABASE db_name CACHELAST 0; -``` - -CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。) -说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。 - -:::tip -以上所有参数修改后都可以用 show databases 来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。 -::: - -## 显示系统所有数据库 - -``` -SHOW DATABASES; -``` - -## 显示一个数据库的创建语句 - -``` -SHOW CREATE DATABASE db_name; -``` - -常用于数据库迁移。对一个已经存在的数据库,返回其创建语句;在另一个集群中执行该语句,就能得到一个设置完全相同的 Database。 - diff --git a/docs-cn/12-taos-sql/08-interval.md b/docs-cn/12-taos-sql/08-interval.md deleted file mode 100644 index b0619ea5ce3759e9bca1234b76e2a16176511547..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/08-interval.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -sidebar_label: 按窗口切分聚合 -title: 按窗口切分聚合 --- - - -TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。 -窗口子句用于针对查询的数据集合进行按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。 - -## 时间窗口 - -INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window)大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e],[t1s, t1e],[t2s, t2e] 分别是执行三次连续查询的时间窗口范围,窗口前向滑动的时间范围由 sliding time 标识。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。 - -![TDengine Database 时间窗口示意图](./timewindow-1.webp) - -INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法: - -``` -SELECT * FROM temp_tb_1 INTERVAL(1m); -``` - -SLIDING 的向前滑动的时间不能超过一个窗口的时间范围。以下语句非法: - -``` -SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); -``` - -当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。 -- 聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。 -- 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u),当然如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。 -- **注意**:用到 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。 - -## 状态窗口 - -使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用) - -![TDengine Database 时间窗口示意图](./timewindow-3.webp) - -使用 STATE_WINDOW 来确定状态窗口划分的列。例如: - -``` -SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); -``` - -## 会话窗口 - -会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。 - -![TDengine Database
时间窗口示意图](./timewindow-2.webp) - -在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用) - -``` - -SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); -``` - -这种类型的查询语法如下: - -``` -SELECT function_list FROM tb_name - [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - -SELECT function_list FROM stb_name - [WHERE where_condition] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - [GROUP BY tags] -``` - -- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。 -- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。 -- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。 -- - -- WHERE 语句可以指定查询的起止时间和其他过滤条件。 -- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种: - 1. 不进行填充:NONE(默认填充模式)。 - 2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。 - 3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。 - 4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。 - 5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。 - 6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。 - -:::info - -1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。 -2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。 -3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。 - -::: - -时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](/develop/continuous-query)。 - -## 示例 - -智能电表的建表语句如下: - -``` -CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -``` - -针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下: - -``` -SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters - WHERE ts>=NOW-1d and ts<=now - INTERVAL(10m) - FILL(PREV); -``` diff --git a/docs-cn/12-taos-sql/10-json.md b/docs-cn/12-taos-sql/10-json.md deleted file mode 100644 index 4a4a8cca732ac433ba5ada1ec3805ebfa663edb3..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/10-json.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -sidebar_label: JSON 类型使用说明 -title: JSON 类型使用说明 ---- - - -## 语法说明 - -1. 创建 json 类型 tag - - ``` - create stable s1 (ts timestamp, v1 int) tags (info json) - - create table s1_1 using s1 tags ('{"k1": "v1"}') - ``` - -2. json 取值操作符 -> - - ``` - select * from s1 where info->'k1' = 'v1' - - select info->'k1' from s1 - ``` - -3. json key 是否存在操作符 contains - - ``` - select * from s1 where info contains 'k2' - - select * from s1 where info contains 'k1' - ``` - -## 支持的操作 - -1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is no null,不支持 in - - ``` - select * from s1 where info->'k1' match 'v*'; - - select * from s1 where info->'k1' like 'v%' and info contains 'k2'; - - select * from s1 where info is null; - - select * from s1 where info->'k1' is not null - ``` - -2. 支持 json tag 放在 group by、order by、join 子句、union all 以及子查询中,比如 group by json->'key' - -3. 支持 distinct 操作. - - ``` - select distinct info->'k1' from s1 - ``` - -4. 标签操作 - - 支持修改 json 标签值(全量覆盖) - - 支持修改 json 标签名 - - 不支持添加 json 标签、删除 json 标签、修改 json 标签列宽 - -## 其他约束条件 - -1. 只有标签列可以使用 json 类型,如果用 json 标签,标签列只能有一个。 - -2. 长度限制:json 中 key 的长度不能超过 256,并且 key 必须为可打印 ascii 字符;json 字符串总长度不超过 4096 个字节。 - -3. json 格式限制: - - 1. json 输入字符串可以为空("","\t"," "或 null)或 object,不能为非空的字符串,布尔型和数组。 - 2. 
object 可为{},如果 object 为{},则整个 json 串记为空。key 可为"",若 key 为"",则 json 串中忽略该 k-v 对。 - 3. value 可以为数字(int/double)或字符串或 bool 或 null,暂不可以为数组。不允许嵌套。 - 4. 若 json 字符串中出现两个相同的 key,则第一个生效。 - 5. json 字符串里暂不支持转义。 - -4. 当查询 json 中不存在的 key 时,返回 NULL - -5. 当 json tag 作为子查询结果时,不再支持上层查询继续对子查询中的 json 串做解析查询。 - - 比如暂不支持 - - ``` - select jtag->'key' from (select jtag from stable) - ``` - - 不支持 - - ``` - select jtag->'key' from (select jtag from stable) where jtag->'key'>0 - ``` diff --git a/docs-cn/12-taos-sql/11-escape.md b/docs-cn/12-taos-sql/11-escape.md deleted file mode 100644 index 756e5c81591e7414827fdc65e228cfafc96214ad..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/11-escape.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: 转义字符说明 ---- - -## 转义字符表 - -| 字符序列 | **代表的字符** | -| :------: | -------------- | -| `\'` | 单引号' | -| `\"` | 双引号" | -| \n | 换行符 | -| \r | 回车符 | -| \t | tab 符 | -| `\\` | 斜杠\ | -| `\%` | % 规则见下 | -| `\_` | \_ 规则见下 | - -:::note -转义符的功能从 2.4.0.4 版本开始 - -::: - -## 转义字符使用规则 - -1. 标识符里有转义字符(数据库名、表名、列名) - 1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。 - 2. 反引号``标识符: 保持原样,不转义 -2. 数据里有转义字符 - 1. 遇到上面定义的转义字符会转义(%和\_见下面说明),如果没有匹配的转义字符会忽略掉转义符\。 - 2. 对于%和\_,因为在 like 里这两个字符是通配符,所以在模式匹配 like 里用`\%`%和`\_`表示字符里本身的%和\_,如果在 like 模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是%和\_。 diff --git a/docs-cn/12-taos-sql/12-keywords/_category_.yml b/docs-cn/12-taos-sql/12-keywords/_category_.yml deleted file mode 100644 index 67738650a4564477f017542aea81767b3de72922..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/12-keywords/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 参数限制与保留关键字 \ No newline at end of file diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords/index.md deleted file mode 100644 index 0b9ec4de862fc6b6ade11e733a0f7b169a79a324..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/12-keywords/index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -sidebar_label: 参数限制与保留关键字 -title: TDengine 参数限制与保留关键字 ---- - -## 名称命名规则 - -1. 合法字符:英文字符、数字和下划线 -2. 允许英文字符或下划线开头,不允许以数字开头 -3. 不区分大小写 -4. 
转义后表(列)名规则: - 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。 - 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 - - 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 - 需要注意的是转义字符中的内容必须是可打印字符。 - 支持转义符的功能从 2.3.0.1 版本开始。 - -## 密码合法字符集 - -`[a-zA-Z0-9!?$%^&*()_-+={[}]:;@~#|<,>.?/]` - -去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) - -- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符 -- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节,每行数据最大长度 48KB -- 表的列名:不能包含特殊字符,不能超过 64 个字节 -- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” -- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) -- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) -- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节 -- 数据库副本数:不能超过 3 -- 用户名:不能超过 23 个字节 -- 用户密码:不能超过 15 个字节 -- 标签(Tags)数量:不能超过 128 个,可以为 0 个 -- 标签的总长度:不能超过 16KB -- 记录条数:仅受存储空间限制 -- 表的个数:仅受节点个数限制 -- 库的个数:仅受节点个数限制 -- 单个库上虚拟节点个数:不能超过 64 个 -- 库的数目、超级表的数目、表的数目,系统不做限制,仅受系统资源限制 -- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - -## 保留关键字 - -目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: - -| 关键字列表 | | | | | -| ----------- | ---------- | --------- | ---------- | ------------ | -| ABORT | CREATE | IGNORE | NULL | STAR | -| ACCOUNT | CTIME | IMMEDIATE | OF | STATE | -| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT | -| ADD | DATABASES | IN | OR | STATE_WINDOW | -| AFTER | DAYS | INITIALLY | ORDER | STORAGE | -| ALL | DBS | INSERT | PARTITIONS | STREAM | -| ALTER | DEFERRED | INSTEAD | PASS | STREAMS | -| AND | DELIMITERS | INT | PLUS | STRING | -| AS | DESC | INTEGER | PPS | SYNCDB | -| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE | -| ATTACH | DETACH | INTO | PREV | TABLES | -| BEFORE | DISTINCT | IS | PRIVILEGE | TAG | -| BEGIN | DIVIDE | ISNULL | QTIME | TAGS | -| BETWEEN | DNODE | JOIN | QUERIES | TBNAME | -| BIGINT | DNODES | KEEP | QUERY | TIMES | -| BINARY | DOT | KEY | QUORUM | TIMESTAMP | -| BITAND | DOUBLE | KILL | RAISE | TINYINT | -| BITNOT | DROP | LE | REM | TOPIC | -| BITOR | EACH | LIKE | REPLACE | TOPICS | -| BLOCKS | END | LIMIT | REPLICA | TRIGGER | -| BOOL | EQ | LINEAR | RESET | TSERIES | -| BY | EXISTS | LOCAL | RESTRICT | UMINUS | -| CACHE | EXPLAIN | LP | ROW | UNION | -| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED | -| CASCADE | FILE | LT | RSHIFT | UPDATE | -| CHANGE | FILL | MATCH | SCORES | UPLUS | -| CLUSTER | FLOAT | MAXROWS | SELECT | USE | -| COLON | FOR | MINROWS | SEMI | USER | -| COLUMN | FROM | MINUS | SESSION | USERS | -| COMMA | FSYNC | MNODES | SET | USING | -| COMP | GE | MODIFY | SHOW | VALUES | -| COMPACT | GLOB | MODULES | SLASH | VARIABLE | -| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES | -| CONFLICT | GROUP | NE | SLIMIT | VGROUPS | -| CONNECTION | GT | NONE | SMALLINT | VIEW | -| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | -| CONNS | ID | NOTNULL | STABLE | WAL | -| COPY | IF | NOW | STABLES | WHERE | -| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART | -| _WSTOP | _WDURATION | | | | - -## 特殊说明 -### TBNAME -`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。 - -获取一个超级表所有的子表名及相关的标签信息: -```mysql -SELECT TBNAME, location FROM meters; -``` - -统计超级表下辖子表数量: -```mysql -SELECT COUNT(TBNAME) FROM meters; -``` - -以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。例如: -```mysql -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.SanFrancisco | - d1003 | California.SanFrancisco | - d1002 | California.LosAngeles | -
d1001 | California.LosAngeles | -Query OK, 4 row(s) in set (0.000881s) - -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) -``` -### _QSTART/_QSTOP/_QDURATION -表示查询过滤窗口的起始、结束以及持续时间(从 2.6.0.0 版本开始支持) - -### _WSTART/_WSTOP/_WDURATION -窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始、结束以及持续时间(从 2.6.0.0 版本开始支持) - -### _c0 -表示表或超级表的第一列 \ No newline at end of file diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md deleted file mode 100644 index cb01b3a918778abc6c7891c1ff185f1db32d3d36..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: TAOS SQL -description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容" --- - -本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。 - -TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAOS SQL 中不提供数据删除的相关功能。 - -本章节 SQL 语法遵循如下约定: - -- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身 -- \[ \] 表示内容为可选项,但不能输入 [] 本身 -- | 表示多选一,选择其中一个即可,但不能输入 | 本身 -- … 表示前面的项可重复多个 - -为更好地说明 SQL 语法的规则及其特点,本文假设存在一个数据集。以智能电表(meters)为例,假设每个智能电表采集电流、电压、相位三个量。其建模如下: - -``` -taos> DESCRIBE meters; - Field | Type | Length | Note | -================================================================================= - ts | TIMESTAMP | 8 | | - current | FLOAT | 4 | | - voltage | INT | 4 | | - phase | FLOAT | 4 | | - location | BINARY | 64 | TAG | - groupid | INT | 4 | TAG | -``` - -数据集包含 4 个智能电表的数据,按照 TDengine 的建模规则,对应 4 个子表,其名称分别是 d1001, d1002, d1003, d1004。 - -```mdx-code-block -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - - -``` diff --git a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx b/docs-cn/14-reference/02-rest-api/02-rest-api.mdx deleted file mode 100644 index 43099319b9c5bb1420c199cfa9f7def0b2c44d3d..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: REST API --- - -为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 - -:::note -与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。从 2.2.0.0 版本开始,支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认由 taosAdapter 提供,要求必须在 URL 中指定 db_name。 -::: - -## 安装 - -RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。 - -## 验证 - -在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。 - -下面以 Ubuntu 环境为例,使用 curl 工具(确认已经安装)来验证 RESTful 接口是否正常工作,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。 - -下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号: - -```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql -``` - -返回值结果如下表示验证通过: - -```json -{ - "status": "succ", - "head": [ - "name", - "created_time", - "ntables", - "vgroups", - "replica", - "quorum", - "days", - "keep1,keep2,keep(D)", - "cache(MB)", - "blocks", - "minrows", - "maxrows", - "wallevel", - "fsync", - "comp", - "precision", - "status" - ], - "data": [ - [ - "log", - "2020-09-02 17:23:00.039", - 4, - 1, - 1, - 1, - 10, - "30,30,30", - 1, - 3, - 100, - 4096, -
1, - 3000, - 2, - "us", - "ready" - ] - ], - "rows": 1 -} -``` - -## HTTP 请求格式 - -``` -http://<fqdn>:<port>/rest/sql/[db_name] -``` - -参数说明: - -- fqdn: 集群中的任一台主机 FQDN 或 IP 地址 -- port: 配置文件中 httpPort 配置项,缺省为 6041 -- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) - -例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。 - -HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 - -- 自定义身份认证信息如下所示(token 稍后介绍) - - ``` - Authorization: Taosd <TOKEN> - ``` - -- Basic 身份认证信息如下所示 - - ``` - Authorization: Basic <TOKEN> - ``` - -HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据表应提供数据库前缀,例如 db_name.tb_name。如果表名不带数据库前缀,又没有在 URL 中指定数据库名的话,系统会返回错误。因为 HTTP 模块只是一个简单的转发,没有当前 DB 的概念。 - -使用 `curl` 通过自定义身份认证方式来发起一个 HTTP Request,语法如下: - -```bash -curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <fqdn>:<port>/rest/sql/[db_name] -``` - -或者 - -```bash -curl -u username:password -d '<SQL>' <fqdn>:<port>/rest/sql/[db_name] -``` - -其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。 - -## HTTP 返回格式 - -返回值为 JSON 格式,如下: - -```json -{ - "status": "succ", - "head": ["ts","current", …], - "column_meta": [["ts",9,8],["current",6,4], …], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, …], - ["2018-10-03 14:38:15.000", 12.6, …] - ], - "rows": 2 -} -``` - -说明: - -- status: 告知操作结果是成功还是失败。 -- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在后续版本中,有可能会从返回值中去掉 head 这一项。) -- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如 `["current",6,4]` 表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。 -- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。 -- rows: 表明总共多少行数据。 - -column_meta 中的列类型说明: - -- 1:BOOL -- 2:TINYINT -- 3:SMALLINT -- 4:INT -- 5:BIGINT -- 6:FLOAT -- 7:DOUBLE -- 8:BINARY -- 9:TIMESTAMP -- 10:NCHAR - -## 自定义授权码 - -HTTP 请求中需要带有授权码 `<TOKEN>`,用于身份识别。授权码通常由管理员提供,可简单的通过发送 `HTTP GET` 请求来获取授权码,操作如下: - -```bash -curl http://<fqdn>:<port>/rest/login/<username>/<password> -``` - -其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 JSON 格式,各字段含义如下: - -- status:请求结果的标志位 - -- code:返回值代码 - -- desc:授权码 - -获取授权码示例: - -```bash -curl http://192.168.0.1:6041/rest/login/root/taosdata -``` - -返回值: - -```json -{ - "status": "succ", - "code": 0, - "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -} -``` - -## 使用示例 - -- 在 demo 库里查询表 d1001 的所有记录: - - ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql - ``` - - 返回值: - - ```json - { - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, 219, 0.31], - ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] - ], - "rows": 2 - } - ``` - -- 创建库 demo: - - ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql - ``` - - 返回值: - - ```json - { - "status": "succ", - "head": ["affected_rows"], - "column_meta": [["affected_rows", 4, 4]], - "data": [[1]], - "rows": 1 - } - ``` - -## 其他用法 - -### 结果集采用 Unix 时间戳 - -HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如 - -```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001'
192.168.0.1:6041/rest/sqlt -``` - -返回结果: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - [1538548685000, 10.3, 219, 0.31], - [1538548695000, 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -### 结果集采用 UTC 时间字符串 - -HTTP 请求 URL 采用 `/rest/sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如 - -```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc -``` - -返回值: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], - ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -## 重要配置项 - -下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。 - -- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。 -- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。 -- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。 -- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。 -- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息。 -- httpDbNameMandatory: 是否必须在 RESTful URL 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful URL 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。 - -:::note -如果使用 taosd 提供的 REST API, 那么以上配置需要写在 taosd 的配置文件 taos.cfg 中。如果使用 taosAdapter 提供的 REST API, 那么需要参考 taosAdapter [对应的配置方法](/reference/taosadapter/)。 -::: diff --git a/docs-cn/14-reference/03-connector/_verify_linux.mdx b/docs-cn/14-reference/03-connector/_verify_linux.mdx deleted file mode 100644 index af543108f88e1255ba700b8b70dbe838eb32f93d..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/_verify_linux.mdx +++ /dev/null @@ -1,14 +0,0 @@ -在 Linux shell 下直接执行 `taos` 连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下: - -```text -$ taos -Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 -Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. -taos> show databases; -name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | -========================================================================================================================================================================================================================= -test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | -log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | -Query OK, 2 row(s) in set (0.001198s) -taos> -``` diff --git a/docs-cn/14-reference/03-connector/_verify_windows.mdx b/docs-cn/14-reference/03-connector/_verify_windows.mdx deleted file mode 100644 index 19ac71ec310ea18eb8dacde6dfa34f3bcd57f560..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/_verify_windows.mdx +++ /dev/null @@ -1,14 +0,0 @@ -在 cmd 下进入到 C:\TDengine 目录下直接执行 `taos.exe`,连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下: - -```text - C:\TDengine>taos - Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 - Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
- taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | - =================================================================================================================================================================================================================================================================== - test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | - log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | - Query OK, 2 row(s) in set (0.045000s) - taos> -``` diff --git a/docs-cn/14-reference/03-connector/cpp.mdx b/docs-cn/14-reference/03-connector/cpp.mdx deleted file mode 100644 index aba1d6c717dfec9228f38e89f90cbf1be0021045..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/cpp.mdx +++ /dev/null @@ -1,451 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: C/C++ -title: C/C++ Connector ---- - -C/C++ 开发人员可以使用 TDengine 的客户端驱动,即 C/C++连接器 (以下都用 TDengine 客户端驱动表示),开发自己的应用来连接 TDengine 集群完成数据存储、查询以及其他功能。TDengine 客户端驱动的 API 类似于 MySQL 的 C API。应用程序使用时,需要包含 TDengine 头文件 _taos.h_,里面列出了提供的 API 的函数原型;应用程序还要链接到所在平台上对应的动态库。 - -```c -#include -``` - -TDengine 服务端或客户端安装后,`taos.h` 位于: - -- Linux:`/usr/local/taos/include` -- Windows:`C:\TDengine\include` - -TDengine 客户端驱动的动态库位于: - -- Linux: `/usr/local/taos/driver/libtaos.so` -- Windows: `C:\TDengine\taos.dll` - -## 支持的平台 - -请参考[支持的平台列表](/reference/connector#支持的平台) - -## 支持的版本 - -TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一对应的强对应关系,建议使用与 TDengine 服务端完全相同的客户端驱动。虽然低版本的客户端驱动在前三段版本号一致(即仅第四段版本号不同)的情况下也能够与高版本的服务端相兼容,但这并非推荐用法。强烈不建议使用高版本的客户端驱动访问低版本的服务端。 - -## 安装步骤 - -TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤) - -## 建立连接 - -使用客户端驱动访问 TDengine 集群的基本过程为:建立连接、查询和写入、关闭连接、清除资源。 - -下面为建立连接的示例代码,其中省略了查询和写入部分,展示了如何建立连接、关闭连接以及清除资源。 - -```c - TAOS *taos = taos_connect("localhost:6030", "root", "taosdata", NULL, 0); - if (taos == NULL) { - printf("failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/); - exit(1); - } - - /* put your code here for read and write */ - - taos_close(taos); - taos_cleanup(); -``` - -在上面的示例代码中, `taos_connect()` 建立到客户端程序所在主机的 6030 端口的连接,`taos_close()`关闭当前连接,`taos_cleanup()`清除客户端驱动所申请和使用的资源。 - -:::note - -- 如未特别说明,当 API 的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。 -- 所有的错误码以及对应的原因描述在 `taoserror.h` 文件中。 - -::: - -## 示例程序 - -本节展示了使用客户端驱动访问 TDengine 集群的常见访问方式的示例代码。 - -### 同步查询示例 - -
-<details>
-<summary>同步查询</summary>
-
-```c
-{{#include examples/c/demo.c}}
-```
-
-</details>
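-
-下面再给出一段最小化的同步查询示意代码,展示连接、执行、检查错误码、遍历结果集、释放资源的核心调用顺序。仅为示意:假设本机 taosd 已启动,使用默认端口 6030 与缺省用户名密码,其中 taos_print_row 为 taos.h 中声明的辅助打印函数;完整用法请以上面包含的官方示例 demo.c 为准。
-
-```c
-#include <stdio.h>
-#include <stdlib.h>
-#include <taos.h>
-
-int main() {
-  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
-  if (taos == NULL) {
-    printf("failed to connect to server\n");
-    exit(1);
-  }
-  TAOS_RES *res = taos_query(taos, "show databases");
-  if (taos_errno(res) != 0) {  // 不能以返回值是否为 NULL 判断成败,需检查错误码
-    printf("query failed, reason: %s\n", taos_errstr(res));
-    taos_free_result(res);
-    taos_close(taos);
-    exit(1);
-  }
-  int num_fields = taos_num_fields(res);
-  TAOS_FIELD *fields = taos_fetch_fields(res);
-  TAOS_ROW row;
-  while ((row = taos_fetch_row(res)) != NULL) {  // 按行获取结果集中的数据
-    char line[1024] = {0};
-    taos_print_row(line, row, fields, num_fields);
-    printf("%s\n", line);
-  }
-  taos_free_result(res);  // 务必释放结果集,否则可能导致内存泄露
-  taos_close(taos);
-  taos_cleanup();
-  return 0;
-}
-```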
- -### 异步查询示例 - -
-<details>
-<summary>异步查询</summary>
-
-```c
-{{#include examples/c/asyncdemo.c}}
-```
-
-</details>
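-
-为便于理解回调式的调用方式,下面给出一段最小化的异步查询示意代码。仅为示意:假设 log 库中存在可查询的表 log.log,错误处理与退出同步均做了简化;接口语义见后文异步查询 API 小节,完整用法请以上面包含的官方示例 asyncdemo.c 为准。
-
-```c
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <taos.h>
-
-static volatile int done = 0;
-
-// 取数回调:numOfRows 为本批次行数,为 0 表示结果返回完成,负值表示查询出错
-static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
-  if (numOfRows > 0) {
-    TAOS_ROW row;
-    while ((row = taos_fetch_row(res)) != NULL) {
-      // 在此处理本批次中的每一行记录
-    }
-    taos_fetch_rows_a(res, fetch_cb, param);  // 继续获取下一批记录
-  } else {
-    if (numOfRows < 0) printf("fetch failed, code: %d\n", numOfRows);
-    taos_free_result(res);
-    done = 1;
-  }
-}
-
-// 查询回调:code 为 0 表示语句执行成功,随后开始批量取回结果
-static void query_cb(void *param, TAOS_RES *res, int code) {
-  if (code != 0) {
-    printf("query failed, reason: %s\n", taos_errstr(res));
-    taos_free_result(res);
-    done = 1;
-    return;
-  }
-  taos_fetch_rows_a(res, fetch_cb, param);
-}
-
-int main() {
-  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
-  if (taos == NULL) exit(1);
-  taos_query_a(taos, "select * from log.log", query_cb, NULL);  // 立即返回,不阻塞
-  while (!done) usleep(1000);  // 示意用的简单等待,实际应用建议使用信号量等同步机制
-  taos_close(taos);
-  taos_cleanup();
-  return 0;
-}
-```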
- -### 参数绑定示例 - -
-<details>
-<summary>参数绑定</summary>
-
-```c
-{{#include examples/c/prepare.c}}
-```
-
-</details>
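-
-下面是一段最小化的参数绑定写入示意代码,按后文参数绑定 API 小节所述步骤写入单行数据。仅为示意:假设 test 库中已存在表 tb1 (ts timestamp, current float),并为保持简洁省略了部分返回值检查;完整用法请以上面包含的官方示例 prepare.c 为准。
-
-```c
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <taos.h>
-
-int main() {
-  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
-  if (taos == NULL) return 1;
-
-  TAOS_STMT *stmt = taos_stmt_init(taos);                      // 创建参数绑定对象
-  taos_stmt_prepare(stmt, "insert into tb1 values(?, ?)", 0);  // length 传 0,自动判断 SQL 长度
-
-  int64_t ts = 1626006833000;  // 毫秒时间戳
-  float current = 10.3f;
-  TAOS_BIND params[2];
-  memset(params, 0, sizeof(params));  // is_null 等未用字段置零
-  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
-  params[0].buffer = &ts;
-  params[0].buffer_length = sizeof(ts);
-  params[0].length = &params[0].buffer_length;
-  params[1].buffer_type = TSDB_DATA_TYPE_FLOAT;
-  params[1].buffer = &current;
-  params[1].buffer_length = sizeof(current);
-  params[1].length = &params[1].buffer_length;
-
-  taos_stmt_bind_param(stmt, params);  // 以单行方式绑定 VALUES 的值
-  taos_stmt_add_batch(stmt);           // 把当前绑定的参数加入批处理
-  if (taos_stmt_execute(stmt) != 0) {  // 执行已准备好的批处理指令
-    printf("execute failed: %s\n", taos_stmt_errstr(stmt));
-  }
-  taos_stmt_close(stmt);  // 执行完毕,释放所有资源
-  taos_close(taos);
-  taos_cleanup();
-  return 0;
-}
-```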
- -### 无模式写入示例 - -
-<details>
-<summary>无模式写入</summary>
-
-```c
-{{#include examples/c/schemaless.c}}
-```
-
-</details>
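-
-下面是一段最小化的无模式写入示意代码,以 InfluxDB 行协议写入一行数据。仅为示意:假设 test 库已创建(无模式写入要求连接时指定数据库);接口与常量的详细说明见后文无模式(schemaless)写入 API 小节,完整用法请以上面包含的官方示例 schemaless.c 为准。
-
-```c
-#include <stdio.h>
-#include <taos.h>
-
-int main() {
-  // 连接时指定数据库(此处假设 test 库已存在)
-  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
-  if (taos == NULL) return 1;
-
-  char *lines[] = {
-      "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1626006833639000000",
-  };
-  // InfluxDB 行协议,时间戳为纳秒精度
-  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
-                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
-  if (taos_errno(res) != 0) {
-    printf("schemaless insert failed, reason: %s\n", taos_errstr(res));
-  }
-  taos_free_result(res);  // 返回的 TAOS_RES 需由调用方释放,否则会出现内存泄漏
-  taos_close(taos);
-  taos_cleanup();
-  return 0;
-}
-```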
- -### 订阅和消费示例 - -
-<details>
-<summary>订阅和消费</summary>
-
-```c
-{{#include examples/c/subscribe.c}}
-```
-
-</details>
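-
-下面是一段最小化的同步模式订阅示意代码。仅为示意:假设 test 库中存在超级表 meters,且按官方示例的惯例未对 taos_consume 返回的结果集调用 taos_free_result(结果集由订阅对象管理);各参数含义见后文订阅和消费 API 小节,完整用法请以上面包含的官方示例 subscribe.c 为准。
-
-```c
-#include <stdio.h>
-#include <taos.h>
-
-int main() {
-  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
-  if (taos == NULL) return 1;
-
-  // 同步模式:回调函数 fp 传 NULL,轮询周期设为 1000 毫秒
-  TAOS_SUB *tsub = taos_subscribe(taos, 1, "topic_demo",
-                                  "select * from meters", NULL, NULL, 1000);
-  if (tsub == NULL) return 1;
-
-  for (int i = 0; i < 10; i++) {         // 示意:仅消费 10 个周期
-    TAOS_RES *res = taos_consume(tsub);  // 距上次调用不足轮询周期时会阻塞
-    if (res == NULL) break;              // NULL 表示系统出错
-    TAOS_ROW row;
-    while ((row = taos_fetch_row(res)) != NULL) {
-      // 尽快处理新到达的记录,避免结果在服务端持续缓存
-    }
-  }
-  taos_unsubscribe(tsub, 0);  // keepProgress 传 0:不保留订阅进度
-  taos_close(taos);
-  taos_cleanup();
-  return 0;
-}
-```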
- -:::info -更多示例代码及下载请见 [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c)。 -也可以在安装目录下的 `examples/c` 路径下找到。 该目录下有 makefile,在 Linux 环境下,直接执行 make 就可以编译得到执行文件。 -**提示:**在 ARM 环境下编译时,请将 makefile 中的 `-msse4.2` 去掉,这个选项只有在 x64/x86 硬件平台上才能支持。 - -::: - -## API 参考 - -以下分别介绍 TDengine 客户端驱动的基础 API、同步 API、异步 API、订阅 API 和无模式写入 API。 - -### 基础 API - -基础 API 用于完成创建数据库连接等工作,为其它 API 的执行提供运行时环境。 - -- `void taos_init()` - - 初始化运行环境。如果没有主动调用该 API,那么调用 `taos_connect()` 时驱动将自动调用该 API,故程序一般无需手动调用。 - -- `void taos_cleanup()` - - 清理运行环境,应用退出前应调用。 - -- `int taos_options(TSDB_OPTION option, const void * arg, ...)` - - 设置客户端选项,目前支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。 - -- `char *taos_get_client_info()` - - 获取客户端版本信息。 - -- `TAOS *taos_connect(const char *host, const char *user, const char *pass, const char *db, int port)` - - 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: - - - host:TDengine 集群中任一节点的 FQDN - - user:用户名 - - pass:密码 - - db: 数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 - - port:taosd 程序监听的端口 - - 返回值为空表示失败。应用程序需要保存返回的参数,以便后续使用。 - - :::info - 同一进程可以根据不同的 host/port 连接多个 TDengine 集群 - - ::: - -- `char *taos_get_server_info(TAOS *taos)` - - 获取服务端版本信息。 - -- `int taos_select_db(TAOS *taos, const char *db)` - - 将当前的缺省数据库设置为 `db`。 - -- `void taos_close(TAOS *taos)` - - 关闭连接,其中`taos`是 `taos_connect()` 返回的句柄。 - -### 同步查询 API - -本小节介绍 API 均属于同步接口。应用调用后,会阻塞等待响应,直到获得返回结果或错误信息。 - -- `TAOS_RES* taos_query(TAOS *taos, const char *sql)` - - 执行 SQL 语句,可以是 DQL、DML 或 DDL 语句。 其中的 `taos` 参数是通过 `taos_connect()` 获得的句柄。不能通过返回值是否是 `NULL` 来判断执行结果是否失败,而是需要用 `taos_errno()` 函数解析结果集中的错误代码来进行判断。 - -- `int taos_result_precision(TAOS_RES *res)` - - 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。 - -- `TAOS_ROW taos_fetch_row(TAOS_RES *res)` - - 按行获取查询结果集中的数据。 - -- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)` - - 批量获取查询结果集中的数据,返回值为获取到的数据的行数。 - -- `int taos_num_fields(TAOS_RES *res)` 和 `int taos_field_count(TAOS_RES *res)` - - 这两个 API 等价,用于获取查询结果集中的列数。 - -- `int* taos_fetch_lengths(TAOS_RES *res)` - - 获取结果集中每个字段的长度。返回值是一个数组,其长度为结果集的列数。 - -- `int taos_affected_rows(TAOS_RES *res)` - - 获取被所执行的 SQL 语句影响的行数。 - -- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)` - - 获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度),与 `taos_num_fileds()` 配合使用,可用来解析 `taos_fetch_row()` 返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下: - -```c -typedef struct taosField { - char name[65]; // column name - uint8_t type; // data type - int16_t bytes; // length, in bytes -} TAOS_FIELD; -``` - -- `void taos_stop_query(TAOS_RES *res)` - - 停止当前查询的执行。 - -- `void taos_free_result(TAOS_RES *res)` - - 释放查询结果集以及相关的资源。查询完成后,务必调用该 API 释放资源,否则可能导致应用内存泄露。但也需注意,释放资源后,如果再调用 `taos_consume()` 等获取查询结果的函数,将导致应用崩溃。 - -- `char *taos_errstr(TAOS_RES *res)` - - 获取最近一次 API 调用失败的原因,返回值为字符串标识的错误提示信息。 - -- `int taos_errno(TAOS_RES *res)` - - 获取最近一次 API 调用失败的原因,返回值为错误代码。 - -:::note -2.0 及以上版本 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池。而不推荐在应用中将该连接 (TAOS\*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性,但 “USE statement” 等状态量有可能在线程之间相互干扰。此外,C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 `taos_close()` 关闭连接。 - -::: - -### 异步查询 API - -TDengine 还提供性能更高的异步 API 处理数据插入、查询操作。在软硬件环境相同的情况下,异步 API 处理数据插入的速度比同步 API 快 2 ~ 4 倍。异步 API 采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步 API 在网络延迟严重的情况下,优势尤为突出。 - -异步 API 都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的 API 而定。第一个参数 param 是应用调用异步 API 时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是 SQL 操作的结果集,如果为空,比如 insert 
操作,表示没有记录返回,如果不为空,比如 select 操作,表示有记录返回。 - -异步 API 对于使用者的要求相对较高,用户可根据具体应用场景选择性使用。下面是两个重要的异步 API: - -- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);` - - 异步执行 SQL 语句。 - - - taos:调用 `taos_connect()` 返回的数据库连接 - - sql:需要执行的 SQL 语句 - - fp:用户定义的回调函数,其第三个参数 `code` 用于指示操作是否成功,`0` 表示成功,负数表示失败(调用 `taos_errstr()` 可获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数 `TAOS_RES *`,该参数是查询返回的结果集 - - param:应用提供一个用于回调的参数 - -- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);` - - 批量获取异步查询的结果集,只能与 `taos_query_a()` 配合使用。其中: - - - res:`taos_query_a()` 回调时返回的结果集 - - fp:回调函数。其参数 `param` 是用户可定义的传递给回调函数的参数结构体;`numOfRows` 是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用 `taos_fetch_row()` 前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用 `taos_fetch_rows_a()` 获取下一批记录进行处理,直到返回的记录数 `numOfRows` 为零(结果返回完成)或记录数为负值(查询出错)。 - -TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。 - -### 参数绑定 API - -除了直接调用 `taos_query()` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,风格与 MySQL 类似,目前也仅支持用问号 `?` 来代表待绑定的参数。 - -从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下: - -1. 调用 `taos_stmt_init()` 创建参数绑定对象; -2. 调用 `taos_stmt_prepare()` 解析 INSERT 语句; -3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname()` 来设置表名; -4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags()` 来设置表名和 TAGS 的值; -5. 调用 `taos_stmt_bind_param_batch()` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值; -6. 调用 `taos_stmt_add_batch()` 把当前绑定的参数加入批处理; -7. 可以重复第 3 ~ 6 步,为批处理加入更多的数据行; -8. 调用 `taos_stmt_execute()` 执行已经准备好的批处理指令; -9. 执行完毕,调用 `taos_stmt_close()` 释放所有资源。 - -说明:如果 `taos_stmt_execute()` 执行成功,假如不需要改变 SQL 语句的话,那么是可以复用 `taos_stmt_prepare()` 的解析结果,直接进行第 3 ~ 6 步绑定新数据的。但如果执行出错,那么并不建议继续在当前的环境上下文下继续工作,而是建议释放资源,然后从 `taos_stmt_init()` 步骤重新开始。 - -接口相关的具体函数如下(也可以参考 [prepare.c](https://github.com/taosdata/TDengine/blob/develop/examples/c/prepare.c) 文件中使用对应函数的方式): - -- `TAOS_STMT* taos_stmt_init(TAOS *taos)` - - 创建一个 TAOS_STMT 对象用于后续调用。 - -- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` - - 解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。 - -- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)` - - 不如 `taos_stmt_bind_param_batch()` 效率高,但可以支持非 INSERT 类型的 SQL 语句。 - 进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下: - - ```c - typedef struct TAOS_BIND { - int buffer_type; - void * buffer; - uintptr_t buffer_length; // not in use - uintptr_t * length; - int * is_null; - int is_unsigned; // not in use - int * error; // not in use - } TAOS_BIND; - ``` - -- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)` - - (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) - 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 - -- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` - - (2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) - 当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。TAGS 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。 - -- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)` - - (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) - 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下: - - ```c - typedef struct TAOS_MULTI_BIND { - int 
buffer_type; - void * buffer; - uintptr_t buffer_length; - uintptr_t * length; - char * is_null; - int num; // the number of columns - } TAOS_MULTI_BIND; - ``` - -- `int taos_stmt_add_batch(TAOS_STMT *stmt)` - - 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param()` 或 `taos_stmt_bind_param_batch()` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。 - -- `int taos_stmt_execute(TAOS_STMT *stmt)` - - 执行准备好的语句。目前,一条语句只能执行一次。 - -- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` - - 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result()` 以释放资源。 - -- `int taos_stmt_close(TAOS_STMT *stmt)` - - 执行完毕,释放所有资源。 - -- `char * taos_stmt_errstr(TAOS_STMT *stmt)` - - (2.1.3.0 版本新增) - 用于在其他 STMT API 返回错误(返回错误码或空指针)时获取错误信息。 - -### 无模式(schemaless)写入 API - -除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 - -- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` - - **功能说明** - 该接口将行协议的文本数据写入到 TDengine 中。 - - **参数说明** - taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。 - lines:文本数据。满足解析格式要求的无模式文本字符串。 - numLines:文本数据的行数,不能为 0 。 - protocol: 行协议类型,用于标识文本数据格式。 - precision:文本数据中的时间戳精度字符串。 - - **返回值** - TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。 - 在某些情况下,返回的 TAOS_RES 为 `NULL`,此时仍然可以调用 `taos_errno()` 来安全地获得错误码信息。 - 返回的 TAOS_RES 需要调用方来负责释放,否则会出现内存泄漏。 - - **说明** - 协议类型是枚举类型,包含以下三种格式: - - - TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol) - - TSDB_SML_TELNET_PROTOCOL: OpenTSDB Telnet 文本行协议 - - TSDB_SML_JSON_PROTOCOL: OpenTSDB Json 协议格式 - - 时间戳分辨率的定义,定义在 `taos.h` 文件中,具体内容如下: - - - TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0, - - TSDB_SML_TIMESTAMP_HOURS, - - TSDB_SML_TIMESTAMP_MINUTES, - - TSDB_SML_TIMESTAMP_SECONDS, - - TSDB_SML_TIMESTAMP_MILLI_SECONDS, - - TSDB_SML_TIMESTAMP_MICRO_SECONDS, - - TSDB_SML_TIMESTAMP_NANO_SECONDS - - 需要注意的是,时间戳分辨率参数只在协议类型为 `SML_LINE_PROTOCOL` 的时候生效。 - 对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。 - - **支持版本** - 该功能接口从 2.3.0.0 版本开始支持。 - -### 订阅和消费 API - -订阅 API 目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。 - -- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` - - 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: - - - taos:已经建立好的数据库连接 - - restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 - - topic:订阅的主题(即名称),此参数是订阅的唯一标识 - - sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 - - fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` - - param:调用回调函数时的附加参数,系统 API 将其原样传递到回调函数,不进行任何处理 - - interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用 `taos_consume()` 的间隔小于此周期,API 将会阻塞,直到时间间隔超过此周期。 - -- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` - - 异步模式下,回调函数的原型,其参数为: - - - tsub:订阅对象 - - res:查询结果集,注意结果集中可能没有记录 - - param:调用 `taos_subscribe()` 时客户程序提供的附加参数 - - code:错误码 - - :::note - 在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。 - - ::: - -- `TAOS_RES *taos_consume(TAOS_SUB *tsub)` - - 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用 `taos_consume()` 的间隔小于订阅的轮询周期,API 将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该 API 将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此 API。 - - :::note - 在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。 - - ::: - -- `void 
taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` - - 取消订阅。 如参数 `keepProgress` 不为 0,API 会保留订阅的进度信息,后续调用 `taos_subscribe()` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 - diff --git a/docs-cn/14-reference/03-connector/csharp.mdx b/docs-cn/14-reference/03-connector/csharp.mdx deleted file mode 100644 index 1e23df9286bf0cb3bf1db95e334301c04d01ad04..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/csharp.mdx +++ /dev/null @@ -1,189 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 7 -sidebar_label: C# -title: C# Connector ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -import Preparition from "./_preparition.mdx" -import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../../07-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" - -`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 - -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。 - -本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 - -`TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet)。 - -## 支持的平台 - -支持的平台和 TDengine 客户端驱动支持的平台一致。 - -## 版本支持 - -请参考[版本支持列表](/reference/connector#版本支持) - -## 支持的功能特性 - -1. 连接管理 -2. 普通查询 -3. 连续查询 -4. 参数绑定 -5. 订阅功能 -6. 
Schemaless - -## 安装步骤 - -### 安装前准备 - -* 安装 [.NET SDK](https://dotnet.microsoft.com/download) -* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) -* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) - -### 使用 dotnet CLI 安装 - - - - -可以在当前 .NET 项目的路径下,通过 dotnet 命令引用 Nuget 中发布的 `TDengine.Connector` 到当前项目。 - -``` bash -dotnet add package TDengine.Connector -``` - - - - -可以下载 TDengine 的源码,直接引用最新版本的 TDengine.Connector 库 - -```bash -git clone https://github.com/taosdata/TDengine.git -cd TDengine/src/connector/C#/src/ -cp -r TDengineDriver/ myProject - -cd myProject -dotnet add TDengineDriver/TDengineDriver.csproj -``` - - - -## 建立连接 - -``` C# -using TDengineDriver; - -namespace TDengineExample -{ - - internal class EstablishConnection - { - static void Main(String[] args) - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - TDengine.Close(conn); - TDengine.Cleanup(); - } - } -} - -``` - -## 使用示例 - -### 写入数据 - -#### SQL 写入 - - - -#### InfluxDB 行协议写入 - - - -#### OpenTSDB Telnet 行协议写入 - - - -#### OpenTSDB JSON 行协议写入 - - - -### 查询数据 - -#### 同步查询 - - - -#### 异步查询 - - - -### 更多示例程序 - -|示例程序 | 示例程序描述 | -|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------| -| [C#checker](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/C%23checker) | 使用 TDengine.Connector 可以通过 help 命令中提供的参数,测试C# Driver的同步写入和查询 | -| [TDengineTest](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/TDengineTest) | 使用 TDengine.Connector 实现的简单写入和查询的示例 | -| [insertCn](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/insertCn) | 使用 TDengine.Connector 实现的写入和查询中文字符的示例 | -| [jsonTag](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/jsonTag) | 使用 TDengine.Connector 实现的写入和查询 json tag 类型数据的示例 | -| [stmt](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/stmt) | 使用 TDengine.Connector 实现的参数绑定的示例 | -| [schemaless](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 | -| [benchmark](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/taosdemo) | 使用 TDengine.Connector 实现的简易 Benchmark | -| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/QueryAsyncSample.cs) | 使用 TDengine.Connector 实现的异步查询的示例 | -| [subscribe](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/SubscribeSample.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 | - -## 重要更新记录 - -| TDengine.Connector | 说明 | -|--------------------|--------------------------------| -| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 | -| 1.0.5 | 修复 Windows 同步查询中文报错 bug。 | -| 1.0.4 | 新增异步查询,订阅等功能。修复绑定参数 bug。 | -| 1.0.3 | 新增参数绑定、schemaless、 json tag等功能。 | -| 1.0.2 | 新增连接管理、同步查询、错误信息等功能。 | - -## 其他说明 - -### 第三方驱动 - -`Maikebing.Data.Taos` 是一个 TDengine 的 ADO.NET 连接器,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考: - -* 接口下载: -* 用法说明: - -## 常见问题 - -1. "Unable to establish connection","Unable to resolve FQDN" - - 一般是因为 FQDN 配置不正确。可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html)解决。 - -2. Unhandled exception. 
System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: 找不到指定的模块。 - - 一般是因为程序没有找到依赖的客户端驱动。解决方法为:Windows 下可以将 `C:\TDengine\driver\taos.dll` 拷贝到 `C:\Windows\System32\ ` 目录下,Linux 下建立如下软链接 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 - -## API 参考 - -[API 参考](https://docs.taosdata.com/api/connector-csharp/html/860d2ac1-dd52-39c9-e460-0829c4e5a40b.htm) diff --git a/docs-cn/14-reference/03-connector/go.mdx b/docs-cn/14-reference/03-connector/go.mdx deleted file mode 100644 index 88b09aa5d0b0161973e3e7eabb4cf04357c134f3..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/go.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 4 -sidebar_label: Go -title: TDengine Go Connector ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -import Preparition from "./_preparition.mdx" -import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../07-develop/04-query-data/_go.mdx" - -`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 - -`driver-go` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。另外一种是 **REST 连接**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 运行实例。REST 连接实现的功能特性集合和原生连接有少量不同。 - -本文介绍如何安装 `driver-go`,并通过 `driver-go` 连接 TDengine 集群、进行数据查询、数据写入等基本操作。 - -`driver-go` 的源码托管在 [GitHub](https://github.com/taosdata/driver-go)。 - -## 支持的平台 - -原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 -REST 连接支持所有能运行 Go 的平台。 - -## 版本支持 - -请参考[版本支持列表](/reference/connector#版本支持) - -## 支持的功能特性 - -### 原生连接 - -“原生连接”指连接器通过 TDengine 客户端驱动(taosc)直接与 TDengine 运行实例建立的连接。支持的功能特性有: - -* 普通查询 -* 连续查询 -* 订阅 -* schemaless 接口 -* 参数绑定接口 - -### REST 连接 - -"REST 连接"指连接器通过 taosAdapter 组件提供的 REST API 与 TDengine 运行实例建立的连接。支持的功能特性有: - -* 普通查询 -* 连续查询 - -## 安装步骤 - -### 安装前准备 - -* 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) -* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) - -配置好环境变量,检查命令: - -* ```go env``` -* ```gcc -v``` - -### 使用 go get 安装 - -`go get -u github.com/taosdata/driver-go/v2@develop` - -### 使用 go mod 管理 - -1. 使用 `go mod` 命令初始化项目: - - ```text - go mod init taos-demo - ``` - -2. 引入 taosSql : - - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" - ) - ``` - -3. 使用 `go mod tidy` 更新依赖包: - - ```text - go mod tidy - ``` - -4. 
使用 `go run taos-demo` 运行程序或使用 `go build` 命令编译出二进制文件。 - - ```text - go run taos-demo - go build - ``` - -## 建立连接 - -### 数据源名称(DSN) - -数据源名称具有通用格式,例如 [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php),但没有类型前缀(方括号表示可选): - -``` text -[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN] -``` - -完整形式的 DSN: - -```text -username:password@protocol(address)/dbname?param=value -``` -### 使用连接器进行连接 - - - - -_taosSql_ 通过 cgo 实现了 Go 的 `database/sql/driver` 接口。只需要引入驱动就可以使用 [`database/sql`](https://golang.org/pkg/database/sql/) 的接口。 - -使用 `taosSql` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`,DSN 支持的参数: - -* configPath 指定 taos.cfg 目录 - -示例: - -```go -package main - -import ( - "database/sql" - "fmt" - - _ "github.com/taosdata/driver-go/v2/taosSql" -) - -func main() { - var taosUri = "root:taosdata@tcp(localhost:6030)/" - taos, err := sql.Open("taosSql", taosUri) - if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } -} -``` - - - - -_taosRestful_ 通过 `http client` 实现了 Go 的 `database/sql/driver` 接口。只需要引入驱动就可以使用[`database/sql`](https://golang.org/pkg/database/sql/)的接口。 - -使用 `taosRestful` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`,DSN 支持的参数: - -* `disableCompression` 是否接受压缩数据,默认为 true 不接受压缩数据,如果传输数据使用 gzip 压缩设置为 false。 -* `readBufferSize` 读取数据的缓存区大小默认为 4K(4096),当查询结果数据量多时可以适当调大该值。 - -示例: - -```go -package main - -import ( - "database/sql" - "fmt" - - _ "github.com/taosdata/driver-go/v2/taosRestful" -) - -func main() { - var taosUri = "root:taosdata@http(localhost:6041)/" - taos, err := sql.Open("taosRestful", taosUri) - if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } -} -``` - - - -## 使用示例 - -### 写入数据 - -#### SQL 写入 - - - -#### InfluxDB 行协议写入 - - - -#### OpenTSDB Telnet 行协议写入 - - - -#### OpenTSDB JSON 行协议写入 - - - -### 查询数据 - - - -### 更多示例程序 - -* [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go) -* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 - -## 使用限制 - -由于 REST 接口无状态所以 `use db` 语法不会生效,需要将 db 名称放到 SQL 语句中,如:`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`否则将报错`[0x217] Database not specified or available`。 - -也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`,此方法在 TDengine 2.4.0.5 版本的 taosAdapter 开始支持。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 - -完整示例如下: - -```go -package main - -import ( - "database/sql" - "fmt" - "time" - - _ "github.com/taosdata/driver-go/v2/taosRestful" -) - -func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) - if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - if err != nil { - fmt.Println("failed to insert, err:", err) - return - } - rows, err := taos.Query("select * from tb1") - if err != nil { - fmt.Println("failed to select from table, err:", err) - return - } - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) - } -} -``` - -## 常见问题 - -1. 
无法找到包 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 中 require 块对`github.com/taosdata/driver-go/v2`的引用改为`github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -2. database/sql 中 stmt(参数绑定)相关接口崩溃 - - REST 不支持参数绑定相关接口,建议使用`db.Exec`和`db.Query`。 - -3. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` - - 在 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 - -4. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` - - 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 - -5. 升级 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -6. `readBufferSize` 参数调大后无明显效果 - - `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 - -7. `disableCompression` 参数设置为 `false` 时查询效率降低 - - 当 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 - -8. `go get` 命令无法获取包,或者获取包超时 - - 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`。 - -## 常用 API - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - 该 API 用来打开 DB,返回一个类型为 \*DB 的对象。 - -:::info -该 API 成功创建的时候,并没有做权限等检查,只有在真正执行 Query 或者 Exec 的时候才能真正的去创建连接,并同时检查 user/password/host/port 是不是合法。 -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` 内置的方法,用来执行非查询相关 SQL。 - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` 内置的方法,用来执行查询语句。 - -### 高级功能(af)API - -`af` 包封装了连接管理、订阅、schemaless、参数绑定等 TDengine 高级功能。 - -#### 连接管理 - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - 该 API 通过 cgo 创建与 taosd 的连接。 - -* `func (conn *Connector) Close() error` - - 关闭与 taosd 的连接。 - -#### 订阅 - -* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` - - 订阅数据。 - -* `func (s *taosSubscriber) Consume() (driver.Rows, error)` - - 消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。 - -* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` - - 取消订阅数据。 - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - 写入 influxDB 行协议。 - -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - - 写入 OpenTDSB telnet 协议数据。 - -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - - 写入 OpenTSDB JSON 协议数据。 - -#### 参数绑定 - -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - - 参数绑定单行插入。 - -* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - - 参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。 - -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - - 初始化参数。 - -* `func (stmt *InsertStmt) Prepare(sql string) error` - - 参数绑定预处理 SQL 语句。 - -* `func (stmt *InsertStmt) SetTableName(name string) error` - - 参数绑定设置表名。 - -* `func (stmt *InsertStmt) SetSubTableName(name string) error` - - 参数绑定设置子表名。 - -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - 参数绑定多行数据。 - -* `func (stmt *InsertStmt) AddBatch() error` - - 添加到参数绑定批处理。 - -* `func (stmt *InsertStmt) Execute() error` - - 执行参数绑定。 - -* `func (stmt *InsertStmt) GetAffectedRows() int` - - 获取参数绑定插入受影响行数。 - -* `func (stmt *InsertStmt) Close() error` - - 结束参数绑定。 - -## API 参考 - -全部 API 见 [driver-go 
文档](https://pkg.go.dev/github.com/taosdata/driver-go/v2) diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx deleted file mode 100644 index ddab9e5f24c64e51e82cad6e299f3ea0d741b349..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/java.mdx +++ /dev/null @@ -1,840 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 2 -sidebar_label: Java -title: TDengine Java Connector -description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。 ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 - -![TDengine Database Connector Java](tdengine-jdbc-connector.webp) - -上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式: - -- JDBC 原生连接:Java 应用在物理节点 1(pnode1)上使用 TSDBDriver 直接调用客户端驱动(libtaos.so 或 taos.dll)的 API 将写入和查询请求发送到位于物理节点 2(pnode2)上的 taosd 实例。 -- JDBC REST 连接:Java 应用通过 RestfulDriver 将 SQL 封装成一个 REST 请求,发送给物理节点 2 的 REST 服务器(taosAdapter),通过 REST 服务器请求 taosd 并返回结果。 - -使用 REST 连接,不依赖 TDengine 客户端驱动,可以跨平台,更加方便灵活,但性能比原生连接器低约 30%。 - -:::info -TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但 TDengine 与关系对象型数据库的使用场景和技术特征存在差异,所以`taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: - -- TDengine 目前不支持针对单条数据记录的删除操作。 -- 目前不支持事务操作。 - -::: - -## 支持的平台 - -原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 -REST 连接支持所有能运行 Java 的平台。 - -## 版本支持 - -请参考[版本支持列表](/reference/connector#版本支持) - -## TDengine DataType 和 Java DataType - -TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: - -| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) | -| ----------------- | --------------------------------- | ---------------------------------- | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | -| JSON | - | java.lang.String | - -**注意**:JSON 类型仅在 tag 中支持。 - -## 安装步骤 - -### 安装前准备 - -使用 Java Connector 连接数据库前,需要具备以下条件: - -- 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本 -- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) - -### 安装连接器 - - - - -目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。 - -- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) -- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) -- [maven.aliyun](https://maven.aliyun.com/mvn/search) - -Maven 项目中,在 pom.xml 中添加以下依赖: - -```xml-dtd - - com.taosdata.jdbc - taos-jdbcdriver - 2.0.** - -``` - - - - -可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector - -```shell -git clone https://github.com/taosdata/taos-connector-jdbc.git -cd taos-connector-jdbc -mvn clean install -Dmaven.test.skip=true -``` - -编译后,在 target 目录下会产生 taos-jdbcdriver-2.0.XX-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 - - - - -## 建立连接 - -TDengine 的 JDBC URL 规范格式为: 
-`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` - -对于建立连接,原生连接与 REST 连接有细微不同。 - - - - -```java -Class.forName("com.taosdata.jdbc.TSDBDriver"); -String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; -Connection conn = DriverManager.getConnection(jdbcUrl); -``` - -以上示例,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 - -**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll)。 - -url 中的配置参数如下: - -- user:登录 TDengine 用户名,默认值 'root'。 -- password:用户登录密码,默认值 'taosdata'。 -- cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 -- charset:客户端使用的字符集,默认值为系统字符集。 -- locale:客户端语言环境,默认值系统当前 locale。 -- timezone:客户端使用的时区,默认值为系统当前时区。 -- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。 -- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。 - -JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 - -**使用 TDengine 客户端驱动配置文件建立连接 ** - -当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示: - -1. 在 Java 应用中不指定 hostname 和 port - -```java -public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - Connection conn = DriverManager.getConnection(jdbcUrl, connProps); - return conn; -} -``` - -2. 在配置文件中指定 firstEp 和 secondEp - -```shell -# first fully qualified domain name (FQDN) for TDengine system -firstEp cluster_node1:6030 - -# second fully qualified domain name (FQDN) for TDengine system, for cluster only -secondEp cluster_node2:6030 - -# default system charset -# charset UTF-8 - -# system locale -# locale en_US.UTF-8 -``` - -以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。 - -TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。 - -> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。 - - - - -```java -Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); -String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; -Connection conn = DriverManager.getConnection(jdbcUrl); -``` - -以上示例,使用了 JDBC REST 连接的 RestfulDriver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 - -使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要: - -1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; -2. jdbcUrl 以“jdbc:TAOS-RS://”开头; -3. 
使用 6041 作为连接端口。 - -url 中的配置参数如下: - -- user:登录 TDengine 用户名,默认值 'root'。 -- password:用户登录密码,默认值 'taosdata'。 -- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 -- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。 -- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 - -**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。 - -:::note - -- 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如: - -```sql -INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); -``` - -- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); - -::: - - - - -### 指定 URL 和 Properties 获取连接 - -除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数。 - -**注意**: - -- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。 -- 以下示例代码基于 taos-jdbcdriver-2.0.36。 - -```java -public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connProps.setProperty("debugFlag", "135"); - connProps.setProperty("maxSQLLength", "1048576"); - Connection conn = DriverManager.getConnection(jdbcUrl, connProps); - return conn; -} - -public Connection getRestConn() throws Exception{ - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); - Connection conn = DriverManager.getConnection(jdbcUrl, connProps); - return conn; -} -``` - -以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030/6041,数据库名为 test 的连接。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区、是否开启批量拉取等信息。 - -properties 中的配置参数如下: - -- TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。 -- TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。 -- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 -- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 sq 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 -- TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 -- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 -- TSDBDriver.PROPERTY_KEY_LOCALE:仅在使用 JDBC 原生连接时生效。 客户端语言环境,默认值系统当前 locale。 -- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。 -- 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。 - -### 配置参数的优先级 - -通过前面三种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下: - -1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。 -2. Properties connProps -3. 
使用原生连接时,TDengine 客户端驱动的配置文件 taos.cfg - -例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。 - -## 使用示例 - -### 创建数据库和表 - -```java -Statement stmt = conn.createStatement(); - -// create database -stmt.executeUpdate("create database if not exists db"); - -// use database -stmt.executeUpdate("use db"); - -// create table -stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); -``` - -> **注意**:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 - -### 插入数据 - -```java -// insert data -int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); - -System.out.println("insert " + affectedRows + " rows."); -``` - -> now 为系统内部函数,默认为客户端所在计算机当前时间。 -> `now + 1s` 代表客户端当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒),s(秒),m(分),h(小时),d(天),w(周),n(月),y(年)。 - -### 查询数据 - -```java -// query data -ResultSet resultSet = stmt.executeQuery("select * from tb"); - -Timestamp ts = null; -int temperature = 0; -float humidity = 0; -while(resultSet.next()){ - - ts = resultSet.getTimestamp(1); - temperature = resultSet.getInt(2); - humidity = resultSet.getFloat("humidity"); - - System.out.printf("%s, %d, %s\n", ts, temperature, humidity); -} -``` - -> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 - -### 处理异常 - -在报错后,通过 SQLException 可以获取到错误的信息和错误码: - -```java -try (Statement statement = connection.createStatement()) { - // executeQuery - ResultSet resultSet = statement.executeQuery(sql); - // print result - printResult(resultSet); -} catch (SQLException e) { - System.out.println("ERROR Message: " + e.getMessage()); - System.out.println("ERROR Code: " + e.getErrorCode()); - e.printStackTrace(); -} -``` - -JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间),原生连接方法的报错(错误码在 0x2351 到 0x2400 之间),TDengine 其他功能模块的报错。 - -具体的错误码请参考: - -- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) -- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) - -### 通过参数绑定写入数据 - -从 2.1.2.0 版本开始,TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。 - -**注意**: - -- JDBC REST 连接目前不支持参数绑定 -- 以下示例代码基于 taos-jdbcdriver-2.0.36 -- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法 -- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽 - -```java -public class ParameterBindingDemo { - - private static final String host = "127.0.0.1"; - private static final Random random = new Random(System.currentTimeMillis()); - private static final int BINARY_COLUMN_SIZE = 20; - private static final String[] schemaList = { - "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", - "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", - "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", - "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", - "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" - }; - private static final int numOfSubTable = 10, numOfRow = 10; - - public static void main(String[] args) throws SQLException { - - String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; - Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); - - init(conn); - - bindInteger(conn); - - bindFloat(conn); - - 
bindBoolean(conn); - - bindBytes(conn); - - bindString(conn); - - conn.close(); - } - - private static void init(Connection conn) throws SQLException { - try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_parabind"); - stmt.execute("create database if not exists test_parabind"); - stmt.execute("use test_parabind"); - for (int i = 0; i < schemaList.length; i++) { - stmt.execute(schemaList[i]); - } - } - } - - private static void bindInteger(Connection conn) throws SQLException { - String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t1_" + i); - // set tags - pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); - pstmt.setTagLong(3, random.nextLong()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setByte(1, f1List); - - ArrayList f2List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setShort(2, f2List); - - ArrayList f3List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f3List.add(random.nextInt(Integer.MAX_VALUE)); - pstmt.setInt(3, f3List); - - ArrayList f4List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f4List.add(random.nextLong()); - pstmt.setLong(4, f4List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute column - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindFloat(Connection conn) throws SQLException { - String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; - - TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t2_" + i); - // set tags - pstmt.setTagFloat(0, random.nextFloat()); - pstmt.setTagDouble(1, random.nextDouble()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextFloat()); - pstmt.setFloat(1, f1List); - - ArrayList f2List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f2List.add(random.nextDouble()); - pstmt.setDouble(2, f2List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - // close if no try-with-catch statement is used - pstmt.close(); - } - - private static void bindBoolean(Connection conn) throws SQLException { - String sql = "insert into ? using stable3 tags(?) 
values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t3_" + i); - // set tags - pstmt.setTagBoolean(0, random.nextBoolean()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextBoolean()); - pstmt.setBoolean(1, f1List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindBytes(Connection conn) throws SQLException { - String sql = "insert into ? using stable4 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t4_" + i); - // set tags - pstmt.setTagString(0, new String("abc")); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add(new String("abc")); - } - pstmt.setString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindString(Connection conn) throws SQLException { - String sql = "insert into ? using stable5 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t5_" + i); - // set tags - pstmt.setTagNString(0, "California.SanFrancisco"); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add("California.LosAngeles"); - } - pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } -} -``` - -用于设定 TAGS 取值的方法总共有: - -```java -public void setTagNull(int index, int type) -public void setTagBoolean(int index, boolean value) -public void setTagInt(int index, int value) -public void setTagByte(int index, byte value) -public void setTagShort(int index, short value) -public void setTagLong(int index, long value) -public void setTagTimestamp(int index, long value) -public void setTagFloat(int index, float value) -public void setTagDouble(int index, double value) -public void setTagString(int index, String value) -public void setTagNString(int index, String value) -``` - -用于设定 VALUES 数据列的取值的方法总共有: - -```java -public void setInt(int columnIndex, ArrayList list) throws SQLException -public void setFloat(int columnIndex, ArrayList list) throws SQLException -public void setTimestamp(int columnIndex, ArrayList list) throws SQLException -public void setLong(int columnIndex, ArrayList list) throws SQLException -public void setDouble(int columnIndex, ArrayList list) throws SQLException -public void setBoolean(int columnIndex, 
ArrayList list) throws SQLException -public void setByte(int columnIndex, ArrayList list) throws SQLException -public void setShort(int columnIndex, ArrayList list) throws SQLException -public void setString(int columnIndex, ArrayList list, int size) throws SQLException -public void setNString(int columnIndex, ArrayList list, int size) throws SQLException -``` - -### 无模式写入 - -从 2.2.0.0 版本开始,TDengine 增加了对无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](/reference/schemaless/)。 - -**注意**: - -- JDBC REST 连接目前不支持无模式写入 -- 以下示例代码基于 taos-jdbcdriver-2.0.36 - -```java -public class SchemalessInsertTest { - private static final String host = "127.0.0.1"; - private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; - private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; - - public static void main(String[] args) throws SQLException { - final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; - try (Connection connection = DriverManager.getConnection(url)) { - init(connection); - - SchemalessWriter writer = new SchemalessWriter(connection); - writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS); - writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); - writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); - } - } - - private static void init(Connection connection) throws SQLException { - try (Statement stmt = connection.createStatement()) { - stmt.executeUpdate("drop database if exists test_schemaless"); - stmt.executeUpdate("create database if not exists test_schemaless"); - stmt.executeUpdate("use test_schemaless"); - } - } -} -``` - -### 订阅 - -TDengine Java 连接器支持订阅功能,应用 API 如下: - -#### 创建订阅 - -```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); -``` - -`subscribe` 方法的三个参数含义如下: - -- topic:订阅的主题(即名称),此参数是订阅的唯一标识 -- sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 -- restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 - -如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 - -#### 订阅消费数据 - -```java -int total = 0; -while(true) { - TSDBResultSet rs = sub.consume(); - int count = 0; - while(rs.next()) { - count++; - } - total += count; - System.out.printf("%d rows consumed, total %d\n", count, total); - Thread.sleep(1000); -} -``` - -`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 - -#### 关闭订阅 - -```java -sub.close(true); -``` - -`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 - -### 关闭资源 - -```java -resultSet.close(); -stmt.close(); -conn.close(); -``` - -> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 - -### 与连接池使用 - -#### HikariCP - -使用示例如下: - -```java - public static void main(String[] args) throws SQLException { - HikariConfig config = new HikariConfig(); - // jdbc properties - config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); - config.setUsername("root"); - config.setPassword("taosdata"); - // connection pool configurations - config.setMinimumIdle(10); //minimum number of idle connection - config.setMaximumPoolSize(10); 
//maximum number of connection in the pool - config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool - config.setMaxLifetime(0); // maximum life time for each connection - config.setIdleTimeout(0); // max idle time for recycle idle connection - config.setConnectionTestQuery("select server_status()"); //validation query - - HikariDataSource ds = new HikariDataSource(config); //create datasource - - Connection connection = ds.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement - - //query or insert - // ... - - connection.close(); // put back to conneciton pool -} -``` - -> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 -> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。 - -#### Druid - -使用示例如下: - -```java -public static void main(String[] args) throws Exception { - - DruidDataSource dataSource = new DruidDataSource(); - // jdbc properties - dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); - dataSource.setUrl(url); - dataSource.setUsername("root"); - dataSource.setPassword("taosdata"); - // pool configurations - dataSource.setInitialSize(10); - dataSource.setMinIdle(10); - dataSource.setMaxActive(10); - dataSource.setMaxWait(30000); - dataSource.setValidationQuery("select server_status()"); - - Connection connection = dataSource.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement - //query or insert - // ... - - connection.close(); // put back to conneciton pool -} -``` - -> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。 - -**注意事项:** - -- TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 - -如下所示,`select server_status()` 执行成功会返回 `1`。 - -```sql -taos> select server_status(); -server_status()| -================ -1 | -Query OK, 1 row(s) in set (0.000141s) -``` - -### 更多示例程序 - -示例程序源码位于 `TDengine/examples/JDBC` 下: - -- JDBCDemo:JDBC 示例源程序。 -- JDBCConnectorChecker:JDBC 安装校验源程序及 jar 包。 -- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。 -- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。 -- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。 - -请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC) - -## 最近更新记录 - -| taos-jdbcdriver 版本 | 主要变化 | -| :------------------: | :----------------------------: | -| 2.0.38 | JDBC REST 连接增加批量拉取功能 | -| 2.0.37 | 增加对 json tag 支持 | -| 2.0.36 | 增加对 schemaless 写入支持 | - -## 常见问题 - -1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升? - - **原因**:TDengine 的 JDBC 实现中,通过 `addBatch` 方法提交的 SQL 语句,会按照添加的顺序,依次执行,这种方式没有减少与服务端的交互次数,不会带来性能上的提升。 - - **解决方法**:1. 在一条 insert 语句中拼接多个 values 值;2. 使用多线程的方式并发插入;3. 使用参数绑定的写入方式 - -2. java.lang.UnsatisfiedLinkError: no taos in java.library.path - - **原因**:程序没有找到依赖的本地函数库 taos。 - - **解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 - -3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform - - **原因**:目前 TDengine 只支持 64 位 JDK。 - - **解决方法**:重新安装 64 位 JDK。 - -4. 
其它问题请参考 [FAQ](/train-faq/faq) - -## API 参考 - -[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver) diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx deleted file mode 100644 index 9f2bed9e97cb33aeabfce3d69dc3774931b426c0..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/node.mdx +++ /dev/null @@ -1,252 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 6 -sidebar_label: Node.js -title: TDengine Node.js Connector ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -import Preparition from "./_preparition.mdx"; -import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; - -`td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 - -`td2.0-connector` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`td2.0-rest-connector` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。 - -Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node)。 - -## 支持的平台 - -原生连接器支持的平台和 TDengine 客户端驱动支持的平台一致。 -REST 连接器支持所有能运行 Node.js 的平台。 - -## 版本支持 - -请参考[版本支持列表](/reference/connector#版本支持) - -## 支持的功能特性 - -### 原生连接器 - -1. 连接管理 -2. 普通查询 -3. 连续查询 -4. 参数绑定 -5. 订阅功能 -6. Schemaless - -### REST 连接器 - -1. 连接管理 -2. 普通查询 -3. 连续查询 - -## 安装步骤 - -### 安装前准备 - -- 安装 Node.js 开发环境 -- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 - - - - -- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) -- `td2.0-connector` 2.0.6 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;2.0.5 及更早版本支持 Node.js LTS v10.x 版本。其他版本可能存在包兼容性的问题 -- `make` -- C 语言编译器,[GCC](https://gcc.gnu.org) v4.8.5 或更高版本 - - - - -- 安装方法 1 - -使用微软的[ windows-build-tools ](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具。 - -- 安装方法 2 - -手动安装以下工具: - -- 安装 Visual Studio 相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) -- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` -- 进入`cmd`命令行界面,`npm config set msvs_version 2017` - -参考微软的 Node.js 用户手册[ Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)。 - -如果在 Windows 10 ARM 上使用 ARM64 Node.js,还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。 - - - - -### 使用 npm 安装 - - - - -```bash -npm install td2.0-connector -``` - - - - -```bash -npm i td2.0-rest-connector -``` - - - - -### 安装验证 - -在安装好 TDengine 客户端后,使用 nodejsChecker.js 程序能够验证当前环境是否支持 Node.js 方式访问 TDengine。 - -验证方法: - -- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/develop/examples/nodejs/nodejsChecker.js)到本地。 - 
-- 在命令行中执行以下命令。 - -```bash -npm init -y -npm install td2.0-connector -node nodejsChecker.js host=localhost -``` - -- 执行以上步骤后,在命令行会输出 nodejsChecker.js 连接 TDengine 实例,并执行简单插入和查询的结果。 - -## 建立连接 - -请选择使用一种连接器。 - - - - -安装并引用 `td2.0-connector` 包。 - -```javascript -//A cursor also needs to be initialized in order to interact with TDengine from Node.js. -const taos = require("td2.0-connector"); -var conn = taos.connect({ - host: "127.0.0.1", - user: "root", - password: "taosdata", - config: "/etc/taos", - port: 0, -}); -var cursor = conn.cursor(); // Initializing a new cursor - -//Close a connection -conn.close(); -``` - - - - -安装并引用 `td2.0-rest-connector` 包。 - -```javascript -//A cursor also needs to be initialized in order to interact with TDengine from Node.js. -import { options, connect } from "td2.0-rest-connector"; -options.path = "/rest/sqlt"; -// set host -options.host = "localhost"; -// set other options like user/passwd - -let conn = connect(options); -let cursor = conn.cursor(); -``` - - - - -## 使用示例 - -### 写入数据 - -#### SQL 写入 - - - -#### InfluxDB 行协议写入 - - - -#### OpenTSDB Telnet 行协议写入 - - - -#### OpenTSDB JSON 行协议写入 - - - -### 查询数据 - - - -## 更多示例程序 - -| 示例程序 | 示例程序描述 | -| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- | -| [connection](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/cursorClose.js) | 建立连接的示例。 | -| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamBatchSample.js) | 绑定多行参数插入的示例。 | -| [stmtBind](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamSample.js) | 一行一行绑定参数插入的示例。 | -| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindSingleParamBatchSample.js) | 按列绑定参数插入的示例。 | -| [stmtUseResult](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtUseResultSample.js) | 绑定参数查询的示例。 | -| [json tag](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testJsonTag.js) | Json tag 的使用示例。 | -| [Nanosecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testNanoseconds.js) | 时间戳为纳秒精度的使用的示例。 | -| [Microsecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testMicroseconds.js) | 时间戳为微秒精度的使用的示例。 | -| [schemless insert](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSchemalessInsert.js) | schemless 插入的示例。 | -| [subscribe](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSubscribe.js) | 订阅的使用示例。 | -| [asyncQuery](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/tset.js) | 异步查询的使用示例。 | -| [REST](https://github.com/taosdata/taos-connector-node/blob/develop/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 | - -## 使用限制 - -Node.js 连接器 >= v2.0.6 目前支持 node 的版本为:支持 >=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0 ;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。 - -## 其他说明 - -Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)。 - -## 常见问题 - -1. 使用 REST 连接需要启动 taosadapter。 - - ```bash - sudo systemctl start taosadapter - ``` - -2. Node.js 版本 - - 连接器 >v2.0.6 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1 - -3. 
"Unable to establish connection","Unable to resolve FQDN" - - 一般都是因为配置 FQDN 不正确。 可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html) 。 - -## 重要更新记录 - -### 原生连接器 - -| td2.0-connector 版本 | 说明 | -| -------------------- | ---------------------------------------------------------------- | -| 2.0.12 | 修复 cursor.close() 报错的 bug。 | -| 2.0.11 | 支持绑定参数、json tag、schemaless 接口等功能。 | -| 2.0.10 | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 | - -### REST 连接器 - -| td2.0-rest-connector 版本 | 说明 | -| ------------------------- | ---------------------------------------------------------------- | -| 1.0.3 | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 | - -## API 参考 - -[API 参考](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs-cn/14-reference/03-connector/php.mdx b/docs-cn/14-reference/03-connector/php.mdx deleted file mode 100644 index f150aed4c8a6ba855d5e830a2944a6d6f88ab0f5..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/php.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: PHP -title: PHP Connector ---- - -`php-tdengine` 是由社区贡献的 PHP 连接器扩展,还特别支持了 Swoole 协程化。 - -PHP 连接器依赖 TDengine 客户端驱动。 - -项目地址: - -TDengine 服务端或客户端安装后,`taos.h` 位于: - -- Linux:`/usr/local/taos/include` -- Windows:`C:\TDengine\include` - -TDengine 客户端驱动的动态库位于: - -- Linux: `/usr/local/taos/driver/libtaos.so` -- Windows: `C:\TDengine\taos.dll` - -## 支持的平台 - -* Windows、Linux、MacOS - -* PHP >= 7.4 - -* TDengine >= 2.0 - -* Swoole >= 4.8 (可选) - -## 支持的版本 - -TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一对应的强对应关系,建议使用与 TDengine 服务端完全相同的客户端驱动。虽然低版本的客户端驱动在前三段版本号一致(即仅第四段版本号不同)的情况下也能够与高版本的服务端相兼容,但这并非推荐用法。强烈不建议使用高版本的客户端驱动访问低版本的服务端。 - -## 安装步骤 - -### 安装 TDengine 客户端驱动 - -TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤) - -### 编译安装 php-tdengine - -**下载代码并解压:** - -```shell -curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ -&& mkdir php-tdengine \ -&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 -``` - -> 版本 `v1.0.2` 可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases)。 - -**非 Swoole 环境:** - -```shell -phpize && ./configure && make -j && make install -``` - -**手动指定 tdengine 目录:** - -```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install -``` - -> `--with-tdengine-dir=` 后跟上 tdengine 目录。 -> 适用于默认找不到的情况,或者 MacOS 系统用户。 - -**Swoole 环境:** - -```shell -phpize && ./configure --enable-swoole && make -j && make install -``` - -**启用扩展:** - -方法一:在 `php.ini` 中加入 `extension=tdengine` - -方法二:运行带参数 `php -dextension=tdengine test.php` - -## 示例程序 - -本节展示了使用客户端驱动访问 TDengine 集群的常见访问方式的示例代码。 - -> 所有错误都会抛出异常: `TDengine\Exception\TDengineException` - -### 建立连接 - -
-建立连接 - -```php -{{#include docs-examples/php/connect.php}} -``` - -
- -### 插入数据 - -
-插入数据 - -```php -{{#include docs-examples/php/insert.php}} -``` - -
- -### 同步查询 - -
-同步查询 - -```php -{{#include docs-examples/php/query.php}} -``` - -
- -### 参数绑定 - -
-参数绑定 - -```php -{{#include docs-examples/php/insert_stmt.php}} -``` - -
- -## 常量 - -| 常量 | 说明 | -| ------------ | ------------ | -| `TDengine\TSDB_DATA_TYPE_NULL` | null | -| `TDengine\TSDB_DATA_TYPE_BOOL` | bool | -| `TDengine\TSDB_DATA_TYPE_TINYINT` | tinyint | -| `TDengine\TSDB_DATA_TYPE_SMALLINT` | smallint | -| `TDengine\TSDB_DATA_TYPE_INT` | int | -| `TDengine\TSDB_DATA_TYPE_BIGINT` | bigint | -| `TDengine\TSDB_DATA_TYPE_FLOAT` | float | -| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double | -| `TDengine\TSDB_DATA_TYPE_BINARY` | binary | -| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp | -| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar | -| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint | -| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint | -| `TDengine\TSDB_DATA_TYPE_UINT` | uint | -| `TDengine\TSDB_DATA_TYPE_UBIGINT` | ubigint | diff --git a/docs-cn/14-reference/03-connector/python.mdx b/docs-cn/14-reference/03-connector/python.mdx deleted file mode 100644 index 828e0a4abb758a72c3a127be13dd89c4d86186f4..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/python.mdx +++ /dev/null @@ -1,348 +0,0 @@ ---- -sidebar_position: 3 -sidebar_label: Python -title: TDengine Python Connector -description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。taospy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 taospy 的两个子模块:taos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas" --- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](/reference/connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 -除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。 - -使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。 - -Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-python)。 - -## 支持的平台 - -- 原生连接[支持的平台](/reference/connector/#支持的平台)和 TDengine 客户端支持的平台一致。 -- REST 连接支持所有能运行 Python 的平台。 - -## 版本选择 - -无论使用什么版本的 TDengine 都建议使用最新版本的 `taospy`。 - -## 支持的功能 - -- 原生连接支持 TDengine 的所有核心功能, 包括: 连接管理、执行 SQL、参数绑定、订阅、无模式写入(schemaless)。 -- REST 连接支持的功能包括:连接管理、执行 SQL。 (通过执行 SQL 可以: 管理数据库、管理表和超级表、写入数据、查询数据、创建连续查询等)。 - -## 安装 - -### 准备 - -1. 安装 Python。建议使用 Python >= 3.6。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 -2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 -3. 
如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 - -### 使用 pip 安装 - -#### 卸载旧版本 - -如果以前安装过旧版本的 Python 连接器, 请提前卸载。 - -``` -pip3 uninstall taos taospy -``` - -:::note -较早的 TDengine 客户端软件包含了 Python 连接器。如果从客户端软件的安装目录安装了 Python 连接器,那么对应的 Python 包名是 `taos`。 所以上述卸载命令包含了 `taos`, 不存在也没关系。 - -::: - -#### 安装 `taospy` - - - - -安装最新版本 - -``` -pip3 install taospy -``` - -也可以指定某个特定版本安装。 - -``` -pip3 install taospy==2.3.0 -``` - - - - -``` -pip3 install git+https://github.com/taosdata/taos-connector-python.git -``` - - - - -### 安装验证 - - - - -对于原生连接,需要验证客户端驱动和 Python 连接器本身是否都正确安装。如果能成功导入 `taos` 模块,则说明已经正确安装了客户端驱动和 Python 连接器。可在 Python 交互式 Shell 中输入: - -```python -import taos -``` - - - - -对于 REST 连接,只需验证是否能成功导入 `taosrest` 模块。可在 Python 交互式 Shell 中输入: - -```python -import taosrest -``` - - - - -:::tip -如果系统上有多个版本的 Python,则可能有多个 `pip` 命令。要确保使用的 `pip` 命令路径是正确的。上面我们用 `pip3` 命令安装,排除了使用 Python 2.x 版本对应的 `pip` 的可能性。但是如果系统上有多个 Python 3.x 版本,仍需检查安装路径是否正确。最简单的验证方式是,在命令行再次输入 `pip3 install taospy`, 就会打印出 `taospy` 的具体安装位置,比如在 Windows 上: - -``` -C:\> pip3 install taospy -Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple -Requirement already satisfied: taospy in c:\users\username\appdata\local\programs\python\python310\lib\site-packages (2.3.0) -``` - -::: - -## 建立连接 - -### 连通性测试 - -在用连接器建立连接之前,建议先测试本地 TDengine CLI 到 TDengine 集群的连通性。 - - - - -请确保 TDengine 集群已经启动, 且集群中机器的 FQDN (如果启动的是单机版,FQDN 默认为 hostname)在本机能够解析, 可用 `ping` 命令进行测试: - -``` -ping  -``` - -然后测试用 TDengine CLI 能否正常连接集群: - -``` -taos -h  -p  -``` - -上面的 FQDN 可以为集群中任意一个 dnode 的 FQDN, PORT 为这个 dnode 对应的 serverPort。 - - - - -对于 REST 连接, 除了确保集群已经启动,还要确保 taosAdapter 组件已经启动。可以使用如下 curl 命令测试: - -``` -curl -u root:taosdata http://:/rest/sql -d "select server_version()" -``` - -上面的 FQDN 为运行 taosAdapter 的机器的 FQDN, PORT 为 taosAdapter 配置的监听端口, 默认为 6041。 -如果测试成功,会输出服务器版本信息,比如: - -```json -{ - "status": "succ", - "head": ["server_version()"], - "column_meta": [["server_version()", 8, 8]], - "data": [["2.4.0.16"]], - "rows": 1 -} -``` - - - - -### 使用连接器建立连接 - -以下示例代码假设 TDengine 安装在本机, 且 FQDN 和 serverPort 都使用了默认配置。 - - - - -```python -{{#include docs-examples/python/connect_native_reference.py}} -``` - -`connect` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明: - -- `host` : 要连接的节点的 FQDN。 没有默认值。如果不提供此参数,则会连接客户端配置文件中的 firstEP。 -- `user` :TDengine 用户名。 默认值是 root。 -- `password` : TDengine 用户密码。 默认值是 taosdata。 -- `port` : 要连接的数据节点的起始端口,即 serverPort 配置。默认值是 6030。只有在提供了 host 参数的时候,这个参数才生效。 -- `config` : 客户端配置文件路径。 在 Windows 系统上默认是 `C:\TDengine\cfg`。 在 Linux 系统上默认是 `/etc/taos/`。 -- `timezone` : 查询结果中 TIMESTAMP 类型的数据,转换为 python 的 datetime 对象时使用的时区。默认为本地时区。 - -:::warning -`config` 和 `timezone` 都是进程级别的配置。建议一个进程建立的所有连接都使用相同的参数值。否则可能产生无法预知的错误。 -::: - -:::tip -`connect` 函数返回 `taos.TaosConnection` 实例。 在客户端多线程的场景下,推荐每个线程申请一个独立的连接实例,而不建议多线程共享一个连接。 - -::: - - - - -```python -{{#include docs-examples/python/connect_rest_examples.py:connect}} -``` - -`connect()` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明: - -- `url`: taosAdapter REST 服务的 URL。默认是 。 -- `user`: TDengine 用户名。默认是 root。 -- `password`: TDengine 用户密码。默认是 taosdata。 -- `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。 - - - - -## 示例程序 - -### 基本使用 - - - - -##### TaosConnection 类的使用 - -`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor`方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法)。 - -```python title="execute 方法" -{{#include docs-examples/python/connection_usage_native_reference.py:insert}} -``` - -```python title="query 方法" -{{#include 
docs-examples/python/connection_usage_native_reference.py:query}} -``` - -:::tip -查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。 -::: - -##### TaosResult 类的使用 - -上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。 - -```python title="blocks_iter 方法" -{{#include docs-examples/python/result_set_examples.py}} -``` -##### TaosCursor 类的使用 - -`TaosConnection` 类和 `TaosResult` 类已经实现了原生接口的所有功能。如果你对 PEP249 规范中的接口比较熟悉也可以使用 `TaosCursor` 类提供的方法。 - -```python title="TaosCursor 的使用" -{{#include docs-examples/python/cursor_usage_native_reference.py}} -``` - -:::note -TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。 - -::: - - - - -##### TaosRestCursor 类的使用 - -`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。 - -```python title="TaosRestCursor 的使用" -{{#include docs-examples/python/connect_rest_examples.py:basic}} -``` -- `cursor.execute` : 用来执行任意 SQL 语句。 -- `cursor.rowcount`: 对于写入操作返回写入成功记录数。对于查询操作,返回结果集行数。 -- `cursor.description` : 返回字段的描述信息。关于描述信息的具体格式请参考[TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html)。 - -##### RestClient 类的使用 - -`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 - -```python title="RestClient 的使用" -{{#include docs-examples/python/rest_client_example.py}} -``` - -对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 - - - - - - -### 与 pandas 一起使用 - - - - -```python -{{#include docs-examples/python/conn_native_pandas.py}} -``` - - - - -```python -{{#include docs-examples/python/conn_rest_pandas.py}} -``` - - - - -### 其它示例程序 - -| 示例程序链接 | 示例程序内容 | -| ------------------------------------------------------------------------------------------------------------- | ----------------------- | -| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | 参数绑定, 一次绑定多行 | -| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 | -| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 | -| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 | -| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | 异步订阅 | -| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | 同步订阅 | - -## 其它说明 - -### 异常处理 - -所有数据库操作如果出现异常,都会直接抛出来。由应用程序负责异常处理。比如: - -```python -{{#include docs-examples/python/handle_exception.py}} -``` - -### 关于纳秒 (nanosecond) - -由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。 - -1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds -2. https://www.python.org/dev/peps/pep-0564/ - - -## 常见问题 - -欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。 - -## 重要更新 - -| 连接器版本 | 重要更新 | 发布日期 | -| ---------- | --------------------------------------------------------------------------------- | ---------- | -| 2.3.1 | 1. support TDengine REST API
2. remove support for Python version below 3.6 | 2022-04-28 | -| 2.2.5 | support timezone option when connect | 2022-04-13 | -| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 | - - -[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases) - -## API 参考 - -- [taos](https://docs.taosdata.com/api/taospy/taos/) -- [taosrest](https://docs.taosdata.com/api/taospy/taosrest) diff --git a/docs-cn/14-reference/03-connector/rust.mdx b/docs-cn/14-reference/03-connector/rust.mdx deleted file mode 100644 index 25a8409b6e6faca651d1eaf3e02fbd4a0199c557..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/03-connector/rust.mdx +++ /dev/null @@ -1,388 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 5 -sidebar_label: Rust -title: TDengine Rust Connector ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -import Preparition from "./_preparition.mdx" -import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" -import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx" -import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx" -import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx" -import RustQuery from "../../07-develop/04-query-data/_rust.mdx" - -[![Crates.io](https://img.shields.io/crates/v/libtaos)](https://crates.io/crates/libtaos) ![Crates.io](https://img.shields.io/crates/d/libtaos) [![docs.rs](https://img.shields.io/docsrs/libtaos)](https://docs.rs/libtaos) - -`libtaos` 是 TDengine 的官方 Rust 语言连接器。Rust 开发人员可以通过它开发存取 TDengine 数据库的应用软件。 - -`libtaos` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。另外一种是 **REST 连接**,它通过 taosAdapter 的 REST 接口连接 TDengine 运行实例。你可以通过不同的 “特性(即 Cargo 关键字 features)” 来指定使用哪种连接器。REST 连接支持任何平台,但原生连接支持所有 TDengine 客户端能运行的平台。 - -`libtaos` 的源码托管在 [GitHub](https://github.com/taosdata/libtaos-rs)。 - -## 支持的平台 - -原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 -REST 连接支持所有能运行 Rust 的平台。 - -## 版本支持 - -请参考[版本支持列表](/reference/connector#版本支持) - -Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 2.4 版本以上的 TDengine,以避免已知问题。 - -## 安装 - -### 安装前准备 -* 安装 Rust 开发工具链 -* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) - -### 添加 libtaos 依赖 - -根据选择的连接方式,按照如下说明在 [Rust](https://rust-lang.org) 项目中添加 [libtaos][libtaos] 依赖: - - - - -在 `Cargo.toml` 文件中添加 [libtaos][libtaos]: - -```toml -[dependencies] -# use default feature -libtaos = "*" -``` - - - - -在 `Cargo.toml` 文件中添加 [libtaos][libtaos],并启用 `rest` 特性。 - -```toml -[dependencies] -# use rest feature -libtaos = { version = "*", features = ["rest"]} -``` - - - - - -### 使用连接池 - -请在 `Cargo.toml` 中启用 `r2d2` 特性。 - -```toml -[dependencies] -# with taosc -libtaos = { version = "*", features = ["r2d2"] } -# or rest -libtaos = { version = "*", features = ["rest", "r2d2"] } -``` - -## 建立连接 - -[TaosCfgBuilder] 为使用者提供构造器形式的 API,以便于后续创建连接或使用连接池。 - -```rust -let cfg: TaosCfg = TaosCfgBuilder::default() - .ip("127.0.0.1") - .user("root") - .pass("taosdata") - .db("log") // do not set if not require a default database. - .port(6030u16) - .build() - .expect("TaosCfg builder error"); -} -``` - -现在您可以使用该对象创建连接: - -```rust -let conn = cfg.connect()?; -``` - -连接对象可以创建多个: - -```rust -let conn = cfg.connect()?; -let conn2 = cfg.connect()?; -``` - -可以在应用中使用连接池: - -```rust -let pool = r2d2::Pool::builder() - .max_size(10000) // max connections - .build(cfg)?; - -// ... 
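-// 注:连接池耗尽时 pool.get() 会阻塞等待,直至有空闲连接;如需限制等待时间,可改用 r2d2 提供的 pool.get_timeout(Duration)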
-// Use pool to get connection -let conn = pool.get()?; -``` - -之后您可以对数据库进行相关操作: - -```rust -async fn demo() -> Result<(), Error> { - // get connection ... - - // create database - conn.exec("create database if not exists demo").await?; - // change database context - conn.exec("use demo").await?; - // create table - conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?; - // insert - conn.exec("insert into tb1 values(now, 1)").await?; - // query - let rows = conn.query("select * from tb1").await?; - for row in rows.rows { - println!("{}", row.into_iter().join(",")); - } - Ok(()) -} -``` - -## 使用示例 - -### 写入数据 - -#### SQL 写入 - - - -#### InfluxDB 行协议写入 - - - -#### OpenTSDB Telnet 行协议写入 - - - -#### OpenTSDB JSON 行协议写入 - - - -### 查询数据 - - - -### 更多示例程序 - -| 程序路径 | 程序说明 | -| -------------- | ----------------------------------------------------------------------------- | -| [demo.rs] | 基本 API 使用示例 | -| [bailongma-rs] | 使用 TDengine 作为存储后端的 Prometheus 远程存储 API 适配器,使用 r2d2 连接池 | - -## API 参考 - -### 连接构造器 API - -[Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) 构造器模式是 Rust 处理复杂数据类型或可选配置类型的解决方案。[libtaos] 实现中,使用连接构造器 [TaosCfgBuilder] 作为 TDengine Rust 连接器的入口。[TaosCfgBuilder] 提供对服务器、端口、数据库、用户名和密码等的可选配置。 - -使用 `default()` 方法可以构建一个默认参数的 [TaosCfg],用于后续连接数据库或建立连接池。 - -```rust -let cfg = TaosCfgBuilder::default().build()?; -``` - -使用构造器模式,用户可按需设置: - -```rust -let cfg = TaosCfgBuilder::default() - .ip("127.0.0.1") - .user("root") - .pass("taosdata") - .db("log") - .port(6030u16) - .build()?; -``` - -使用 [TaosCfg] 对象创建 TDengine 连接: - -```rust -let conn: Taos = cfg.connect(); -``` - -### 连接池 - -在复杂应用中,建议启用连接池。[libtaos] 的连接池使用 [r2d2] 实现。 - -如下,可以生成一个默认参数的连接池。 - -```rust -let pool = r2d2::Pool::new(cfg)?; -``` - -同样可以使用连接池的构造器,对连接池参数进行设置: - -```rust - use std::time::Duration; - let pool = r2d2::Pool::builder() - .max_size(5000) // max connections - .max_lifetime(Some(Duration::from_secs(100 * 60))) // lifetime of each connection - .min_idle(Some(1000)) // minimal idle connections - .connection_timeout(Duration::from_secs(2 * 60)) - .build(cfg); -``` - -在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。 - -```rust -let taos = pool.get()?; -``` - -### 连接 - -[Taos] 结构体是 [libtaos] 中的连接管理者,主要提供了两个 API: - -1. `exec`: 执行某个非查询类 SQL 语句,例如 `CREATE`,`ALTER`,`INSERT` 等。 - - ```rust - taos.exec().await?; - ``` - -2. `query`:执行查询语句,返回 [TaosQueryData] 对象。 - - ```rust - let q = taos.query("select * from log.logs").await?; - ``` - - [TaosQueryData] 对象存储了查询结果数据和返回的列的基本信息(列名,类型,长度): - - 列信息使用 [ColumnMeta] 存储: - - ```rust - let cols = &q.column_meta; - for col in cols { - println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes); - } - ``` - - 逐行获取数据: - - ```rust - for (i, row) in q.rows.iter().enumerate() { - for (j, cell) in row.iter().enumerate() { - println!("cell({}, {}) data: {}", i, j, cell); - } - } - ``` - -需要注意的是,需要使用 Rust 异步函数和异步运行时。 - -[Taos] 提供部分 SQL 的 Rust 方法化以减少 `format!` 代码块的频率: - -- `.describe(table: &str)`: 执行 `DESCRIBE` 并返回一个 Rust 数据结构。 -- `.create_database(database: &str)`: 执行 `CREATE DATABASE` 语句。 -- `.use_database(database: &str)`: 执行 `USE` 语句。 - -除此之外,该结构也是 [参数绑定](#参数绑定接口) 和 [行协议接口](#行协议接口) 的入口,使用方法请参考具体的 API 说明。 - -### 参数绑定接口 - -与 C 接口类似,Rust 提供参数绑定接口。首先,通过 [Taos] 对象创建一个 SQL 语句的参数绑定对象 [Stmt]: - -```rust -let mut stmt: Stmt = taos.stmt("insert into ? 
values(?,?)")?; -``` - -参数绑定对象提供了一组接口用于实现参数绑定: - -##### `.set_tbname(tbname: impl ToCString)` - -用于绑定表名。 - -##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)` - -当 SQL 语句使用超级表时,用于绑定子表表名和标签值: - -```rust -let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?,?)")?; -// tags can be created with any supported type, here is an example using JSON -let v = Field::Json(serde_json::from_str("{\"tag1\":\"一二三四五六七八九十\"}").unwrap()); -stmt.set_tbname_tags("tb0", [&tag])?; -``` - -##### `.bind(params: impl IntoParams)` - -用于绑定值类型。使用 [Field] 结构体构建需要的类型并绑定: - -```rust -let ts = Field::Timestamp(Timestamp::now()); -let value = Field::Float(0.0); -stmt.bind(vec![ts, value].iter())?; -``` - -##### `.execute()` - -执行 SQL。[Stmt] 对象可以复用,在执行后可以重新绑定并执行。 - -```rust -stmt.execute()?; - -// next bind cycle. -//stmt.set_tbname()?; -//stmt.bind()?; -//stmt.execute()?; -``` - -### 行协议接口 - -行协议接口支持多种模式和不同精度,需要引入 schemaless 模块中的常量以进行设置: - -```rust -use libtaos::*; -use libtaos::schemaless::*; -``` - -- InfluxDB 行协议 - - ```rust - let lines = [ - "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000" - "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000" - ]; - taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?; - ``` - -- OpenTSDB Telnet 协议 - - ```rust - let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"]; - taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?; - ``` - -- OpenTSDB JSON 协议 - - ```rust - let lines = [r#" - { - "metric": "st", - "timestamp": 1626006833, - "value": 10, - "tags": { - "t1": true, - "t2": false, - "t3": 10, - "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" - } - }"#]; - taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?; - ``` - -其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。 - -[libtaos]: https://github.com/taosdata/libtaos-rs -[tdengine]: https://github.com/taosdata/TDengine -[bailongma-rs]: https://github.com/taosdata/bailongma-rs -[r2d2]: https://crates.io/crates/r2d2 -[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs -[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html -[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html -[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html -[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html -[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html -[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html diff --git a/docs-cn/14-reference/04-taosadapter.md b/docs-cn/14-reference/04-taosadapter.md deleted file mode 100644 index 6e259391d40acfd48d8db8db3246ad2196ce0520..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/04-taosadapter.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: "taosAdapter" -description: "taosAdapter 是一个 TDengine 的配套工具,是 TDengine 集群和应用程序之间的桥梁和适配器。它提供了一种易于使用和高效的方式来直接从数据收集代理软件(如 Telegraf、StatsD、collectd 等)摄取数据。它还提供了 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine" -sidebar_label: "taosAdapter" ---- - -import Prometheus from "./_prometheus.mdx" -import CollectD from "./_collectd.mdx" -import StatsD from "./_statsd.mdx" -import Icinga2 from "./_icinga2.mdx" -import TCollector from "./_tcollector.mdx" - -taosAdapter 是一个 TDengine 的配套工具,是 TDengine 集群和应用程序之间的桥梁和适配器。它提供了一种易于使用和高效的方式来直接从数据收集代理软件(如 Telegraf、StatsD、collectd 等)摄取数据。它还提供了 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 
InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。 - -taosAdapter 提供以下功能: - -- RESTful 接口 -- 兼容 InfluxDB v1 写接口 -- 兼容 OpenTSDB JSON 和 telnet 格式写入 -- 无缝连接到 Telegraf -- 无缝连接到 collectd -- 无缝连接到 StatsD -- 支持 Prometheus remote_read 和 remote_write - -## taosAdapter 架构图 - -![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) - -## taosAdapter 部署方法 - -### 安装 taosAdapter - -taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD-CN.md)文档。 - -### start/stop taosAdapter - -在 Linux 系统上 taosAdapter 服务默认由 systemd 管理。使用命令 `systemctl start taosadapter` 可以启动 taosAdapter 服务。使用命令 `systemctl stop taosadapter` 可以停止 taosAdapter 服务。 - -### 移除 taosAdapter - -使用命令 rmtaos 可以移除包括 taosAdapter 在内的 TDengine server 软件。 - -### 升级 taosAdapter - -taosAdapter 和 TDengine server 需要使用相同版本。请通过升级 TDengine server 来升级 taosAdapter。 -与 taosd 分离部署的 taosAdapter 必须通过升级其所在服务器的 TDengine server 才能得到升级。 - -## taosAdapter 参数列表 - -taosAdapter 支持通过命令行参数、环境变量和配置文件来进行配置。默认配置文件是 /etc/taos/taosadapter.toml。 - -命令行参数优先于环境变量优先于配置文件,命令行用法是 arg=val,如 taosadapter -p=30000 --debug=true,详细列表如下: - -```shell -Usage of taosAdapter: - --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") - --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) - --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") - --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) - --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") - --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) - -c, --config string config path default /etc/taos/taosadapter.toml - --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) - --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" - --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" - --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" - --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" - --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" - --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" - --help Print this help message and exit - --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) - --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") - --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) - --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") - --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) - --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") - --monitor.collectDuration duration Set monitor duration. 
Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) - --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" - --monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" - --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") - --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) - --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) - --monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") - --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) - --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true) - --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" - --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" - --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") - --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" - --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) - --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" - --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" - --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" - --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) - --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" - --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") - --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) - --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) - --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") - --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) - --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) - --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" - --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) - --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") - --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) - --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" - --opentsdb_telnet.user string opentsdb_telnet user. 
Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") - --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s) - --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000) - --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000) - -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) - --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) - --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) - --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE" - --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE" - --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE" - --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) - --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") - --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) - --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) - --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) - --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) - --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) - --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) - --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) - --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") - --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) - --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") - --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" - --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") - --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) - --taosConfigDir string load taos client config path. 
Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" - --version Print the version and exit -``` - -备注: -使用浏览器进行接口调用请根据实际情况设置如下跨源资源共享(CORS)参数: - -```text -AllowAllOrigins -AllowOrigins -AllowHeaders -ExposeHeaders -AllowCredentials -AllowWebSockets -``` - -如果不通过浏览器进行接口调用无需关心这几项配置。 - -关于 CORS 协议细节请参考:[https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) 或 [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS)。 - -示例配置文件参见 [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml)。 - -## 功能列表 - -- 与 RESTful 接口兼容 - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) -- 兼容 InfluxDB v1 写接口 - [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) -- 兼容 OpenTSDB JSON 和 telnet 格式写入 - - - - -- 与 collectd 无缝连接 - collectd 是一个系统统计收集守护程序,请访问 [https://collectd.org/](https://collectd.org/) 了解更多信息。 -- Seamless connection with StatsD - StatsD 是一个简单而强大的统计信息汇总的守护程序。请访问 [https://github.com/statsd/statsd](https://github.com/statsd/statsd) 了解更多信息。 -- 与 icinga2 的无缝连接 - icinga2 是一个收集检查结果指标和性能数据的软件。请访问 [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) 了解更多信息。 -- 与 tcollector 无缝连接 - TCollector 是一个客户端进程,从本地收集器收集数据,并将数据推送到 OpenTSDB。请访问 [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) 了解更多信息。 -- 无缝连接 node_exporter - node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。 -- 支持 Prometheus remote_read 和 remote_write - remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。 - -## 接口 - -### TDengine RESTful 接口 - -您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/connector#restful)。支持如下 EndPoint : - -```text -/rest/sql -/rest/sqlt -/rest/sqlutc -``` - -### InfluxDB - -您可以使用任何支持 http 协议的客户端访问 Restful 接口地址 `http://:6041/` 来写入 InfluxDB 兼容格式的数据到 TDengine。EndPoint 如下: - -```text -/influxdb/v1/write -``` - -支持 InfluxDB 查询参数如下: - -- `db` 指定 TDengine 使用的数据库名 -- `precision` TDengine 使用的时间精度 -- `u` TDengine 用户名 -- `p` TDengine 密码 - -注意: 目前不支持 InfluxDB 的 token 验证方式只支持 Basic 验证和查询参数验证。 - -### OpenTSDB - -您可以使用任何支持 http 协议的客户端访问 Restful 接口地址 `http://:6041/` 来写入 OpenTSDB 兼容格式的数据到 TDengine。EndPoint 如下: - -```text -/opentsdb/v1/put/json/:db -/opentsdb/v1/put/telnet/:db -``` - -### collectd - - - -### StatsD - - - -### icinga2 OpenTSDB writer - - - -### TCollector - - - -### node_exporter - -Prometheus 使用的由\*NIX 内核暴露的硬件和操作系统指标的输出器 - -- 启用 taosAdapter 的配置 node_exporter.enable -- 设置 node_exporter 的相关配置 -- 重新启动 taosAdapter - -### prometheus - - - -## 内存使用优化方法 - -taosAdapter 将监测自身运行过程中内存使用率并通过两个阈值进行调节。有效值范围为 -1 到 100 的整数,单位为系统物理内存的百分比。 - -- pauseQueryMemoryThreshold -- pauseAllMemoryThreshold - -当超过 pauseQueryMemoryThreshold 阈值时时停止处理查询请求。 - -http 返回内容: - -- code 503 -- body "query memory exceeds threshold" - -当超过 pauseAllMemoryThreshold 阈值时停止处理所有写入和查询请求。 - -http 返回内容: - -- code 503 -- body "memory exceeds threshold" - -当内存回落到阈值之下时恢复对应功能。 - -状态检查接口 
`http://:6041/-/ping` - -- 正常返回 `code 200` -- 无参数 如果内存超过 pauseAllMemoryThreshold 将返回 `code 503` -- 请求参数 `action=query` 如果内存超过 pauseQueryMemoryThreshold 或 pauseAllMemoryThreshold 将返回 `code 503` - -对应配置参数 - -```text - monitor.collectDuration 监测间隔 环境变量 "TAOS_MONITOR_COLLECT_DURATION" (默认值 3s) - monitor.incgroup 是否是cgroup中运行(容器中运行设置为 true) 环境变量 "TAOS_MONITOR_INCGROUP" - monitor.pauseAllMemoryThreshold 不再进行插入和查询的内存阈值 环境变量 "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (默认值 80) - monitor.pauseQueryMemoryThreshold 不再进行查询的内存阈值 环境变量 "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (默认值 70) -``` - -您可以根据具体项目应用场景和运营策略进行相应调整,并建议使用运营监控软件及时进行系统内存状态监控。负载均衡器也可以通过这个接口检查 taosAdapter 运行状态。 - -## taosAdapter 监控指标 - -taosAdapter 采集 http 相关指标、cpu 百分比和内存百分比。 - -### http 接口 - -提供符合 [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md) 接口: - -```text -http://:6041/metrics -``` - -### 写入 TDengine - -taosAdapter 支持将 http 监控、cpu 百分比和内存百分比写入 TDengine。 - -有关配置参数 - -| **配置项** | **描述** | **默认值** | -| ----------------------- | --------------------------------------------------------- | ---------- | -| monitor.collectDuration | cpu 和内存采集间隔 | 3s | -| monitor.identity | 当前 taosadapter 的标识符如果不设置将使用 'hostname:port' | | -| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false | -| monitor.writeToTD | 是否写入到 TDengine | true | -| monitor.user | TDengine 连接用户名 | root | -| monitor.password | TDengine 连接密码 | taosdata | -| monitor.writeInterval | 写入 TDengine 间隔 | 30s | - -## 结果返回条数限制 - -taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 代表无限制,默认无限制。 - -该参数控制以下接口返回 - -- `http://:6041/rest/sql` -- `http://:6041/rest/sqlt` -- `http://:6041/rest/sqlutc` -- `http://:6041/prometheus/v1/remote_read/:db` - -## 故障解决 - -您可以通过命令 `systemctl status taosadapter` 来检查 taosAdapter 运行状态。 - -您也可以通过设置 --logLevel 参数或者环境变量 TAOS_ADAPTER_LOG_LEVEL 来调节 taosAdapter 日志输出详细程度。有效值包括: panic、fatal、error、warn、warning、info、debug 以及 trace。 - -## 如何从旧版本 TDengine 迁移到 taosAdapter - -在 TDengine server 2.2.x.x 或更早期版本中,taosd 进程包含一个内嵌的 http 服务。如前面所述,taosAdapter 是一个使用 systemd 管理的独立软件,拥有自己的进程。并且两者有一些配置参数和行为是不同的,请见下表: - -| **#** | **embedded httpd** | **taosAdapter** | **comment** | -| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | -| 1 | httpEnableRecordSql | --logLevel=debug | | -| 2 | httpMaxThreads | n/a | taosAdapter 自动管理线程池,无需此参数 | -| 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 | | -| 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 | -| 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 | -| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | diff --git a/docs-cn/14-reference/06-taosdump.md b/docs-cn/14-reference/06-taosdump.md deleted file mode 100644 index 3a9f2e9acd215be102991a1d91fba285ef6315bb..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/06-taosdump.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: taosdump -description: "taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序" ---- - -## 简介 - -taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。 - -taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级 -表和普通表中指定时间段内的数据记录进行备份。使用时可以指定数据备份的目录路径,如果 -不指定位置,taosdump 默认会将数据备份到当前目录。 - -如果指定的位置已经有数据文件,taosdump 会提示用户并立即退出,避免数据被覆盖。这意味着同一路径只能被用于一次备份。 -如果看到相关提示,请小心操作。 - -taosdump 
是一个逻辑备份工具,它不应被用于备份任何原始数据、环境设置、 -硬件信息、服务端配置或集群的拓扑结构。taosdump 使用 -[ Apache AVRO ](https://avro.apache.org/)作为数据文件格式来存储备份数据。 - -## 安装 - -taosdump 有两种安装方式: - -- 安装 taosTools 官方安装包, 请从[所有下载链接](https://www.taosdata.com/all-downloads)页面找到 taosTools 并下载安装。 - -- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。 - -## 常用使用场景 - -### taosdump 备份数据 - -1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数; -2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数; -3. 备份指定数据库中的某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔; -4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。 -5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](/taos-sql/escape)。 - -:::tip -- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。 -- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。 - -::: - -### taosdump 恢复数据 - -恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。 - -:::tip -taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。 - -::: - -## 详细命令行参数列表 - -以下为 taosdump 详细命令行参数列表: - -``` -Usage: taosdump [OPTION...] dbname [tbname ...] - or: taosdump [OPTION...] --databases db1,db2,... - or: taosdump [OPTION...] --all-databases - or: taosdump [OPTION...] -i inpath - or: taosdump [OPTION...] -o outpath - - -h, --host=HOST Server host dumping data from. Default is - localhost. - -p, --password User password to connect to server. Default is - taosdata. - -P, --port=PORT Port to connect - -u, --user=USER User name used to connect to server. Default is - root. - -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos - -i, --inpath=INPATH Input file path. - -o, --outpath=OUTPATH Output file path. - -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. - -a, --allow-sys Allow to dump system database - -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump inputted databases. Use comma to separate - databases' name. - -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump tables' schema. - -y, --answer-yes Input yes for prompt. It will skip data file - checking! - -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, - and lzma. - -S, --start-time=START_TIME Start time to dump. Either epoch or - ISO8601/RFC3339 format is acceptable. ISO8601 - format example: 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00:000+0800 or '2017-10-01 - 00:00:00.000+0800' - -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 - format is acceptable. ISO8601 format example: - 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00.000+0800 or '2017-10-01 - 00:00:00.000+0800' - -B, --data-batch=DATA_BATCH Number of data per query/insert statement when - backup/restore. Default value is 16384. If you see - 'error actual dump .. batch ..' when backup or if - you see 'WAL size exceeds limit' error when - restore, please adjust the value to a smaller one - and try. The workable value is related to the - length of the row and type of table schema. 
- -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Using loose mode if the table name and column name - use letter and number only. Default is NOT. - -n, --no-escape No escape char '`'. Default is using it. - -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is - 5. - -g, --debug Print debug info. - -?, --help Give this help list - --usage Give a short usage message - -V, --version Print program version - -Mandatory or optional arguments to long options are also mandatory or optional -for any corresponding short options. - -Report bugs to . -``` diff --git a/docs-cn/14-reference/08-taos-shell.md b/docs-cn/14-reference/08-taos-shell.md deleted file mode 100644 index 1be53adcdc69afd7449117412d25c4f56f4eaa4c..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/08-taos-shell.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: TDengine 命令行(CLI) -sidebar_label: TDengine CLI -description: TDengine CLI 的使用说明和技巧 ---- - -TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用的方式。 - -## 安装 - -如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [安装客户端驱动](/reference/connector/#安装客户端驱动)。 - -## 执行 - -要进入 TDengine CLI,您只要在 Linux 终端或 Windows 终端执行 `taos` 即可。 - -```bash -taos -``` - -如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下: - -```cmd -taos> -``` - -进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。 - -## 执行 SQL 脚本 - -在 TDengine CLI 里可以通过 `source` 命令来运行脚本文件中的多条 SQL 命令。 - -```sql -taos> source ; -``` - -## 在线修改显示字符宽度 - -可以在 TDengine CLI 里使用如下命令调整字符显示宽度 - -```sql -taos> SET MAX_BINARY_DISPLAY_WIDTH ; -``` - -如显示的内容后面以 ... 结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。 - -## 命令行参数 - -您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数: - -- -h, --host=HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务 -- -P, --port=PORT: 指定服务端所用端口号 -- -u, --user=USER: 连接时使用的用户名 -- -p, --password=PASSWORD: 连接服务端时使用的密码 -- -?, --help: 打印出所有命令行参数 - -还有更多其他参数: - -- -c, --config-dir: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg` -- -C, --dump-config: 打印 -c 指定的目录中 `taos.cfg` 的配置参数 -- -d, --database=DATABASE: 指定连接到服务端时使用的数据库 -- -D, --directory=DIRECTORY: 导入指定路径中的 SQL 脚本文件 -- -f, --file=FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行 -- -k, --check=CHECK: 指定要检查的表 -- -l, --pktlen=PKTLEN: 网络测试时使用的测试包大小 -- -n, --netrole=NETROLE: 网络连接测试时的测试范围,默认为 `startup`, 可选值为 `client`、`server`、`rpc`、`startup`、`sync`、`speed` 和 `fqdn` 之一 -- -r, --raw-time: 将时间输出为无符号 64 位整数类型(即 C 语言中的 uint64_t) -- -s, --commands=COMMAND: 以非交互模式执行的 SQL 命令,即在不进入终端的情况下运行 TDengine 命令 -- -S, --pkttype=PKTTYPE: 指定网络测试所用的包类型,默认为 TCP。只有 netrole 为 `speed` 时既可以指定为 TCP 也可以指定为 UDP -- -T, --thread=THREADNUM: 以多线程模式导入数据时的线程数 -- -z, --timezone=TIMEZONE: 指定时区,默认为本地时区 -- -V, --version: 打印出当前版本号 - -示例: - -```bash -taos -h h1.taos.com -s "use db; show tables;" -``` - -## TDengine CLI 小技巧 - -- 可以使用上下光标键查看历史输入的指令 -- 在 TDengine CLI 中使用 `alter user` 命令可以修改用户密码,缺省密码为 `taosdata` -- Ctrl+C 中止正在进行中的查询 -- 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存 -- 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句 -- 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI diff --git a/docs-cn/14-reference/12-directory.md b/docs-cn/14-reference/12-directory.md deleted file mode 100644 index f8c8cb4a082f691cf75db9bed3b42d0d6e1bc8a3..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/12-directory.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: 文件目录结构 -description: 
"TDengine 安装目录说明" ---- - -安装 TDengine 后,默认会在操作系统中生成下列目录或文件: - -| 目录/文件 | 说明 | -| ------------------------- | -------------------------------------------------------------------- | -| /usr/local/taos/bin | TDengine 可执行文件目录。其中的执行文件都会软链接到/usr/bin 目录下。 | -| /usr/local/taos/driver | TDengine 动态链接库目录。会软链接到/usr/lib 目录下。 | -| /usr/local/taos/examples | TDengine 各种语言应用示例目录。 | -| /usr/local/taos/include | TDengine 对外提供的 C 语言接口的头文件。 | -| /etc/taos/taos.cfg | TDengine 默认[配置文件] | -| /var/lib/taos | TDengine 默认数据文件目录。可通过[配置文件]修改位置。 | -| /var/log/taos | TDengine 默认日志文件目录。可通过[配置文件]修改位置。 | - -## 可执行文件 - -TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下。其中包括: - -- _taosd_:TDengine 服务端可执行文件 -- _taos_:TDengine Shell 可执行文件 -- _taosdump_:数据导入导出工具 -- _taosBenchmark_:TDengine 测试工具 -- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos -- _taosadapter_: 提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件 -- _tarbitrator_: 提供双节点集群部署的仲裁功能 -- _run_taosd_and_taosadapter.sh_:同时启动 taosd 和 taosAdapter 的脚本 -- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本 -- _set_core.sh_:用于方便调试设置系统生成 core dump 文件的脚本 -- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。 - -:::note -2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。 - -::: - -:::tip -您可以通过修改系统配置文件 taos.cfg 来配置不同的数据目录和日志目录。 - -::: diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md deleted file mode 100644 index f2712f2814593bddd65401cb129c8c58ee55a316..0000000000000000000000000000000000000000 --- a/docs-cn/14-reference/13-schemaless/13-schemaless.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Schemaless 写入 -description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构' ---- - -在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine -从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless -将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 - -无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 - -## 无模式写入行协议 - -TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 JSON 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 - -对于 InfluxDB、OpenTSDB 的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。 - -Schemaless 采用一个字符串来表达一个数据行(可以向写入 API 中一次传入多行字符串来实现多个数据行的批量写入),其格式约定如下: - -```json -measurement,tag_set field_set timestamp -``` - -其中: - -- measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。 -- tag_set 将作为标签数据,其格式形如 `=,=`,也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。 -- field_set 将作为普通列数据,其格式形如 `=,=`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。 -- timestamp 即本行数据对应的主键时间戳。 - -tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要使用双引号(")。 - -在无模式写入数据行协议中,field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说: - -- 如果两边有英文双引号,表示 BINARY(32) 类型。例如 `"abc"`。 -- 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。 -- 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号) -- 数值类型将通过后缀来区分数据类型: - -| **序号** | **后缀** | **映射类型** | **大小(字节)** | -| -------- | -------- | ------------ | -------------- | -| 1 | 无或 f64 | double | 8 | -| 2 | f32 | float | 4 | -| 3 | i8 | TinyInt | 1 | -| 4 | i16 | SmallInt | 2 | -| 5 | i32 | Int | 4 | -| 6 | i64 或 i | Bigint | 8 | - -- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。 - -例如如下数据行表示:向名为 st 的超级表下的 t1 标签为 "3"(NCHAR)、t2 标签为 "4"(NCHAR)、t3 -标签为 "t3"(NCHAR)的数据子表,写入 c1 列为 3(BIGINT)、c2 列为 false(BOOL)、c3 -列为 "passit"(BINARY)、c4 列为 
4(DOUBLE)、主键时间戳为 1626006833639000000 的一行数据。 - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 -``` - -需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。 - -## 无模式写入的主要处理逻辑 - -无模式写入按照如下原则来处理行数据: - -1. 将使用如下规则来生成子表名:首先将 measurement 的名称和标签的 key 和 value 组合成为如下的字符串 - -```json -"measurement,tag_key1=tag_value1,tag_key2=tag_value2" -``` - -需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 -排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t\*” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。 - -2. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 -3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 -4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 -5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 - NULL。 -6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 -7. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 -8. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 - -:::tip -无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 -48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) - -::: - -## 时间分辨率识别 - -无模式写入过程中支持三个指定的模式,具体如下 - -| **序号** | **值** | **说明** | -| -------- | ------------------- | ------------------------------- | -| 1 | SML_LINE_PROTOCOL | InfluxDB 行协议(Line Protocol) | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB 文本行协议 | -| 3 | SML_JSON_PROTOCOL | JSON 协议格式 | - -在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示: - -| **序号** | **时间分辨率定义** | **含义** | -| -------- | --------------------------------- | -------------- | -| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | 未定义(无效) | -| 2 | TSDB_SML_TIMESTAMP_HOURS | 小时 | -| 3 | TSDB_SML_TIMESTAMP_MINUTES | 分钟 | -| 4 | TSDB_SML_TIMESTAMP_SECONDS | 秒 | -| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | 毫秒 | -| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | 微秒 | -| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | 纳秒 | - -在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。 - -## 数据模式映射规则 - -本节将说明行协议的数据如何映射成为具有模式的数据。每个行协议中数据 measurement 映射为 -超级表名称。tag_set 中的 标签名称为 数据模式中的标签名,field_set 中的名称为列名称。以如下数据为例,说明映射规则: - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 -``` - -该行数据映射生成一个超级表: st, 其包含了 3 个类型为 nchar 的标签,分别是:t1, t2, t3。五个数据列,分别是 ts(timestamp),c1 (bigint),c3(binary),c2 (bool), c4 (bigint)。映射成为如下 SQL 语句: - -```json -create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) -``` - -## 数据模式变更处理 - -本节将说明不同行数据写入情况下,对于数据模式的影响。 - -在使用行协议写入一个明确的标识的字段类型的时候,后续更改该字段的类型定义,会出现明确的数据模式错误,即会触发写入 API 报告错误。如下所示, - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000 -``` - -第一行的数据类型映射将 c4 列定义为 Double, 但是第二行的数据又通过数值后缀方式声明该列为 BigInt, 由此会触发无模式写入的解析错误。 - -如果列前面的行协议将数据列声明为了 binary, 后续的要求长度更长的 binary 长度,此时会触发超级表模式的变更。 - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 -``` - -第一行中行协议解析会声明 c5 列是一个 binary(4)的字段,第二次行数据写入会提取列 c5 仍然是 binary 列,但是其宽度为 6,此时需要将 binary 的宽度增加到能够容纳 新字符串的宽度。 - -```json -st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 -``` - -第二行数据相对于第一行来说增加了一个列 c6,类型为 binary(6)。那么此时会自动增加一个列 c6, 类型为 binary(6)。 - -## 写入完整性 - -TDengine 提供数据写入的幂等性保证,即您可以反复调用 API 进行出错数据的写入操作。但是不提供多行数据写入的原子性保证。即在多行数据一批次写入过程中,会出现部分数据写入成功,部分数据写入失败的情况。 - -## 错误码 - -如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR 
-错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 -taos_errstr 获取具体的错误原因。 diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx deleted file mode 100644 index b54989f0115bc07bef81ca363b5909ffa970c6ad..0000000000000000000000000000000000000000 --- a/docs-cn/20-third-party/01-grafana.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -sidebar_label: Grafana -title: Grafana ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/) 快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表的内容可以在仪表盘(DashBoard)上进行可视化展现。关于 TDengine 插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。 - -## 前置条件 - -要让 Grafana 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 - -- TDengine 集群已经部署并正常运行 -- taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter) - -记录以下信息: - -- TDengine 集群 REST API 地址,如:`http://tdengine.local:6041`。 -- TDengine 集群认证信息,可使用用户名及密码。 - -## 安装 Grafana - -目前 TDengine 支持 Grafana 7.5 以上的版本。用户可以根据当前的操作系统,到 Grafana 官网下载安装包,并执行安装。下载地址如下:。 - -## 配置 Grafana - -### 安装 Grafana Plugin 并配置数据源 - - - - -将集群信息设置为环境变量;也可以使用 `.env` 文件,请参考 [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html): - -```sh -export TDENGINE_API=http://tdengine.local:6041 -# user + password -export TDENGINE_USER=user -export TDENGINE_PASSWORD=password - -# 其他环境变量: -# - 是否安装数据源,默认为 true,表示安装 -export TDENGINE_DS_ENABLED=false -# - 数据源名称,默认为 TDengine -export TDENGINE_DS_NAME=TDengine -# - 数据源所属组织 ID,默认为 1 -export GF_ORG_ID=1 -# - 数据源是否可通过管理面板编辑,默认为 0,表示不可编辑 -export TDENGINE_EDITABLE=1 -``` - -运行安装脚本: - -```sh -bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -``` - -该脚本将自动安装 Grafana 插件并配置数据源。安装完毕后,需要重启 Grafana 服务后生效。 - -保存该脚本并执行 `./install.sh --help` 可查看详细帮助文档。 - - - - -使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。 - -```bash -grafana-cli plugins install tdengine-datasource -# with sudo -sudo -u grafana grafana-cli plugins install tdengine-datasource -``` - -或者从 [GitHub](https://github.com/taosdata/grafanaplugin/tags) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下: - -```bash -GF_VERSION=3.2.2 -# from GitHub -wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip -# from Grafana -wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download -``` - -以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。 - -```bash -sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ -``` - -如果 Grafana 在 Docker 环境下运行,可以使用如下的环境变量设置自动安装 TDengine 数据源插件: - -```bash -GF_INSTALL_PLUGINS=tdengine-datasource -``` - -之后,用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: - -![TDengine Database Grafana plugin add data source](./add_datasource1.webp) - -点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: - -![TDengine Database Grafana plugin add data source](./add_datasource2.webp) - -进入数据源配置页面,按照默认提示修改相应配置即可: - -![TDengine Database Grafana plugin add data source](./add_datasource3.webp) - -- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 。 -- User:TDengine 用户名。 -- Password:TDengine 用户密码。 - -点击 
`Save & Test` 进行测试,成功会有如下提示: - -![TDengine Database Grafana plugin add data source](./add_datasource4.webp) - - - - -### 创建 Dashboard - -回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: - -![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp) - -如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: - -- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。 -- ALIAS BY:可设置当前查询别名。 -- GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 - -按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: - -![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp) - -> 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。 - -### 导入 Dashboard - -在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。该 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。 - -使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表: - -- [15146](https://grafana.com/grafana/dashboards/15146): 监控多个 TDengine 集群 -- [15155](https://grafana.com/grafana/dashboards/15155): TDengine 告警示例 -- [15167](https://grafana.com/grafana/dashboards/15167): TDinsight -- [16388](https://grafana.com/grafana/dashboards/16388): Telegraf 采集节点信息的数据展示 diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md deleted file mode 100644 index 2125545f393819d74fc2c5df1c37784823e33343..0000000000000000000000000000000000000000 --- a/docs-cn/20-third-party/09-emq-broker.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -sidebar_label: EMQX Broker -title: EMQX Broker 写入 ---- - -MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx)是一开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过 发送到 Web 服务的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。 - -## 前置条件 - -要让 EMQX 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 - -- TDengine 集群已经部署并正常运行 -- taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter) -- 如果使用后文介绍的模拟写入程序,需要安装合适版本的 Node.js,推荐安装 v12 - -## 安装并启动 EMQX - -用户可以根据当前的操作系统,到 EMQX 官网下载安装包,并执行安装。下载地址如下:。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。 - - -## 创建数据库和表 - -在 TDengine 中为接收 MQTT 数据创建相应数据库和表结构。进入 TDengine CLI 复制并执行以下 SQL 语句: - -```sql -CREATE DATABASE test; -USE test; -CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); -``` - -注:表结构以博客[数据传输、存储、展现,EMQX + TDengine 搭建 MQTT 物联网数据可视化平台](https://www.taosdata.com/blog/2020/08/04/1722.html)为例。后续操作均以此博客场景为例进行,请你根据实际应用场景进行修改。 - -## 配置 EMQX 规则 - -由于 EMQX 不同版本配置界面所有不同,这里仅以 v4.4.3 为例,其他版本请参考相应官网文档。 - -### 登录 EMQX Dashboard - -使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public`。 - -![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) - -### 创建规则(Rule) - -选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮: - -![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) - -### 编辑 SQL 字段 - -复制以下内容输入到 SQL 编辑框: - -```sql -SELECT - payload -FROM - "sensor/data" -``` - -其中 `payload` 代表整个消息体, `sensor/data` 为本规则选取的消息主题。 - -![TDengine Database EMQX create rule](./emqx/create-rule.webp) - -### 新增“动作(action handler)” - -![TDengine Database 
EMQX](./emqx/add-action-handler.webp) - -### 新增“资源(Resource)” - -![TDengine Database EMQX create resource](./emqx/create-resource.webp) - -选择“发送数据到 Web 服务”并点击“新建资源”按钮: - -### 编辑“资源(Resource)” - -选择“WebHook”并填写“请求 URL”为 taosAdapter 提供 REST 服务的地址,如果是本地启动的 taosadapter, 那么默认地址为: - -``` -http://127.0.0.1:6041/rest/sql -``` - -其他属性请保持默认值。 - -![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) - -### 编辑“动作(action)” - -编辑资源配置,增加 Authorization 认证的键/值配对项。默认用户名和密码对应的 Authorization 值为: -``` -Basic cm9vdDp0YW9zZGF0YQ== -``` -相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。 - -在消息体中输入规则引擎替换模板: - -```sql -INSERT INTO test.sensor_data VALUES( - now, - ${payload.temperature}, - ${payload.humidity}, - ${payload.volume}, - ${payload.PM10}, - ${payload.pm25}, - ${payload.SO2}, - ${payload.NO2}, - ${payload.CO}, - '${payload.id}', - ${payload.area}, - ${payload.ts} -) -``` - -![TDengine Database EMQX edit action](./emqx/edit-action.webp) - -最后点击左下方的 “Create” 按钮,保存规则。 -## 编写模拟测试程序 - -```javascript -{{#include docs-examples/other/mock.js}} -``` - -注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。 - -![TDengine Database EMQX client num](./emqx/client-num.webp) - -## 执行测试模拟发送 MQTT 数据 - -``` -npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org -node mock.js -``` - -![TDengine Database EMQX run-mock](./emqx/run-mock.webp) - -## 验证 EMQX 接收到数据 - -在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到: - -![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) - -## 验证数据写入到 TDengine - -使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中: - -![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) - -TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 -EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 diff --git a/docs-cn/20-third-party/10-hive-mq-broker.md b/docs-cn/20-third-party/10-hive-mq-broker.md deleted file mode 100644 index f75ed793d6272ae27f92676e2096ef455f638aa6..0000000000000000000000000000000000000000 --- a/docs-cn/20-third-party/10-hive-mq-broker.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -sidebar_label: HiveMQ Broker -title: HiveMQ Broker 写入 ---- - -[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md deleted file mode 100644 index 8369806adcfe1b195348e7d60160609cde9150e8..0000000000000000000000000000000000000000 --- a/docs-cn/20-third-party/11-kafka.md +++ /dev/null @@ -1,448 +0,0 @@ ---- -sidebar_label: Kafka -title: TDengine Kafka Connector 使用教程 ---- - -TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine, 或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。 - -## 什么是 Kafka Connect? 
- -Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 - -![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp) - -TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于 从 Kafka Connect 接收数据并写入 TDengine。 - -![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) - -## 什么是 Confluent? - -[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括: - -1. Schema Registry -2. REST 代理 -3. 非 Java 客户端 -4. 很多打包好的 Kafka Connect 插件 -5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 - -这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 -![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) - -Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 - -## 前置条件 - -运行本教程中示例的前提条件。 - -1. Linux 操作系统 -2. 已安装 Java 8 和 Maven -3. 已安装 Git -4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install) - -## 安装 Confluent - -Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。 - -在任意目录下执行: - -``` -curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz -tar xzf confluent-7.1.1.tar.gz -C /opt/test -``` - -然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。 - -```title=".profile" -export CONFLUENT_HOME=/opt/confluent-7.1.1 -PATH=$CONFLUENT_HOME/bin -export PATH -``` - -以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) - -安装完成之后,可以输入`confluent version`做简单验证: - -``` -# confluent version -confluent - Confluent CLI - -Version: v2.6.1 -Git Ref: 6d920590 -Build Date: 2022-02-18T06:14:21Z -Go Version: go1.17.6 (linux/amd64) -Development: false -``` - -## 安装 TDengine Connector 插件 - -### 从源码安装 - -``` -git clone https://github.com:taosdata/kafka-connect-tdengine.git -cd kafka-connect-tdengine -mvn clean package -unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip -``` - -以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 - -### 用 confluent-hub 安装 - -[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。 -**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。 - -## 启动 Confluent - -``` -confluent local services start -``` - -:::note -一定要先安装插件再启动 Confluent, 否则加载插件会失败。 -::: - -:::tip -若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 : - -```title="控制台输出日志" {1} -Using CONFLUENT_CURRENT: /tmp/confluent.106668 -Starting ZooKeeper -ZooKeeper is [UP] -Starting Kafka -Kafka is [UP] -Starting Schema Registry -Schema Registry is [UP] -Starting Kafka REST -Kafka REST is [UP] -Starting Connect -Connect is [UP] -Starting ksqlDB Server -ksqlDB Server is [UP] -Starting Control Center -Control Center is [UP] -``` - -清空数据可执行 `rm -rf /tmp/confluent.106668`。 -::: - -### 验证各个组件是否启动成功 - -输入命令: - -``` -confluent local services status -``` - -如果各组件都启动成功,会得到如下输出: - -``` -Connect is [UP] -Control Center is [UP] -Kafka is [UP] -Kafka REST is [UP] -ksqlDB Server is [UP] -Schema Registry is [UP] -ZooKeeper is [UP] -``` - -### 验证插件是否安装成功 - -在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件: - -``` -confluent local services connect plugin list -``` - -如果成功安装,会输出如下: - -```txt 
{4,9} -Available Connect Plugins: -[ - { - "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", - "type": "sink", - "version": "1.0.0" - }, - { - "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", - "type": "source", - "version": "1.0.0" - }, -...... -``` - -如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: -``` -echo `cat /tmp/confluent.current`/connect/connect.stdout -``` -该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。 - -与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。 - - -## TDengine Sink Connector 的使用 - -TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。 - -TDengine Sink Connector 内部使用 TDengine [无模式写入接口](/reference/connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。 - -下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 - -### 添加配置文件 - -``` -mkdir ~/test -cd ~/test -vi sink-demo.properties -``` - -sink-demo.properties 内容如下: - -```ini title="sink-demo.properties" -name=TDengineSinkConnector -connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector -tasks.max=1 -topics=meters -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.user=root -connection.password=taosdata -connection.database=power -db.schemaless=line -data.precision=ns -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter -``` - -关键配置说明: - -1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。 -2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。 - -### 创建 Connector 实例 - -``` -confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties -``` - -若以上命令执行成功,则有如下输出: - -```json -{ - "name": "TDengineSinkConnector", - "config": { - "connection.database": "power", - "connection.password": "taosdata", - "connection.url": "jdbc:TAOS://127.0.0.1:6030", - "connection.user": "root", - "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", - "data.precision": "ns", - "db.schemaless": "line", - "key.converter": "org.apache.kafka.connect.storage.StringConverter", - "tasks.max": "1", - "topics": "meters", - "value.converter": "org.apache.kafka.connect.storage.StringConverter", - "name": "TDengineSinkConnector" - }, - "tasks": [], - "type": "sink" -} -``` - -### 写入测试数据 - -准备测试数据的文本文件,内容如下: - -```txt title="test-data.txt" -meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 -meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 -meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 -meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 -``` - -使用 kafka-console-producer 向主题 meters 添加测试数据。 - -``` -cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters -``` - -:::note -如果目标数据库 power 不存在,那么 TDengine Sink Connector 会自动创建数据库。自动创建数据库使用的时间精度为纳秒,这就要求写入数据的时间戳精度也是纳秒。如果写入数据的时间戳精度不是纳秒,将会抛异常。 -::: - -### 验证同步是否成功 - -使用 TDengine CLI 验证同步是否成功。 - -``` -taos> use power; -Database changed. 
- -taos> select * from meters; - ts | current | voltage | phase | groupid | location | -=============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | -Query OK, 4 row(s) in set (0.004208s) -``` - -若看到了以上数据,则说明同步成功。若没有,请检查 Kafka Connect 的日志。配置参数的详细说明见[配置参考](#配置参考)。 - -## TDengine Source Connector 的使用 - -TDengine Source Connector 的作用是将 TDengine 某个数据库某一时刻之后的数据全部推送到 Kafka。TDengine Source Connector 的实现原理是,先分批拉取历史数据,再用定时查询的策略同步增量数据。同时会监控表的变化,可以自动同步新增的表。如果重启 Kafka Connect, 会从上次中断的位置继续同步。 - -TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [InfluxDB Line 协议格式](/develop/insert-data/influxdb-line/) 或 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json), 然后写入 Kafka。 - -下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。 - -### 添加配置文件 - -``` -vi source-demo.properties -``` - -输入以下内容: - -```ini title="source-demo.properties" -name=TDengineSourceConnector -connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector -tasks.max=1 -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.username=root -connection.password=taosdata -connection.database=test -connection.attempts=3 -connection.backoff.ms=5000 -topic.prefix=tdengine-source- -poll.interval.ms=1000 -fetch.max.rows=100 -out.format=line -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter -``` - -### 准备测试数据 - -准备生成测试数据的 SQL 文件。 - -```sql title="prepare-source-data.sql" -DROP DATABASE IF EXISTS test; -CREATE DATABASE test; -USE test; -CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); -``` - -使用 TDengine CLI, 执行 SQL 文件。 - -``` -taos -f prepare-source-data.sql -``` - -### 创建 Connector 实例 - -``` -confluent local services connect connector load TDengineSourceConnector --config source-demo.properties -``` - -### 查看 topic 数据 - -使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 - -``` -kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test -``` - -输出: - -``` -...... 
-meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 -meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 -...... -``` - -此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据: - -``` -USE test; -INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38); -INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); -``` - -再切换回 kafka-console-consumer, 此时命令行窗口已经打印出刚插入的 2 条数据。 - -### unload 插件 - -测试完毕之后,用 unload 命令停止已加载的 connector。 - -查看当前活跃的 connector: - -``` -confluent local services connect connector status -``` - -如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload: - -``` -confluent local services connect connector unload TDengineSourceConnector -confluent local services connect connector unload TDengineSourceConnector -``` - -## 配置参考 - -### 通用配置 - -以下配置项对 TDengine Sink Connector 和 TDengine Source Connector 均适用。 - -1. `name`: connector 名称。 -2. `connector.class`: connector 的完整类名, 如: com.taosdata.kafka.connect.sink.TDengineSinkConnector。 -3. `tasks.max`: 最大任务数, 默认 1。 -4. `topics`: 需要同步的 topic 列表, 多个用逗号分隔, 如 `topic1,topic2`。 -5. `connection.url`: TDengine JDBC 连接字符串, 如 `jdbc:TAOS://127.0.0.1:6030`。 -6. `connection.user`: TDengine 用户名, 默认 root。 -7. `connection.password` :TDengine 用户密码, 默认 taosdata。 -8. `connection.attempts` :最大尝试连接次数。默认 3。 -9. `connection.backoff.ms` : 创建连接失败重试时间隔时间,单位为 ms。 默认 5000。 - -### TDengine Sink Connector 特有的配置 - -1. `connection.database`: 目标数据库名。如果指定的数据库不存在会则自动创建。自动建库使用的时间精度为纳秒。默认值为 null。为 null 时目标数据库命名规则参考 `connection.database.prefix` 参数的说明 -2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。 -3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。 -4. `max.retries`: 发生错误时的最大重试次数。默认为 1。 -5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认为 3000。 -6. `db.schemaless`: 数据格式,可选值为: - 1. line :代表 InfluxDB 行协议格式 - 2. json : 代表 OpenTSDB JSON 格式 - 3. telnet :代表 OpenTSDB Telnet 行协议格式 -7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: - 1. ms : 表示毫秒 - 2. us : 表示微秒 - 3. ns : 表示纳秒。默认为纳秒。 - -### TDengine Source Connector 特有的配置 - -1. `connection.database`: 源数据库名称,无缺省值。 -2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。 -3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。 -4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。 -5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。 -6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。 - -## 其他说明 - -1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 -2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。 - -## 问题反馈 - -无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。 - -## 参考 - -1. https://www.confluent.io/what-is-apache-kafka -2. https://developer.confluent.io/learn-kafka/kafka-connect/intro -3. 
https://docs.confluent.io/platform/current/platform.html diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md deleted file mode 100644 index 433cb4808b60ce73c639a23beef45fb8e1afb7dd..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/01-arch.md +++ /dev/null @@ -1,302 +0,0 @@ ---- -sidebar_label: 整体架构 -title: 整体架构 ---- - -## 集群与基本逻辑单元 - -TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以自动化负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。 - -### 主要逻辑单元 - -TDengine 分布式架构的逻辑结构图如下: - -![TDengine Database 架构示意图](./structure.webp) - -
图 1 TDengine 架构示意图</center>
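
下文将对图中各个逻辑单元逐一介绍。作为直观参照,可以先在 TDengine CLI 中用下面几条语句查看一个运行中集群的这些逻辑单元(示意代码,其中的 EP 为假设值,输出列随版本可能略有差异):

```sql
-- 查看集群中的所有数据节点(dnode)及其状态
SHOW DNODES;
-- 查看管理节点(mnode)副本及其角色
SHOW MNODES;
-- 向集群添加一个新的数据节点(EP 仅为示例)
CREATE DNODE "h1.taosdata.com:6030";
```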
- -一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine 应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过 taosc 的 API 与 TDengine 集群进行互动。下面对每个逻辑单元进行简要介绍。 - -**物理节点(pnode):** pnode 是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有 OS 的物理机、虚拟机或 Docker 容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine 完全依赖 FQDN 来进行网络通讯,如果不了解 FQDN,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 - -**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode 包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode 在系统中的唯一标识由实例的 End Point(EP)决定。EP 是 dnode 所在物理节点的 FQDN(Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 - -**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。 - -**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。 - -**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup)来保证系统的高可靠。虚拟节点组内采取 master/slave 的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。 - -**Taosc** taosc 是 TDengine 给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供 C/C++ 语言原生接口,内嵌于 JDBC、C#、Python、Go、Node.js 语言连接库里。应用都是通过 taosc 而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于 JDBC、C/C++、C#、Python、Go、Node.js 接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的 RESTful 接口,taosc 在 TDengine 集群的每个 dnode 上都有一运行实例。 - -### 节点之间的通讯 - -**通讯方式:**TDengine 系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过 TCP/UDP 进行的。因为考虑到物联网场景,数据写入的包一般不大,因此 TDengine 除采用 TCP 做传输之外,还采用 UDP 方式,因为 UDP 更加高效,而且不受连接数的限制。TDengine 实现了自己的超时、重传、确认等机制,以确保 UDP 的可靠传输。对于数据量不到 15K 的数据包,采取 UDP 的方式进行传输,超过 15K 的,或者是查询类的操作,自动采取 TCP 的方式进行传输。同时,TDengine 根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用 TCP 方式进行数据传输。 - -**FQDN 配置:**一个数据节点有一个或多个 FQDN,可以在系统配置文件 taos.cfg 通过参数“fqdn”进行指定,如果没有指定,系统将自动获取计算机的 hostname 作为其 FQDN。如果节点没有配置 FQDN,可以直接将该节点的配置参数 fqdn 设置为它的 IP 地址。但不建议使用 IP,因为 IP 地址可变,一旦变化,将让集群无法正常工作。一个数据节点的 EP(End Point)由 FQDN + Port 组成。采用 FQDN,需要保证 DNS 服务正常工作,或者在节点以及应用所在的节点配置好 hosts 文件。另外,这个参数值的长度需要控制在 96 个字符以内。 - -**端口配置:**一个数据节点对外的端口由 TDengine 的系统配置参数 serverPort 决定,对集群内部通讯的端口是 serverPort+5。为支持多线程高效的处理 UDP 数据,每个对内和对外的 UDP 连接,都需要占用 5 个连续的端口。 - -- 集群内数据节点之间的数据复制操作占用一个 TCP 端口,是 serverPort+10。 -- 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。 -- 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。 - -因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) - -**集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 
来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。 - -**集群内部通讯:**各个数据节点之间通过 TCP/UDP 进行连接。一个数据节点启动时,将获取 mnode 所在的 dnode 的 EP 信息,然后与系统中的 mnode 建立起连接,交换信息。获取 mnode 的 EP 信息有三步: - -1. 检查 mnodeEpSet.json 文件是否存在,如果不存在或不能正常打开获得 mnode EP 信息,进入第二步; -2. 检查系统配置文件 taos.cfg,获取节点配置参数 firstEp、secondEp(这两个参数指定的节点可以是不带 mnode 的普通节点,这样的话,节点被连接时会尝试重定向到 mnode 节点),如果不存在或者 taos.cfg 里没有这两个配置参数,或无效,进入第三步; -3. 将自己的 EP 设为 mnode EP,并独立运行起来。 - -获取 mnode EP 列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试 mnode EP 列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 - -**Mnode 的选择:**TDengine 逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码 taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的 End Point,并与获取的 mnode EP List 进行比对,如果在其中,该数据节点认为自己应该启动 mnode 模块,成为 mnode。如果自己的 EP 不在 mnode EP List 里,则不启动 mnode 模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode 有可能迁移至新的 dnode,但一切都是透明的,无需人工干预,配置参数的修改,是 mnode 自己根据资源做出的决定。 - -**新数据节点的加入:**系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用 TDengine CLI 连接到现有工作的数据节点,然后用命令“CREATE DNODE”将新的数据节点的 End Point 添加进去;第二步:在新的数据节点的系统配置参数文件 taos.cfg 里,将 firstEp,secondEp 参数设置为现有集群中任意两个数据节点的 EP 即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 - -**重定向:**无论是 dnode 还是 taosc,最先都是要发起与 mnode 的连接,但 mnode 是系统自动创建并维护的,因此对于用户来说,并不知道哪个 dnode 在运行 mnode。TDengine 只要求向系统中任何一个工作的 dnode 发起连接即可。因为任何一个正在运行的 dnode,都维护有目前运行的 mnode EP List。当收到一个来自新启动的 dnode 或 taosc 的连接请求,如果自己不是 mnode,则将 mnode EP List 回复给对方,taosc 或新启动的 dnode 收到这个 list,就重新尝试建立连接。当 mnode EP List 发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知 taosc。 - -### 一个典型的消息流程 - -为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 - -![TDengine Database 典型的操作流程](./message.webp) - -
图 2 TDengine 典型的操作流程
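
在逐步拆解之前,先以一条最简单的写入语句为例(表名与数值沿用本文其他章节的示例,仅作示意),下述各步骤描述的正是这类请求在系统内的流转过程:

```sql
-- 应用通过 taosc 发起的一次典型写入:
-- 首次写入时 taosc 会向 mnode 请求表 d1001 的元数据并缓存,
-- 之后的写入将直接发往该表所属的 master vnode
INSERT INTO d1001 VALUES (now, 10.3, 219, 0.31);
```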
- -1. 应用通过 JDBC 或其他 API 接口发起插入数据的请求。 -2. taosc 会检查缓存,看是否保存有该表的 meta data。如果有,直接到第 4 步。如果没有,taosc 将向 mnode 发出 get meta-data 请求。 -3. mnode 将该表的 meta-data 返回给 taosc。Meta-data 包含有该表的 schema,而且还有该表所属的 vgroup 信息(vnode ID 以及所在的 dnode 的 End Point,如果副本数为 N,就有 N 组 End Point)。如果 taosc 迟迟得不到 mnode 回应,而且存在多个 mnode,taosc 将向下一个 mnode 发出请求。 -4. taosc 向 master vnode 发起插入请求。 -5. vnode 插入数据后,给 taosc 一个应答,表示插入成功。如果 taosc 迟迟得不到 vnode 的回应,taosc 会认为该节点已经离线。这种情况下,如果被插入的数据库有多个副本,taosc 将向 vgroup 里下一个 vnode 发出插入请求。 -6. taosc 通知 APP,写入成功。 - -对于第二和第三步,taosc 启动时,并不知道 mnode 的 End Point,因此会直接向配置的集群对外服务的 End Point 发起请求。如果接收到该请求的 dnode 并没有配置 mnode,该 dnode 会在回复的消息中告知 mnode EP 列表,这样 taosc 会重新向新的 mnode 的 EP 发出获取 meta-data 的请求。 - -对于第四和第五步,没有缓存的情况下,taosc 无法知道虚拟节点组里谁是 master,就假设第一个 vnodeID 就是 master,向它发出请求。如果接收到请求的 vnode 并不是 master,它会在回复中告知谁是 master,这样 taosc 就向建议的 master vnode 发出请求。一旦得到插入成功的回复,taosc 会缓存 master 节点的信息。 - -上述是插入数据的流程,查询、计算的流程也完全一致。taosc 把这些复杂的流程全部封装屏蔽了,对于应用来说无感知也无需任何特别处理。 - -通过 taosc 缓存机制,只有在第一次对一张表操作时,才需要访问 mnode,因此 mnode 不会成为系统瓶颈。但因为 schema 有可能变化,而且 vgroup 有可能发生改变(比如负载均衡发生),因此 taosc 会定时和 mnode 交互,自动更新缓存。 - -## 存储模型与数据分区、分片 - -### 存储模型 - -TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: - -- 时序数据:存放于 vnode 里,由 data、head 和 last 三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 -- 标签数据:存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量不大,有 N 张表,就有 N 条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此 TDengine 支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。 -- 元数据:存放于 mnode 里,包含系统节点、用户、DB、Table Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 - -与典型的 NoSQL 存储模型相比,TDengine 将标签数据与时序数据完全分离存储,它具有两大优势: - -- 能够极大地降低标签数据存储的冗余度:一般的 NoSQL 数据库或时序数据库,采用的 K-V 存储,其中的 Key 包含时间戳、设备 ID、各种标签。每条记录都带有这些重复的内容,浪费存储空间。而且如果应用要在历史数据上增加、修改或删除标签,需要遍历数据,重写一遍,操作成本极其昂贵。 -- 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。而且标签数据采用全内存的结构进行管理和维护,千万级别规模的标签数据查询可以在毫秒级别返回。 - -### 数据分片 - -对于海量的数据管理,为实现水平扩展,一般都需要采取分片(Sharding)分区(Partitioning)策略。TDengine 是通过 vnode 来实现数据分片的,通过一个时间段一个数据文件来实现时序数据分区的。 - -vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和计算功能。为便于负载均衡、数据恢复、支持异构环境,TDengine 将一个数据节点根据其计算和存储资源切分为多个 vnode。这些 vnode 的管理是 TDengine 自动完成的,对应用完全透明。 - -对于单独一个数据采集点,无论其数据量多大,一个 vnode(或 vgroup,如果副本数大于 1)有足够的计算资源和存储资源来处理(如果每秒生成一条 16 字节的记录,一年产生的原始数据不到 0.5G),因此 TDengine 将一张表(一个数据采集点)的所有数据都存放在一个 vnode 里,而不会让同一个采集点的数据分布到两个或多个 dnode 上。而且一个 vnode 可存储多个数据采集点(表)的数据,一个 vnode 可容纳的表的数目的上限为一百万。设计上,一个 vnode 里所有的表都属于同一个 DB。一个数据节点上,除非特殊配置,一个 DB 拥有的 vnode 数目不会超过系统核的数目。 - -创建 DB 时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的 vnode,且该 vnode 是否有空余的表空间,如果有,立即在该有空位的 vnode 创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个 dnode 上创建一新的 vnode,然后创建表。如果 DB 有多个副本,系统不是只创建一个 vnode,而是一个 vgroup(虚拟数据节点组)。系统对 vnode 的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。 - -每张表的 meta data(包含 schema,标签等)也存放于 vnode 里,而不是集中存放于 mnode,实际上这是对 Meta 数据的分片,这样便于高效并行的进行标签过滤操作。 - -### 数据分区 - -TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区。每个数据文件只包含一个时间段的时序数据,时间段的长度由 DB 的配置参数 days 决定。这种按时间段分区的方法还便于高效实现数据的保留策略,只要数据文件超过规定的天数(系统配置参数 keep),将被自动删除。而且不同的时间段可以存放于不同的路径和存储介质,以便于大数据的冷热管理,实现多级存储。 - -总的来说,**TDengine 是通过 vnode 以及时间两个维度,对大数据进行切分**,便于并行高效的管理,实现水平扩展。 - -### 负载均衡 - -每个 dnode 都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此 mnode 了解整个集群的状态。基于整体状态,当 mnode 发现某个 dnode 负载过重,它会将 dnode 上的一个或多个 vnode 挪到其他 dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。 - -如果 mnode 一段时间没有收到 dnode 的状态报告,mnode 会认为这个 dnode 已经离线。如果离线时间超过一定时长(时长由配置参数 offlineThreshold 决定),该 dnode 将被 mnode 强制剔除出集群。该 dnode 上的 vnodes 如果副本数大于 1,系统将自动在其他 dnode 上创建新的副本,以保证数据的副本数。如果该 dnode 上还有 
mnode,而且 mnode 的副本数大于 1,系统也将自动在其他 dnode 上创建新的 mnode,以保证 mnode 的副本数。 - -当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。 - -负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 - -**提示:负载均衡由参数 balance 控制,决定开启/关闭自动负载均衡。** - -## 数据写入与复制流程 - -如果一个数据库有 N 个副本,那一个虚拟节点组就有 N 个虚拟节点,但是只有一个是 master,其他都是 slave。当应用将新的记录写入系统时,只有 master vnode 能接受写的请求。如果 slave vnode 收到写的请求,系统将通知 taosc 需要重新定向。 - -### Master Vnode 写入流程 - -Master Vnode 遵循下面的写入流程: - -![TDengine Database Master写入流程](./write_master.webp) - -
图 3 TDengine Master 写入流程
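
需要说明的是,下述第 3 步的转发只在多副本场景下发生。可以用如下建库语句构造一个多副本环境(最小示意,库名为假设值):

```sql
-- 创建一个三副本的数据库:同一 vgroup 内将有 1 个 master 和 2 个 slave vnode,
-- master 写完 WAL 后会把数据包转发给两个 slave
CREATE DATABASE demo REPLICA 3;
```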
- -1. master vnode 收到应用的数据插入请求,验证 OK,进入下一步; -2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; -3. 如果有多个副本,vnode 将把数据包转发给同一虚拟节点组内的 slave vnodes,该转发包带有数据的版本号(version); -4. 写入内存,并将记录加入到 skip list; -5. master vnode 返回确认信息给应用,表示写入成功; -6. 如果第 2、3、4 步中任何一步失败,将直接返回错误给应用。 - -### Slave Vnode 写入流程 - -对于 slave vnode,写入流程是: - -![TDengine Database Slave 写入流程](./write_slave.webp) - -
图 4 TDengine Slave 写入流程
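
在查看 slave 侧各步骤之前,可以先用下面的语句观察虚拟节点组内各副本当前的角色分布(示意,沿用上文假设的数据库 demo,输出列随版本可能不同):

```sql
-- 在目标数据库内查看各 vgroup 中副本的分布与角色(master/slave)
USE demo;
SHOW VGROUPS;
```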
- -1. slave vnode 收到 Master vnode 转发了的数据插入请求。检查 last version 是否与 master 一致,如果一致,进入下一步。如果不一致,需要进入同步状态。 -2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。 -3. 写入内存,更新内存中的 skip list。 - -与 master vnode 相比,slave vnode 不存在转发环节,也不存在回复确认环节,少了两步。但写内存与 WAL 是完全一样的。 - -### 主从选择 - -Vnode 会保持一个数据版本号(version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增加 1。 - -一个 vnode 启动时,角色(master、slave)是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立 TCP 连接,并互相交换 status,其中包括 version 和自己的角色。通过 status 的交换,系统进入选主流程,规则如下: - -1. 如果只有一个副本,该副本永远就是 master -2. 所有副本都在线时,版本最高的被选为 master -3. 在线的虚拟节点数过半,而且有虚拟节点是 slave 的话,该虚拟节点自动成为 master -4. 对于 2 和 3,如果多个虚拟节点满足成为 master 的要求,那么虚拟节点组的节点列表里,最前面的选为 master - -更多的关于数据复制的流程,请见[《TDengine 2.0 数据复制模块设计》](/tdinternal/replica/)。 - -### 同步复制 - -对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此 TDengine 提供同步复制的机制供用户选择。在创建数据库时,除指定副本数 replica 之外,用户还需要指定新的参数 quorum。如果 quorum 大于 1,它表示每次 master 转发给副本时,需要等待 quorum-1 个回复确认,才能通知应用,数据在 slave 已经写入成功。如果在一定的时间内,得不到 quorum-1 个回复确认,master vnode 将返回错误给应用。 - -采用同步复制,系统的性能会有所下降,而且 latency 会增加。因为元数据要强一致,mnode 之间的数据同步缺省就是采用的同步复制。 - -## 缓存与持久化 - -### 缓存 - -TDengine 采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine 充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。 - -TDengine 通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将 TDengine 作为数据缓存来使用,而不需要再部署 Redis 或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine 重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的 key-value 缓存系统再将之前缓存的数据重新加载到缓存中。 - -每个 vnode 有自己独立的内存,而且由多个固定大小的内存块组成,不同 vnode 之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个 vnode 维护有自己的 skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个 vnode 里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个 vnode 的内存块的个数由配置参数 blocks 决定,内存块的大小由配置参数 cache 决定。 - -### 持久化存储 - -TDengine 采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当 vnode 中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine 也会拉起落盘线程将缓存的数据写入持久化存储。TDengine 在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制地增长。 - -为充分利用时序数据特点,TDengine 将一个 vnode 保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数 days 决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。 - -对于采集的数据,一般有保留时长,这个时长由系统配置参数 keep 决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。 - -给定 days 与 keep 两个参数,一个典型工作状态的 vnode 中总的数据文件数为:向上取整 `(keep/days)+1` 个。总的数据文件个数不宜过大,也不宜过小。10 到 100 以内合适。基于这个原则,可以设置合理的 days。目前的版本,参数 keep 可以修改,但对于参数 days,一旦设置后,不可修改。 - -在每个数据文件里,一张表的数据是一块一块存储的。一张表可以有一到多个数据文件块。在一个文件块里,数据是列式存储的,占用的是一片连续的存储空间,这样大大提高读取速度。文件块的大小由系统参数 maxRows (每块最大记录条数)决定,缺省值为 4096。这个值不宜过大,也不宜过小。过大,定位具体时间段的数据的搜索时间会变长,影响读取速度;过小,数据块的索引太大,压缩效率偏低,也影响读取速度。 - -每个数据文件(.data 结尾)都有一个对应的索引文件(.head 结尾),该索引文件对每张表都有一数据块的摘要信息,记录了每个数据块在数据文件中的偏移量,数据的起止时间等信息,以帮助系统迅速定位需要查找的数据。每个数据文件还有一对应的 last 文件(.last 结尾),该文件是为防止落盘时数据块碎片化而设计的。如果一张表落盘的记录条数没有达到系统配置参数 minRows(每块最小记录条数),将被先存储到 last 文件,等下次落盘时,新落盘的记录将与 last 文件的记录进行合并,再写入数据文件。 - -数据写入磁盘时,根据系统配置参数 comp 决定是否压缩数据。TDengine 提供了三种压缩选项:无压缩、一阶段压缩和两阶段压缩,分别对应 comp 值为 0、1 和 2 的情况。一阶段压缩根据数据的类型进行了相应的压缩,压缩算法包括 delta-delta 编码、simple 8B 方法、zig-zag 编码、LZ4 等算法。二阶段压缩在一阶段压缩的基础上又用通用压缩算法进行了压缩,压缩率更高。 - -### 多级存储 - -说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 - -在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。 - -除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 
盘上。 - -多级存储支持 3 级,每级最多可配置 16 个挂载点。 - -TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中): - -``` -dataDir [path] -``` - -- path: 挂载点的文件夹路径 -- level: 介质存储等级,取值为 0,1,2。 - 0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。 - 各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。 - 同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。 - 需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。 -- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。 - -在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式: - -``` -dataDir /mnt/data1 0 1 -dataDir /mnt/data2 0 0 -dataDir /mnt/data3 1 0 -dataDir /mnt/data4 1 0 -dataDir /mnt/data5 2 0 -dataDir /mnt/data6 2 0 -``` - -:::note - -1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。 -2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。 -3. 多级存储目前不支持删除已经挂载的硬盘的功能。 - -::: - -## 数据查询 - -TDengine 提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine 的查询处理需要客户端、vnode、mnode 节点协同完成。 - -### 单表查询 - -SQL 语句的解析和校验工作在客户端完成。解析 SQL 语句并生成抽象语法树(Abstract Syntax Tree,AST),然后对其进行校验和检查。以及向管理节点(mnode)请求查询中指定表的元数据信息(table metadata)。 - -根据元数据信息中的 End Point 信息,将查询请求序列化后发送到该表所在的数据节点(dnode)。dnode 接收到查询请求后,识别出该查询请求指向的虚拟节点(vnode),将消息转发到 vnode 的查询执行队列。vnode 的查询执行线程建立基础的查询执行环境,并立即返回该查询请求,同时开始执行该查询。 - -客户端在获取查询结果的时候,dnode 的查询执行队列中的工作线程会等待 vnode 执行线程执行完成,才能将查询结果返回到请求的客户端。 - -### 按时间轴聚合、降采样、插值 - -时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳的数据在时间轴上进行聚合是不同于普通数据库的重要功能。从这点上来看,与流计算引擎的窗口查询有相似的地方。 - -在 TDengine 中引入关键词 interval 来进行时间轴上固定长度时间窗口的切分,并按照时间窗口对数据进行聚合,对窗口范围内的数据按需进行聚合。例如: - -```sql -SELECT COUNT(*) FROM d1001 INTERVAL(1h); -``` - -针对 d1001 设备采集的数据,按照 1 小时的时间窗口返回每小时存储的记录数量。 - -在需要连续获得查询结果的应用场景下,如果给定的时间区间存在数据缺失,会导致该区间数据结果也丢失。TDengine 提供策略针对时间轴聚合计算的结果进行插值,通过使用关键词 fill 就能够对时间轴聚合结果进行插值。例如: - -```sql -SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 23:59:59' INTERVAL(1h) FILL(PREV); -``` - -针对 d1001 设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine 提供前向插值(prev)、线性插值(linear)、空值填充(NULL)、特定值填充(value)。 - -### 多表聚合查询 - -TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: - -![TDengine Database 多表聚合查询原理图](./multi_tables.webp) - -
图 5 多表聚合查询原理图
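
以如下聚合查询为例(超级表 meters 的结构沿用前文 Kafka 一节的示例,标签取值仅为示意),下述各步骤描述的正是这类查询的执行过程:

```sql
-- 先按标签过滤出子表集合,再对多个子表的时序数据做聚合,并按标签分组
SELECT AVG(current), MAX(voltage) FROM meters
  WHERE location = 'California.SanFrancisco'
  GROUP BY groupId;
```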
- -1. 应用将一个查询条件发往系统; -2. taosc 将超级表的名字发往 meta node(管理节点); -3. 管理节点将超级表所拥有的 vnode 列表发回 taosc; -4. taosc 将计算的请求连同标签过滤条件发往这些 vnode 对应的多个数据节点; -5. 每个 vnode 先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给 taosc; -6. taosc 将多个数据节点返回的结果做最后的聚合,将其返回给应用。 - -由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。 - -### 预计算 - -为有效提升查询处理的性能,针对物联网数据的不可更改的特点,在数据块头部记录该数据块中存储数据的统计信息:包括最大值、最小值、和。我们称之为预计算单元。如果查询处理涉及整个数据块的全部数据,直接使用预计算结果,完全不需要读取数据块的内容。由于预计算数据量远小于磁盘上存储的数据块数据的大小,对于磁盘 I/O 为瓶颈的查询处理,使用预计算结果可以极大地减小读取 I/O 压力,加速查询处理的流程。预计算机制与 PostgreSQL 的索引 BRIN(block range index)有异曲同工之妙。 diff --git a/docs-cn/21-tdinternal/03-taosd.md b/docs-cn/21-tdinternal/03-taosd.md deleted file mode 100644 index 0cf0a1aaa222e82f7ca6cc4f0314aa5a50442924..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/03-taosd.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -sidebar_label: taosd 的设计 -title: taosd的设计 ---- - -逻辑上,TDengine 系统包含 dnode,taosc 和 App,dnode 是服务器侧执行代码 taosd 的一个运行实例,因此 taosd 是 TDengine 的核心,本文对 taosd 的设计做一简单的介绍,模块内的实现细节请见其他文档。 - -## 系统模块图 - -taosd 包含 rpc,dnode,vnode,tsdb,query,cq,sync,wal,mnode,http,monitor 等模块,具体如下图: - -![TDengine Database module](./modules.webp) - -taosd 的启动入口是 dnode 模块,dnode 然后启动其他模块,包括可选配置的 http,monitor 模块。taosc 或 dnode 之间交互的消息都是通过 rpc 模块进行,dnode 模块根据接收到的消息类型,将消息分发到 vnode 或 mnode 的消息队列,或由 dnode 模块自己消费。dnode 的工作线程(worker)消费消息队列里的消息,交给 mnode 或 vnode 进行处理。下面对各个模块做简要说明。 - -## RPC 模块 - -该模块负责 taosd 与 taosc,以及其他数据节点之间的通讯。TDengine 没有采取标准的 HTTP 或 gRPC 等第三方工具,而是实现了自己的通讯模块 RPC。 - -考虑到物联网场景下,数据写入的包一般不大,因此除支持 TCP 连接之外,RPC 还支持 UDP 连接。当数据包小于 15K 时,RPC 将采用 UDP 方式进行连接,否则将采用 TCP 连接。对于查询类的消息,RPC 不管包的大小,总是采取 TCP 连接。对于 UDP 连接,RPC 实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。 - -RPC 模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数 compressMsgSize,RPC 在传输中将自动压缩数据,以节省带宽。 - -为保证数据的安全和数据的 integrity,RPC 模块采用 MD5 做数字签名,对数据的真实性和完整性进行认证。 - -## DNODE 模块 - -该模块是整个 taosd 的入口,它具体负责如下任务: - -- 系统的初始化,包括 - - 从文件 taos.cfg 读取系统配置参数,从文件 dnodeCfg.json 读取数据节点的配置参数; - - 启动 RPC 模块,并建立起与 taosc 通讯的 server 连接,与其他数据节点通讯的 server 连接; - - 启动并初始化 dnode 的内部管理,该模块将扫描该数据节点已有的 vnode ,并打开它们; - - 初始化可配置的模块,如 mnode,http,monitor 等。 -- 数据节点的管理,包括 - - 定时的向 mnode 发送 status 消息,报告自己的状态; - - 根据 mnode 的指示,创建、改变、删除 vnode; - - 根据 mnode 的指示,修改自己的配置参数; -- 消息的分发、消费,包括 - - 为每一个 vnode 和 mnode 的创建并维护一个读队列、一个写队列; - - 将从 taosc 或其他数据节点来的消息,根据消息类型,将其直接分发到不同的消息队列,或由自己的管理模块直接消费; - - 维护一个读的线程池,消费读队列的消息,交给 vnode 或 mnode 处理。为支持高并发,一个读线程(worker)可以消费多个队列的消息,一个读队列可以由多个 worker 消费; - - 维护一个写的线程池,消费写队列的消息,交给 vnode 或 mnode 处理。为保证写操作的序列化,一个写队列只能由一个写线程负责,但一个写线程可以负责多个写队列。 - -taosd 的消息消费由 dnode 通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下: - -![TDengine Database dnode](./dnode.webp) - -## VNODE 模块 - -vnode 是一独立的数据存储查询逻辑单元,但因为一个 vnode 只能容许一个 DB ,因此 vnode 内部没有 account,DB,user 等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的 TSDB,负责查询的 query,负责数据复制的 sync,负责数据库日志的的 WAL,负责连续查询的 cq(continuous query),负责事件触发的流计算的 event 等模块,这些子模块只与 vnode 模块发生关系,与其他模块没有任何调用关系。模块图如下: - -![TDengine Database vnode](./vnode.webp) - -vnode 模块向下,与 dnodeVRead,dnodeVWrite 发生互动,向上,与子模块发生互动。它主要的功能有: - -- 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过 vnode 模块进行; -- 对于来自 taosc 或 mnode 的写操作,vnode 模块将其分解为写日志(WAL),转发(sync),本地存储(TSDB)子模块的操作; -- 对于查询操作,分发到 query 模块进行。 - -一个数据节点里有多个 vnode,因此 vnode 模块是有多个运行实例的。每个运行实例是完全独立的。 - -vnode 与其子模块是通过 API 直接调用,而不是通过消息队列传递。而且各个子模块只与 vnode 模块有交互,不与 dnode,rpc 等模块发生任何直接关联。 - -## MNODE 模块 - -mnode 是整个系统的大脑,负责整个系统的资源调度,负责 meta data 的管理与存储。 - -一个运行的系统里,只有一个 mnode,但它有多个副本(由系统配置参数 numOfMnodes 控制)。这些副本分布在不同的 dnode 
里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个 Master,其他副本是 slave。所有数据更新类的操作,都只能在 master 上进行,而查询类的可以在 slave 节点上进行。代码实现上,同步模块与 vnode 共享,但 mnode 被分配一个特殊的 vgroup ID: 1,而且 quorum 大于 1。整个集群系统是由多个 dnode 组成的,运行的 mnode 的副本数不可能超过 dnode 的个数,但不会超过配置的副本数。如果某个 mnode 副本宕机一段时间,只要超过半数的 mnode 副本仍在运行,运行的 mnode 会自动根据整个系统的资源情况,在其他 dnode 里再启动一个 mnode,以保证运行的副本数。 - -各个 dnode 通过信息交换,保存有 mnode 各个副本的 End Point 列表,并向其中的 master 节点定时(间隔由系统配置参数 statusInterval 控制)发送 status 消息,消息体里包含该 dnode 的 CPU、内存、剩余存储空间、vnode 个数,以及各个 vnode 的状态(存储空间、原始数据大小、记录条数、角色等)。这样 mnode 就了解整个系统的资源情况,如果用户创建新的表,就可以决定需要在哪个 dnode 创建;如果增加或删除 dnode,或者监测到某 dnode 数据过热、或离线太长,就可以决定需要挪动那些 vnode,以实现负载均衡。 - -mnode 里还负责 account,user,DB,stable,table,vgroup,dnode 的创建、删除与更新。mnode 不仅把这些 entity 的 meta data 保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在 mnode(保存在 vnode),而且子表不维护自己的 schema,而是与 stable 共享。为减小 mnode 的查询压力,taosc 会缓存 table、stable 的 schema。对于查询类的操作,各个 slave mnode 也可以提供,以减轻 master 压力。 - -## TSDB 模块 - -TSDB 模块是 vnode 中的负责快速高并发地存储和读取属于该 vnode 的表的元数据及采集的时序数据的引擎。除此之外,TSDB 还提供了表结构的修改、表标签值的修改等功能。TSDB 提供 API 供 vnode 和 query 等模块调用。TSDB 中存储了两类数据,1:元数据信息;2:时序数据 - -### 元数据信息 - -TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schema 的定义等。对于超级表和超级表下的子表而言,又包含了 tag 的 schema 定义以及子表的 tag 值等。对于元数据信息而言,TSDB 就相当于一个全内存的 KV 型数据库,属于该 vnode 的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB 还对其中的子表,按照 tag 的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB 中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到 meta 文件中。meta 文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB 也提供了对于元数据的修改操作,如表 schema 的修改,tag schema 的修改以及 tag 值的修改等。 - -### 时序数据 - -每个 TSDB 在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入 TSDB 时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的 1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。 - -而时序数据在写入到 TSDB 的数据文件时,是以列(column)的形式存储的。TSDB 中的数据文件包含多个数据文件组,每个数据文件组中又包含 .head、.data 和 .last 三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB 中的数据文件组是按照时间跨度进行分片的,默认是 10 天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在 TSDB 的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head 文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在 .head 文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.head 和 .last 文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入 .data 文件中,否则,会写入 .last 文件中,等待下次落盘时合并数据写入 .data 文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。 - -## Query 模块 - -该模块负责整体系统的查询处理。客户端调用该该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode ,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0 查询模块设计》。 - -## SYNC 模块 - -该模块实现数据的多副本复制,包括 vnode 与 mnode 的数据复制,支持异步和同步两种复制方式,以满足 meta data 与时序数据不同复制的需求。因为它为 mnode 与 vnode 共享,系统为 mnode 副本预留了一个特殊的 vgroup ID:1。因此 vnode group 的 ID 是从 2 开始的。 - -每个 vnode/mnode 模块实例会有一对应的 sync 模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](/tdinternal/replica/) - -## WAL 模块 - -该模块负责将新插入的数据写入 write ahead log(WAL),为 vnode,mnode 共享。以保证服务器 crash 或其他故障,能从 WAL 中恢复数据。 - -每个 vnode/mnode 模块实例会有一对应的 WAL 模块实例,是完全一一对应的。WAL 的落盘操作由两个参数 walLevel,fsync 控制。看具体场景,如果要 100% 保证数据不会丢失,需要将 walLevel 配置为 2,fsync 设置为 0,每条数据插入请求,都会实时落盘后,才会给应用确认 - -## HTTP 模块 - -该模块负责处理系统对外的 RESTful 接口,可以通过配置,由 dnode 启动或停止 。(仅 2.2 及之前的版本中存在) - -该模块将接收到的 RESTful 请求,做了各种合法性检查后,将其变成标准的 SQL 语句,通过 taosc 的异步接口,将请求发往整个系统中的任一 dnode 。收到处理后的结果后,再翻译成 HTTP 协议,返回给应用。 - -如果 HTTP 模块启动,就意味着启动了一个 taosc 的实例。任一一个 dnode 都可以启动该模块,以实现对 RESTful 请求的分布式处理。 - -## Monitor 模块 - -该模块负责检测一个 dnode 的运行状态,可以通过配置,由 dnode 启动或停止。原则上,每个 dnode 都应该启动一个 monitor 实例。 - -Monitor 采集 TDengine 里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集 CPU、内存、网络等资源的使用情况(采集周期由系统配置参数 monitorInterval 控制)。获得这些数据后,monitor 模块将采集的数据写入系统的日志库(DB 
名字由系统配置参数 monitorDbName 控制)。 - -Monitor 模块使用 taosc 来将采集的数据写入系统,因此每个 monitor 实例,都有一个 taosc 运行实例。 diff --git a/docs-cn/25-application/03-immigrate.md b/docs-cn/25-application/03-immigrate.md deleted file mode 100644 index 9d8946bc4a69639c5327ac1ffb6c0539ddbd0e63..0000000000000000000000000000000000000000 --- a/docs-cn/25-application/03-immigrate.md +++ /dev/null @@ -1,423 +0,0 @@ ---- -sidebar_label: OpenTSDB 迁移到 TDengine -title: OpenTSDB 应用迁移到 TDengine 的最佳实践 ---- - -作为一个分布式、可伸缩、基于 HBase 的分布式时序数据库系统,得益于其先发优势,OpenTSDB 被 DevOps 领域的人员引入并广泛地应用在了运维监控领域。但最近几年,随着云计算、微服务、容器化等新技术快速落地发展,企业级服务种类变得越来越多,架构也越来越复杂,应用运行基础环境日益多样化,给系统和运行监控带来的压力也越来越大。从这一现状出发,使用 OpenTSDB 作为 DevOps 的监控后端存储,越来越受困于其性能问题以及迟缓的功能升级,以及由此而衍生出来的应用部署成本上升和运行效率降低等问题,这些问题随着系统规模的扩大日益严重。 - -在这一背景下,为满足高速增长的物联网大数据市场和技术需求,在吸取众多传统关系型数据库、NoSQL 数据库、流计算引擎、消息队列等软件的优点之后,涛思数据自主开发出创新型大数据处理产品 TDengine。在时序大数据处理上,TDengine 有着自己独特的优势。就 OpenTSDB 当前遇到的问题来说,TDengine 能够有效解决。 - -相对于 OpenTSDB,TDengine 具有如下显著特点: - -- 数据写入和查询的性能远超 OpenTSDB; -- 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5; -- 安装部署非常简单,单一安装包完成安装部署,不依赖其他的第三方软件,整个安装部署过程秒级搞定; -- 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。 -- 支持多达 128 个标签,标签总长度可达到 16 KB; -- 除 REST 接口之外,还提供 C/C++、Java、Python、Go、Rust、Node.js、C#、Lua(社区贡献)、PHP(社区贡献)等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。 - -如果我们将原本运行在 OpenTSDB 上的应用迁移到 TDengine 上,不仅可以有效地降低计算和存储资源的占用、减少部署服务器的规模,还能够极大减少运行维护的成本的输出,让运维管理工作更简单、更轻松,大幅降低总拥有成本。与 OpenTSDB 一样,TDengine 也已经进行了开源,不同的是,除了单机版,后者还实现了集群版开源,被厂商绑定的顾虑一扫而空。 - -在下文中我们将就“使用最典型并广泛应用的运维监控(DevOps)场景”来说明,如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。后续的章节会做更深度的介绍,以便于进行非 DevOps 场景的迁移。 - -## DevOps 应用快速迁移 - -### 1、典型应用场景 - -一个典型的 DevOps 应用场景的系统整体的架构如下图(图 1) 所示。 - -**图 1. DevOps 场景中典型架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "图1. DevOps 场景中典型架构") - -在该应用场景中,包含了部署在应用环境中负责收集机器度量(Metrics)、网络度量(Metrics)以及应用度量(Metrics)的 Agent 工具、汇聚 Agent 收集信息的数据收集器,数据持久化存储和管理的系统以及监控数据可视化工具(例如:Grafana 等)。 - -其中,部署在应用节点的 Agents 负责向 collectd/Statsd 提供不同来源的运行指标,collectd/StatsD 则负责将汇聚的数据推送到 OpenTSDB 集群系统,然后使用可视化看板 Grafana 将数据可视化呈现出来。 - -### 2、迁移服务 - -- **TDengine 安装部署** - -首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后再启动。 - -- **调整数据收集器配置** - -在 TDengine 2.4 版本中,包含一个组件 taosAdapter。taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 - -用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 - -通过 taosAdapter,用户可以将 collectd 或 StatsD 收集的数据直接推送到 TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter 还支持 Telegraf、Icinga、TCollector 、node_exporter 的数据接入,使用详情参考[taosAdapter](/reference/taosadapter/)。 - -如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为 192.168.1.130,端口为 6046,配置如下: - -```html -LoadPlugin write_tsdb - - - Host "192.168.1.130" Port "6046" HostTags "status=production" StoreRates - false AlwaysAppendDS false - - -``` - -即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 TDengine 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 - -- **调整看板(Dashboard)系统** - -在数据能够正常写入 TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用 TDengine 提供的 Grafana 插件请参考[与其他工具的连接](/third-party/grafana)。 - -TDengine 提供了默认的两套 Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到 Grafana 中即可激活使用。 - -**图 2. 
导入 Grafana 模板** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "图2. 导入 Grafana 模板") - -操作完以上步骤后,就完成了将 OpenTSDB 替换成为 TDengine 的迁移工作。可以看到整个流程非常简单,不需要写代码,只需要对某些配置文件进行调整即可完成全部的迁移工作。 - -### 3、迁移后架构 - -完成迁移以后,此时的系统整体的架构如下图(图 3)所示,而整个过程中采集端、数据写入端、以及监控呈现端均保持了稳定,除了极少的配置调整外,不涉及任何重要的更改和变动。OpenTSDB 大量的应用场景均为 DevOps ,这种场景下,简单的参数设置即可完成 OpenTSDB 到 TDengine 迁移动作,使用上 TDengine 更加强大的处理能力和查询性能。 - -在绝大多数的 DevOps 场景中,如果你拥有一个小规模的 OpenTSDB 集群(3 台及以下的节点)作为 DevOps 的存储端,依赖于 OpenTSDB 为系统持久化层提供数据存储和查询功能,那么你可以安全地将其替换为 TDengine,并节约更多的计算和存储资源。在同等计算资源配置情况下,单台 TDengine 即可满足 3 ~ 5 台 OpenTSDB 节点提供的服务能力。如果规模比较大,那便需要采用 TDengine 集群。 - -如果你的应用特别复杂,或者应用领域并不是 DevOps 场景,你可以继续阅读后续的章节,更加全面深入地了解将 OpenTSDB 的应用迁移到 TDengine 的高级话题。 - -**图 3. 迁移完成后的系统架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "图 3. 迁移完成后的系统架构") - -## 其他场景的迁移评估与策略 - -### 1、TDengine 与 OpenTSDB 的差异 - -本章将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本章的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 - -TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 - -在 2.3.0.x 版本中,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 - -此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: - -1. `/api/stats`:如果你的应用中使用了该项特性来监控 OpenTSDB 的服务状态,并在应用中建立了相关的逻辑来联动处理,那么这部分状态读取和获取的逻辑需要重新适配到 TDengine。TDengine 提供了全新的处理集群状态监控机制,来满足你的应用对其进行的监控和维护的需求。 -2. `/api/tree`:如果你依赖于 OpenTSDB 的该项特性来进行时间线的层级化组织和维护,那么便无法将其直接迁移至 TDengine。TDengine 采用了数据库->超级表->子表这样的层级来组织和维护时间线,归属于同一个超级表的所有的时间线在系统中同一个层级,但是可以通过不同标签值的特殊构造来模拟应用逻辑上的多级结构。 -3. `Rollup And PreAggregates`:采用了 Rollup 和 PreAggregates 需要应用来决定在合适的地方访问 Rollup 的结果,在某些场景下又要访问原始的结果,这种结构的不透明性让应用处理逻辑变得极为复杂而且完全不具有移植性。我们认为这种策略是时序数据库无法提供高性能聚合情况下的妥协与折中。TDengine 暂不支持多个时间线的自动降采样和(时间段范围的)预聚合,由于 其拥有的高性能查询处理逻辑,即使不依赖于 Rollup 和 (时间段)预聚合计算结果,也能够提供很高性能的查询响应,而且让你的应用查询处理逻辑更加简单。 -4. 
`Rate`: TDengine 提供了两个计算数值变化率的函数,分别是 Derivative(其计算结果与 InfluxDB 的 Derivative 行为一致)和 IRate(其计算结果与 Prometheus 中的 IRate 函数计算结果一致)。但是这两个函数的计算结果与 Rate 有细微的差别,但整体上功能更强大。此外,**OpenTSDB 提供的所有计算函数,TDengine 均有对应的查询函数支持,并且 TDengine 的查询函数功能远超过 OpenTSDB 支持的查询函数,**可以极大地简化你的应用处理逻辑。 - -通过上面的介绍,相信你应该能够了解 OpenTSDB 迁移到 TDengine 带来的变化,这些信息也有助于你正确地判断是否可以接受将应用 迁移到 TDengine 之上,体验 TDengine 提供的强大的时序数据处理能力和便捷的使用体验。 - -### 2、迁移策略 - -首先将基于 OpenTSDB 的系统进行迁移涉及到的数据模式设计、系统规模估算、数据写入端改造,进行数据分流、应用适配工作;之后将两个系统并行运行一段时间,再将历史数据迁移到 TDengine 中。当然如果你的应用中有部分功能强依赖于上述 OpenTSDB 特性,同时又不希望停止使用,可以考虑保持原有的 OpenTSDB 系统运行,同时启动 TDengine 来提供主要的服务。 - -## 数据模型设计 - -一方面,TDengine 要求其入库的数据具有严格的模式定义。另一方面,TDengine 的数据模型相对于 OpenTSDB 来说又更加丰富,多值模型能够兼容全部的单值模型的建立需求。 - -现在让我们假设一个 DevOps 的场景,我们使用了 collectd 收集设备的基础度量(metrics),包含了 memory 、swap、disk 等几个度量,其在 OpenTSDB 中的模式如下: - -| 序号 | 测量(metric) | 值名称 | 类型 | tag1 | tag2 | tag3 | tag4 | tag5 | -| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ | -| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a | -| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | -| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | - -TDengine 要求存储的数据具有数据模式,即写入数据之前需创建超级表并指定超级表的模式。对于数据模式的建立,你有两种方式来完成此项工作:1)充分利用 TDengine 对 OpenTSDB 的数据原生写入的支持,调用 TDengine 提供的 API 将(文本行或 JSON 格式)数据写入,并自动化地建立单值模型。采用这种方式不需要对数据写入应用进行较大的调整,也不需要对写入的数据格式进行转换。 - -在 C 语言层面,TDengine 提供了 `taos_schemaless_insert()` 函数来直接写入 OpenTSDB 格式的数据(在更早版本中该函数名称是 `taos_insert_lines()`)。其代码参考示例请参见安装包目录下示例代码 schemaless.c。 - -2)在充分理解 TDengine 的数据模型基础上,结合生成数据的特点,手动方式建立 OpenTSDB 到 TDengine 的数据模型调整的映射关系。TDengine 能够支持多值模型和单值模型,考虑到 OpenTSDB 均为单值映射模型,这里推荐使用单值模型在 TDengine 中进行建模。 - -- **单值模型**。 - -具体步骤如下:将度量(metrics)的名称作为 TDengine 超级表的名称,该超级表建成后具有两个基础的数据列—时间戳(timestamp)和值(value),超级表的标签等效于 度量 的标签信息,标签数量等同于度量 的标签的数量。子表的表名采用具有固定规则的方式进行命名:`metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...`作为子表名称。 - -在 TDengine 中建立 3 个超级表: - -```sql -create stable memory(ts timestamp, val float) tags(host binary(12),memory_type binary(20), memory_type_instance binary(20), source binary(20)); -create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_binary binary(20), source binary(20)); -create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20)); -``` - -对于子表使用动态建表的方式创建如下所示: - -```sql -insert into memory_vm130_memory_buffered_collectd using memory tags(‘vm130’, ‘memory’, 'buffer', 'collectd') values(1632979445, 3.0656); -``` - -最终系统中会建立 340 个左右的子表,3 个超级表。需要注意的是,如果采用串联标签值的方式导致子表名称超过系统限制(191 字节),那么需要采用一定的编码方式(例如 MD5)将其转化为可接受长度。 - -- **多值模型** - -如果你想要利用 TDengine 的多值模型能力,需要首先满足以下要求:不同的采集量具有相同的采集频率,且能够通过消息队列**同时到达**数据写入端,从而确保使用 SQL 语句将多个指标一次性写入。将度量的名称作为超级表的名称,建立具有相同采集频率且能够同时到达的数据多列模型。子表的表名采用具有固定规则的方式进行命名。上述每个度量均只包含一个测量值,因此无法将其转化为多值模型。 - -## 数据分流与应用适配 - -从消息队列中订阅数据,并启动调整后的写入程序写入数据。 - -数据开始写入持续一段时间后,可以采用 SQL 语句检查写入的数据量是否符合预计的写入要求。统计数据量使用如下 SQL 语句: - -```sql -select count(*) from memory -``` - -完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可用确认数据写入是完整有效的。 - -TDengine 不支持采用 OpenTSDB 的查询语法进行查询或数据获取处理,但是针对 OpenTSDB 的每种查询都提供对应的支持。可以用检查附录 1 获取对应的查询处理的调整和应用使用的方式,如果需要全面了解 TDengine 支持的查询类型,请参阅 TDengine 的用户手册。 - -TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 - -## 历史数据迁移 - -### 1、使用工具自动迁移数据 - -为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 
DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine,请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 - -在对 DataX 进行迁移实践后,我们发现通过启动多个进程、同时迁移多个 metric 的方式,可以大幅度地提高迁移历史数据的效率(并行启动的方式可参考表格后的脚本示意)。下面是迁移过程中的部分记录,希望能为应用迁移工作带来参考。 - -| DataX 实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | -| ----------------------------- | --------------------- | -| 1 | 约 13.9 万 | -| 2 | 约 21.8 万 | -| 3 | 约 24.9 万 | -| 5 | 约 29.5 万 | -| 10 | 约 33 万 | - -(注:测试数据源自单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag)
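- -下面是一个并行启动多个 DataX 作业的 shell 脚本示意。其中 datax.py 的路径与作业文件 job_*.json 的命名均为假设值,请按实际环境调整: - -```bash -# 示意:为每个 metric 准备一个 DataX 作业文件,并行启动迁移进程 -# (datax.py 路径与 job_*.json 文件名为假设值) -for m in memory swap disk; do - nohup python datax/bin/datax.py job_${m}.json > datax_${m}.log 2>&1 & -done -wait -```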
- -### 2、手动迁移数据 - -如果你需要使用多值模型进行数据写入,就需要自行开发一个将数据从 OpenTSDB 导出的工具,然后确认哪些时间线能够合并导入到同一个时间线,再将可以同时导入的时间线通过 SQL 语句写入到数据库中。 - -手动迁移数据需要注意以下两个问题: - -1)在磁盘中存储导出数据时,磁盘需要有足够的存储空间以便能够充分容纳导出的数据文件。为了避免全量数据导出后导致磁盘文件存储紧张,可以采用部分导入的模式,对于归属于同一个超级表的时间线优先导出,然后将导出部分的数据文件导入到 TDengine 系统中。 - -2)在系统全负载运行下,如果有足够的剩余计算和 IO 资源,可以建立多线程的导入机制,最大限度地提升数据迁移的效率。考虑到数据解析对于 CPU 带来的巨大负载,需要控制最大的并行任务数量,以避免因导入历史数据而触发的系统整体过载。 - -由于 TDengine 本身操作的简易性,整个过程不需要进行索引维护、数据格式变化的处理等工作,只需要顺序执行即可。 - -当历史数据完全导入到 TDengine 以后,此时两个系统处于同时运行的状态,之后便可以将查询请求切换到 TDengine 上,从而实现无缝的应用切换。 - -## 附录 1: OpenTSDB 查询函数对应表 - -### Avg - -等效函数:avg - -示例: - -```sql -SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s) -``` - -备注: - -1. 内层查询的 interval 数值需要与外层查询的 interval 数值相同。 -2. 在 TDengine 中,插值处理需要使用子查询来协助完成,如上所示,在内层查询中指明插值类型即可。由于 OpenTSDB 中数值的插值使用了线性插值,因此在插值子句中使用 fill(linear) 来声明插值类型。以下有相同插值计算需求的函数,均采用该方法处理。 -3. Interval 中的参数 20s 表示将内层查询按照 20 秒一个时间窗口生成结果。在真实的查询中,需要调整为不同记录之间的时间间隔,这样可确保等效于原始数据生成了插值结果。 -4. 由于 OpenTSDB 特殊的插值策略和机制,聚合查询(Aggregate)中先插值再计算的方式导致其计算结果与 TDengine 不可能完全一致。但是在降采样(Downsample)的情况下,TDengine 和 OpenTSDB 能够获得一致的结果(由于 OpenTSDB 在聚合查询和降采样查询中采用了完全不同的插值策略)。 - -### Count - -等效函数:count - -示例: - -```sql -select count(*) from super_table_name; -``` - -### Dev - -等效函数:stddev - -示例: - -```sql -select stddev(val) from table_name; -``` - -### Estimated percentiles - -等效函数:apercentile - -示例: - -```sql -select apercentile(col1, 50, 't-digest') from table_name; -``` - -备注: - -1. 近似查询处理过程中,OpenTSDB 默认采用 t-digest 算法,所以为了获得相同的计算结果,需要在 apercentile 函数中指明使用的算法。TDengine 能够支持两种不同的近似处理算法,分别通过 'default' 和 't-digest' 来声明。 -### First - -等效函数:first - -示例: - -```sql -select first(col1) from table_name; -``` - -### Last - -等效函数:last - -示例: - -```sql -select last(col1) from table_name; -``` - -### Max - -等效函数:max - -示例: - -```sql -select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s); -``` - -备注:Max 函数需要插值,原因见上。 - -### Min - -等效函数:min - -示例: - -```sql -select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s); -``` - -### MinMax - -等效函数:max - -```sql -select max(val) from table_name; -``` - -备注:该函数无插值需求,因此可以直接计算。 - -### MimMin - -等效函数:min - -```sql -select min(val) from table_name; -``` - -备注:该函数无插值需求,因此可以直接计算。 - -### Percentile - -等效函数:percentile - -### Sum - -等效函数:sum - -```sql -select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s); -``` - -备注:Sum 函数需要插值,原因见上。 - -### Zimsum - -等效函数:sum - -```sql -select sum(val) from table_name; -``` - -备注:该函数无插值需求,因此可以直接计算。 - -完整示例: - -```json -// OpenTSDB 查询 JSON -query = { - "start": 1510560000, - "end": 1515000009, - "queries": [{ - "aggregator": "count", - "metric": "cpu.usage_user" - }] -} - -// 等效查询 SQL: -SELECT count(*) -FROM `cpu.usage_user` -WHERE ts >= 1510560000 AND ts <= 1515000009 -``` - -## 附录 2: 资源估算方法 - -### 数据生成环境 - -我们仍然使用第 4 章中假设的环境,共 3 个测量值:温度和湿度的数据写入速率是每 5 秒一条记录,时间线 10 万个;空气质量的写入速率是每 10 秒一条记录,时间线 1 万个;查询的请求频率为 500 QPS。 - -### 存储资源估算 - -假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为 `t` 条/秒,每条记录的长度为 `L` bytes,则每秒产生的数据规模为 `n×t×L` bytes,每天产生的数据规模为 `86400×n×t×L` bytes。假设压缩比为 C,则每日产生的数据规模为 `(86400×n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要的存储资源: - -```matlab -(86400×n×t×L)×(365×1.5)×(1+20%)/C -``` - -将参数代入上述公式,在不考虑标签信息的情况下,每年产生的原始数据规模是 11.8TB。需要注意的是,由于标签信息在 TDengine 中关联到每个时间线,并不是每条记录,所以需要记录的数据量规模相对于产生的数据有一定的降低,而这部分标签数据整体上可以忽略不计。假设压缩比为 5,则保留的数据规模最终为 2.56 TB。
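- -把上述估算公式写成一段简单的计算脚本,便于代入不同的参数(以下为示意代码,函数名与默认参数取值均为本文示例的假设值): - -```python -def storage_bytes(n, t, L, C=5, years=1.5, redundancy=0.2): - """按上文公式估算所需存储空间(bytes):(86400×n×t×L)×(365×years)×(1+redundancy)/C""" - per_day = 86400 * n * t * L # 每天产生的原始数据量(bytes) - return per_day * 365 * years * (1 + redundancy) / C -```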
- -### 存储设备选型考虑 - -硬盘应该选用具有较好随机读性能的硬盘设备,如果能够有 SSD,尽可能考虑使用 SSD。较好的随机读性能的磁盘对于提升系统查询性能具有极大的帮助,能够整体上提升系统的查询响应性能。为了获得较好的查询性能,硬盘设备的单线程随机读 IOPS 的性能指标不应该低于 1000,能够达到 5000 IOPS 以上为佳。为了评估当前设备的随机读 IO 性能,建议使用 `fio` 软件对其进行测试(具体的使用方式请参阅附录 1),确认其是否能够满足大文件随机读性能要求。 - -硬盘写性能对于 TDengine 的影响不大。TDengine 写入过程采用了追加写的模式,所以只要有较好的顺序写性能即可,一般意义上的 SAS 硬盘和 SSD 均能够很好地满足 TDengine 对于磁盘写入性能的要求。 - -### 计算资源估算 - -由于物联网数据的特殊性,数据产生的频率固定以后,TDengine 写入的过程对于(计算和存储)资源的消耗都保持一个相对固定的量。根据《[TDengine 运维指南](/operation/)》上的描述,该系统中每秒 22000 次写入,消耗的 CPU 不到 1 个核。 - -在针对查询所需要消耗的 CPU 资源的估算上,假设应用要求数据库提供的 QPS 为 10000,每次查询消耗的 CPU 时间约 1 ms,那么每个核每秒提供的查询为 1000 QPS,满足 10000 QPS 的查询请求至少需要 10 个核。为了让系统整体上 CPU 负载小于 50%,整个集群需要 10 个核的两倍,即 20 个核。 - -### 内存资源估算 - -数据库默认为每个 Vnode 分配 16MB×3 的内存缓冲区。集群系统包括 22 个 CPU 核,则默认会建立 22 个虚拟节点 Vnode;每个 Vnode 包含 1000 张表,即可容纳所有的表。按此配置,约 1 个半小时写满一个 block 并触发落盘,可以不做调整。22 个 Vnode 共计需要内存缓存约 1GB。考虑到查询所需要的内存,假设每次查询的内存开销约 50MB,则 500 个并发查询需要的内存约 25GB。 - -综上所述,可使用单台 16 核 32GB 的机器,或者使用 2 台 8 核 16GB 机器构成的集群。 - -## 附录 3: 集群部署及启动 - -TDengine 提供了丰富的帮助文档,说明集群安装、部署的诸多方面的内容,这里提供相应的文档列表,供你参考。 - -### 集群部署 - -首先是安装 TDengine,从官网上下载 TDengine 最新稳定版,解压缩后运行 install.sh 进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意:安装完成以后,不要立即启动 `taosd` 服务,在正确配置完参数以后再启动 `taosd` 服务。 - -### 设置运行参数并启动服务 - -为确保系统能够正常获取运行的必要信息,请在服务端正确设置以下关键参数: - -FQDN、firstEp、secondEp、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](/cluster/)》。 - -按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。 - -最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有加入集群的节点,那么集群就顺利搭建完成了。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](/cluster/)》。 - -## 附录 4: 超级表名称 - -OpenTSDB 的 metric 名称中带有点号("."),例如 "cpu.usage_user" 这种名称的 metric。而点号在 TDengine 中具有特殊含义,是用来分隔数据库和表名称的分隔符。TDengine 提供了转义符,以允许用户在(超级)表名称中使用关键词或特殊分隔符(如点号):采用转义字符(反引号)将表的名称括起来,例如 `cpu.usage_user` 就是合法的(超级)表名称。 - -## 附录 5:参考文章 - -1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](/application/collectd/) -2. [通过 collectd 将采集数据直接写入 TDengine](/third-party/collectd/) diff --git a/docs-cn/27-train-faq/01-faq.md b/docs-cn/27-train-faq/01-faq.md deleted file mode 100644 index e8a106d5d682948d97029cf36b7a47677a491804..0000000000000000000000000000000000000000 --- a/docs-cn/27-train-faq/01-faq.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 常见问题及反馈 ---- - -## 问题反馈 - -如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中的内容打包: - -1. /var/log/taos (如果没有修改过默认路径) -2. /etc/taos - -附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。 - -为了保证有足够的 debug 信息,如果问题能够复现,请修改 /etc/taos/taos.cfg 文件,在最后面添加一行 "debugFlag 135"(不带引号),然后重启 taosd,复现问题后再提交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。 - -``` - alter dnode <dnode_id> debugFlag 135; -``` - -但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。 - -## 常见问题列表 - -### 1. TDengine 2.0 之前的版本升级到 2.0 及以上的版本应该注意什么?☆☆☆ - -2.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: - -1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` -2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` -3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` -4. 安装最新稳定版本的 TDengine -5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 - -### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 - -### 3. 创建数据表时提示 more dnodes are needed - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。 - -### 4. 如何让 TDengine crash 时生成 core 文件? 
- -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 - -### 5. 遇到错误“Unable to establish connection” 怎么办? - -客户端遇到连接故障,请按照下面的步骤进行检查: - -1. 检查网络环境 - - - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030-6042 的访问权限 - - 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname - - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端 - -2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用 - -3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* - -4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 - -5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。 - -6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。 - -7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里 - -8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_) - -9. 如果仍不能排除连接故障 - - - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅 - 检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} ` - 检查服务器侧 TCP 端口连接是否工作:`nc -l {port}` - 检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}` - - - Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问 - -10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括 TCP 和 UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 - -### 6. 遇到错误 “Unexpected generic error in RPC”或者“Unable to resolve FQDN” 怎么办? - -产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查: - -1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) -2. 如果网络配置有 DNS server,请检查是否正常工作 -3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址 -4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的 -5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnodeEps.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。 -6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN - -### 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误? - -如果你确认语法正确,2.0 之前版本,请检查 SQL 语句长度是否超过 64K。如果超过,也会返回这个错误。 - -### 8. 是否支持 validation queries? - -TDengine 还没有一组专用的 validation queries。然而建议你使用系统监测的数据库”log"来做。 - - - -### 9. 我可以删除或更新一条记录吗? - -TDengine 删除功能只在 2.6.0.0 及以后的企业版中提供。 - -从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。 - -另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。 - -此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。 - -### 10. 我怎么创建超过 1024 列的表? - -使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) - -### 11. 最有效的写入数据的方法是什么? - -批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。 - -### 12. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决? 
- -Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的 `taos` 客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse、IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下: - -```JAVA -Class.forName("com.taosdata.jdbc.TSDBDriver"); -Properties properties = new Properties(); -// 在建立连接前初始化客户端的本地化配置 -properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); -Connection conn = DriverManager.getConnection(url, properties); -``` - -### 13. Windows 系统下客户端无法正常显示中文字符? - -Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。 - -【v2.2.1.5 以后版本】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置: - -``` -locale C -charset UTF-8 -``` - -### 14. JDBC 报错: the executed SQL is not a DML or a DDL? - -请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java) - -### 15. taos connect failed, reason: invalid timestamp - -常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 - -### 16. 表名显示不全 - -由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示不全的表名进行相关操作,会发生 Table does not exist 错误。解决方法:修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth,或者直接输入命令 `set max_binary_display_width 100`,或者在命令结尾使用 \G 参数来调整结果的显示方式。 - -### 17. 如何进行数据迁移? - -TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动到机器 B 时,注意如下两件事: - - - 2.0.0.0 至 2.0.6.x 的版本,重新配置机器 B 的 hostname 为机器 A 的 hostname。 - - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 中 dnodeId 对应的 FQDN 后重启,并确保集群内所有机器的此文件完全相同。 - - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 - -### 18. 如何在命令行程序 taos 中临时调整日志级别 - -为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: - -```sql -ALTER LOCAL flag_name flag_value; -``` - -其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): - - - flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag - - flag_value 的取值可以是:131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志) - -```sql -ALTER LOCAL RESETLOG; -``` - -其含义是,清空本机所有由客户端生成的日志文件。 - - - -### 19. go 语言编写组件编译失败怎样解决? - -TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 - -目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: - -```sh -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` - -如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 的编译,即使用 `cmake .. -DBUILD_HTTP=true` 进行构建。 - -### 20. 如何查询数据占用的存储空间大小? - -默认情况下,TDengine 的数据文件存储在 /var/lib/taos,日志文件存储在 /var/log/taos。 - -若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。 - -若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;`,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 - -若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) - -### 21. 客户端连接串如何保证高可用? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) - -### 22. 时间戳的时区信息是怎样处理的? - -TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 - -客户端在处理时间戳字符串时,会采取如下逻辑: - -1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 -2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 -3. 
如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 -4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 - -### 23. TDengine 2.0 都会用到哪些网络端口? - -使用到的网络端口请看文档:[serverport](/reference/config/#serverport) - -需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 - -### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?? - -taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 - -需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 - -有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) - -### 25. 发生了 OOM 怎么办? - -OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 - -TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 diff --git a/docs-cn/27-train-faq/03-docker.md b/docs-cn/27-train-faq/03-docker.md deleted file mode 100644 index 7791569b25e102b4634f0fb899fc0973cacc0aa1..0000000000000000000000000000000000000000 --- a/docs-cn/27-train-faq/03-docker.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -title: 通过 Docker 快速体验 TDengine ---- - -虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 macOS 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从 2.0.14.0 版本开始,TDengine 提供的镜像已经可以同时支持 X86-64、X86、arm64、arm32 平台,像 NAS、树莓派、嵌入式开发板之类可以运行 docker 的非主流计算机也可以基于本文档轻松体验 TDengine。 - -下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 - -## 下载 Docker - -Docker 工具自身的下载请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。 - -安装完毕后可以在命令行终端查看 Docker 版本。如果版本号正常输出,则说明 Docker 环境已经安装成功。 - -```bash -$ docker -v -Docker version 20.10.3, build 48d30b5 -``` - -## 使用 Docker 在容器中运行 TDengine - -### 在 Docker 容器中运行 TDengine server - -```bash -$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd -``` - -这条命令,启动一个运行了 TDengine server 的 docker 容器,并且将容器的 6030 到 6049 端口映射到宿主机的 6030 到 6049 端口上。如果宿主机已经运行了 TDengine server 并占用了相同端口,需要映射容器的端口到不同的未使用端口段。(详情参见 [TDengine 2.0 端口说明](/train-faq/faq#port)。为了支持 TDengine 客户端操作 TDengine server 服务, TCP 和 UDP 端口都需要打开。 - -- **docker run**:通过 Docker 运行一个容器 -- **-d**:让容器在后台运行 -- **-p**:指定映射端口。注意:如果不是用端口映射,依然可以进入 Docker 容器内部使用 TDengine 服务或进行应用开发,只是不能对容器外部提供服务 -- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像 -- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器 - -进一步,还可以使用 docker run 命令启动运行 TDengine server 的 docker 容器,并使用 `--name` 命令行参数将容器命名为 `tdengine`,使用 `--hostname` 指定 hostname 为 `tdengine-server`,通过 `-v` 挂载本地目录到容器,实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 - -```bash -docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 
6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -``` - -- **--name tdengine**:设置容器名称,我们可以通过容器名称来访问对应的容器 -- **--hostname=tdengine-server**:设置容器内 Linux 系统的 hostname,我们可以通过映射 hostname 和 IP 来解决容器 IP 可能变化的问题。 -- **-v**:设置宿主机文件目录映射到容器内目录,避免容器删除后数据丢失。 - -### 使用 docker ps 命令确认容器是否已经正确运行 - -```bash -docker ps -``` - -输出示例如下: - -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS ··· -c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· -``` - -- **docker ps**:列出所有正在运行状态的容器信息。 -- **CONTAINER ID**:容器 ID。 -- **IMAGE**:使用的镜像。 -- **COMMAND**:启动容器时运行的命令。 -- **CREATED**:容器创建时间。 -- **STATUS**:容器状态。UP 表示运行中。 - -### 通过 docker exec 命令,进入到 docker 容器中去做开发 - -```bash -$ docker exec -it tdengine /bin/bash -root@tdengine-server:~/TDengine-server-2.4.0.4# -``` - -- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 -- **-i**:进入交互模式。 -- **-t**:指定一个终端。 -- **tdengine**:容器名称,需要根据 docker ps 指令返回的值进行修改。 -- **/bin/bash**:载入容器后运行 bash 来进行交互。 - -进入容器后,执行 taos shell 客户端程序。 - -```bash -root@tdengine-server:~/TDengine-server-2.4.0.4# taos - -Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - -taos> -``` - -TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 - -在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](/taos-sql/)。 - -### 在宿主机访问 Docker 容器中的 TDengine server - -在使用了 -p 命令行参数映射了正确的端口启动了 TDengine Docker 容器后,就在宿主机使用 taos shell 命令即可访问运行在 Docker 容器中的 TDengine。 - -``` -$ taos - -Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - -taos> -``` - -也可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。 - -``` -curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql -``` - -输出示例如下: - -``` -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} -``` - -这条命令,通过 REST API 访问 TDengine server,这时连接的是本机的 6041 端口,可见连接成功。 - -TDengine REST API 详情请参考[官方文档](/reference/rest-api/)。 - -### 使用 Docker 容器运行 TDengine server 和 taosAdapter - -在 TDengine 2.4.0.0 之后版本的 Docker 容器,开始提供一个独立运行的组件 taosAdapter,代替之前版本 TDengine 中 taosd 进程中内置的 http server。taosAdapter 支持通过 RESTful 接口对 TDengine server 的数据写入和查询能力,并提供和 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。在新版本 Docker 镜像中,默认启用了 taosAdapter,也可以使用 docker run 命令中设置 TAOS_DISABLE_ADAPTER=true 来禁用 taosAdapter;也可以在 docker run 命令中单独使用 taosAdapter,而不运行 taosd 。 - -注意:如果容器中运行 taosAdapter,需要根据需要映射其他端口,具体端口默认配置和修改方法请参考[taosAdapter 文档](/reference/taosadapter/)。 - -使用 docker 运行 TDengine 2.4.0.4 版本镜像(taosd + taosAdapter): - -```bash -docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 -``` - -使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosAdapter,需要设置 firstEp 配置项 或 TAOS_FIRST_EP 环境变量): - -```bash -docker run -d --name 
tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter -``` - -使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosd): - -```bash -docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4 -``` - -使用 curl 命令验证 RESTful 接口可以正常工作: - -```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql -``` - -输出示例如下: - -``` -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} -``` - -### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server - -1. 在宿主机命令行界面执行 taosBenchmark (曾命名为 taosdemo)写入数据到 Docker 容器中的 TDengine server - - ```bash - $ taosBenchmark - - taosBenchmark is simulating data generated by power equipments monitoring... - - host: 127.0.0.1:6030 - user: root - password: taosdata - configDir: - resultFile: ./output.txt - thread num of insert data: 10 - thread num of create table: 10 - top insert interval: 0 - number of records per req: 30000 - max sql length: 1048576 - database count: 1 - database[0]: - database[0] name: test - drop: yes - replica: 1 - precision: ms - super table count: 1 - super table[0]: - stbName: meters - autoCreateTable: no - childTblExists: no - childTblCount: 10000 - childTblPrefix: d - dataSource: rand - iface: taosc - insertRows: 10000 - interlaceRows: 0 - disorderRange: 1000 - disorderRatio: 0 - maxSqlLen: 1048576 - timeStampStep: 1 - startTimestamp: 2017-07-14 10:40:00.000 - sampleFormat: - sampleFile: - tagsFile: - columnCount: 3 - column[0]:FLOAT column[1]:INT column[2]:FLOAT - tagCount: 2 - tag[0]:INT tag[1]:BINARY(16) - - Press enter key to continue or Ctrl-C to stop - ``` - - 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDieo"。 - - 最后共插入 1 亿条记录。 - -2. 进入 TDengine 终端,查看 taosBenchmark 生成的数据。 - - - **进入命令行。** - - ```bash - $ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos - - Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 - Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - - taos> - ``` - - - **查看数据库。** - - ```bash - $ taos> show databases; - name | created_time | ntables | vgroups | ··· - test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· - log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· - - ``` - - - **查看超级表。** - - ```bash - $ taos> use test; - Database changed. 
- - $ taos> show stables; - name | created_time | columns | tags | tables | - ============================================================================================ - meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | - Query OK, 1 row(s) in set (0.003259s) - - ``` - - - **查看表,限制输出十条。** - - ```bash - $ taos> select * from test.t0 limit 10; - - DB error: Table does not exist (0.002857s) - taos> select * from test.d0 limit 10; - ts | current | voltage | phase | - ====================================================================================== - 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | - 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | - 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | - 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | - 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | - 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | - 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | - 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | - 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | - 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | - Query OK, 10 row(s) in set (0.016791s) - - ``` - - - **查看 d0 表的标签值。** - - ```bash - $ taos> select groupid, location from test.d0; - groupid | location | - ================================= - 0 | California.SanDieo | - Query OK, 1 row(s) in set (0.003490s) - ``` - -### 应用示例:使用数据收集代理软件写入 TDengine - -taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StasD 写入数据,在宿主机执行命令如下: - -``` -echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 -``` - -然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容: - -``` -taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | - statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | -Query OK, 2 row(s) in set (0.002112s) - -taos> use statsd; -Database changed. 
- -taos> show stables; - name | created_time | columns | tags | tables | -============================================================================================ - foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | -Query OK, 1 row(s) in set (0.001160s) - -taos> select * from foo; - ts | value | metric_type | -======================================================================================= - 2021-12-28 09:21:48.840820836 | 1 | counter | -Query OK, 1 row(s) in set (0.001639s) - -taos> -``` - -可以看到模拟数据已经被写入到 TDengine 中。 - -## 停止正在 Docker 中运行的 TDengine 服务 - -```bash -docker stop tdengine -``` - -- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 diff --git a/docs-cn/30-release/01-2.6.md b/docs-cn/30-release/01-2.6.md deleted file mode 100644 index 85b76d9999e211336b5859beab3fdfc7988f4fda..0000000000000000000000000000000000000000 --- a/docs-cn/30-release/01-2.6.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 2.6 ---- - -[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4) - -[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1) - -[2.6.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.0) diff --git a/docs-cn/30-release/02-2.4.md b/docs-cn/30-release/02-2.4.md deleted file mode 100644 index 62580b327a3bd5098e1b7f1162a1c398ac2a5eff..0000000000000000000000000000000000000000 --- a/docs-cn/30-release/02-2.4.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: 2.4 ---- - -[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26) - -[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25) - -[2.4.0.24](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.24) - -[2.4.0.20](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.20) - -[2.4.0.18](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.18) - -[2.4.0.16](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.16) - -[2.4.0.14](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.14) - -[2.4.0.12](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.12) - -[2.4.0.10](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.10) - -[2.4.0.7](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.7) - -[2.4.0.5](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.5) - -[2.4.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.4) - -[2.4.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.0) diff --git a/docs-en/01-index.md b/docs-en/01-index.md deleted file mode 100644 index d76c12e10fce24dff9f916945f5b6236857ebb8d..0000000000000000000000000000000000000000 --- a/docs-en/01-index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: TDengine Documentation -sidebar_label: Documentation Home -slug: / ---- - -TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators. - -To get a global view about TDengine, like feature list, benchmarks, and competitive advantages, please browse through section [Introduction](./intro). 
- -TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read ["Concepts"](./concept) thoroughly. - -If you are a developer, please read the ["Developer Guide"](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work. - -We live in the era of big data, and scale-up is unable to meet the growing business needs. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but it also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster"](./cluster). - -TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as rollup, interpolation and time-weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions. - -If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please thoroughly read the ["Administration"](./operation) section. - -If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter. - -If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine"](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. - -TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly. - -Together, we make a difference. diff --git a/docs-en/02-intro/index.md b/docs-en/02-intro/index.md deleted file mode 100644 index f6766f910f4d7560b782bf02ffa97922523e6167..0000000000000000000000000000000000000000 --- a/docs-en/02-intro/index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Introduction -toc_max_heading_level: 2 ---- - -TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature, is open source under GNU AGPL v3.0. 
Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. - -This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine. - -## Major Features - -The major features are listed below: - -1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others. -2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code. -3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others. -4. Support for [user defined functions](/develop/udf). -5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios. -6. Support for [continuous query](/develop/continuous-query). -7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions. -8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. -9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries. -10. Provides many ways to [import](/operation/import) and [export](/operation/export) data. -11. Provides [monitoring](/operation/monitor) on running instances of TDengine. -12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages. -13. Provides a [REST API](/reference/rest-api/). -14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization. -15. Supports seamless integration with Google Data Studio. - -For more details on features, please read through the entire documentation. - -## Competitive Advantages - -Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages. - -- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs. 
- -- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source. - -- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion. - -- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain. - -- **Seamless Integration**: Without a single line of code, TDengine provide seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated. - -- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools. - -- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs. - -- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming. - -With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced. - -## Technical Ecosystem -This is how TDengine would be situated, in a typical time-series data processing platform: - -![TDengine Database Technical Ecosystem ](eco_system.webp) - -
Figure 1. TDengine Technical Ecosystem
- -On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance. - -## Typical Use Cases - -As a high-performance, scalable and SQL supported time-series database, TDengine's typical use case include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data. As such, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM and so on. More generally TDengine is not a suitable storage engine for non-time-series data. This section makes a more detailed analysis of the applicable scenarios. - -### Characteristics and Requirements of Data Sources - -| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- | -| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.| -| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. | -| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. | - -### System Architecture Requirements - -| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. | -| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. | -| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. 
| - -### System Function Requirements - -| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.| -| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. | - -### System Performance Requirements - -| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. | -| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.| -| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. | - -### System Maintenance Requirements - -| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. 
| -| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.| -| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.| - -## Comparison with other databases - -- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html) -- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html) -- [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf) -- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html) -- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html) -- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html) diff --git a/docs-en/05-get-started/_pkg_install.mdx b/docs-en/05-get-started/_pkg_install.mdx deleted file mode 100644 index cf10497c96ba1d777e45340b0312d97c127b6fcb..0000000000000000000000000000000000000000 --- a/docs-en/05-get-started/_pkg_install.mdx +++ /dev/null @@ -1,17 +0,0 @@ -import PkgList from "/components/PkgList"; - -It's very easy to install TDengine and would take you only a few minutes from downloading to finishing installation. - -For the convenience of users, from version 2.4.0.10, the standard server side installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark` and sample code. If only the `taosd` server and C/C++ connector are required, you can also choose to download the lite package. - -Three kinds of packages are provided, tar.gz, rpm and deb. Especially the tar.gz package is provided for the convenience of enterprise customers on different kinds of operating systems, it includes `taosdump` and TDinsight installation script which are normally only provided in taos-tools rpm and deb packages. - -Between two major release versions, some beta versions may be delivered for users to try some new features. - - - -For the details please refer to [Install and Uninstall](/operation/pkg-install)。 - -To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases). - - diff --git a/docs-en/05-get-started/index.md b/docs-en/05-get-started/index.md deleted file mode 100644 index 56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a..0000000000000000000000000000000000000000 --- a/docs-en/05-get-started/index.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Get Started -description: 'Install TDengine from Docker image, apt-get or package, and run TAOS CLI and taosBenchmark to experience the features' ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import PkgInstall from "./\_pkg_install.mdx"; -import AptGetInstall from "./\_apt_get_install.mdx"; - -## Quick Install - -The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. 
The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd. - -TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future. - - - -If docker is already installed on your computer, execute the following command: - -```shell -docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -``` - -Make sure the container is running: - -```shell -docker ps -``` - -Enter the container and execute bash: - -```shell -docker exec -it <container name> bash -``` - -Then you can execute the Linux commands and access TDengine. - -For detailed steps, please visit [Experience TDengine via Docker](/train-faq/docker). - -:::info -Starting from 2.4.0.10, besides taosd, the TDengine docker image includes: taos, taosAdapter, taosdump, taosBenchmark, TDinsight, scripts and sample code. Once the TDengine container is started, it will start both taosAdapter and taosd automatically to support the RESTful interface. - -::: - - - - - - - - - - -If you like to check the source code, build the package by yourself or contribute to the project, please check [TDengine GitHub Repository](https://github.com/taosdata/TDengine) - - - - -## Quick Launch - -After installation, you can launch the TDengine service with the `systemctl` command to start `taosd`: - -```bash -systemctl start taosd -``` - -Check if taosd is running: - -```bash -systemctl status taosd -``` - -If everything is fine, you can run the TDengine command-line interface `taos` to access TDengine and test it out yourself. - -:::info - -- systemctl requires _root_ privileges; if you are not _root_, please add sudo before the command. -- To get feedback and keep improving the product, TDengine is collecting some basic usage information, but you can turn it off by setting telemetryReporting to 0 in configuration file taos.cfg. - -- TDengine uses FQDN (usually hostname) as the ID for a node. To make the system work, you need to configure the FQDN for the server running taosd, and configure the DNS service or hosts file on the machine where the application or TDengine CLI runs to ensure that the FQDN can be resolved. -- `systemctl stop taosd` won't stop the server right away; it will wait until all the data in memory is flushed to disk. It may take time depending on the cache size. - -TDengine supports installation on systems that run [`systemd`](https://en.wikipedia.org/wiki/Systemd) for process management. Use `which systemctl` to check if the system has `systemd` installed: - -```bash -which systemctl -``` - -If the system does not have `systemd`, you can start TDengine manually by executing `/usr/local/taos/bin/taosd`. - -::: - -## Command Line Interface - -To manage the TDengine running instance, or execute ad-hoc queries, TDengine provides a Command Line Interface (hereinafter referred to as TDengine CLI) taos. To enter the interactive CLI, execute `taos` on a Linux terminal where TDengine is installed. - -```bash -taos -``` - -If it connects to the TDengine server successfully, it will print out the version and welcome message. If it fails, it will print out the error message; please check [FAQ](/train-faq/faq) for troubleshooting connection issues. 
TDengine CLI's prompt is: - -```cmd -taos> -``` - -Inside TDengine CLI,you can execute SQL commands to create/drop database/table, and run queries. The SQL command must be ended with a semicolon. For example: - -```sql -create database demo; -use demo; -create table t (ts timestamp, speed int); -insert into t values ('2019-07-15 00:00:00', 10); -insert into t values ('2019-07-15 01:00:00', 20); -select * from t; - ts | speed | -======================================== - 2019-07-15 00:00:00.000 | 10 | - 2019-07-15 01:00:00.000 | 20 | -Query OK, 2 row(s) in set (0.003128s) -``` - -Besides executing SQL commands, system administrators can check running status, add/drop user accounts and manage the running instances. TAOS CLI with client driver can be installed and run on either Linux or Windows machines. For more details on CLI, please [check here](../reference/taos-shell/). - -## Experience the blazing fast speed - -After TDengine server is running,execute `taosBenchmark` (previously named taosdemo) from a Linux terminal: - -```bash -taosBenchmark -``` - -This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDiego". - -This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server. - -taosBenchmark provides command-line options and a configuration file to customize the scenarios, like number of tables, number of rows per table, number of columns and more. Please execute `taosBenchmark --help` to list them. For details on running taosBenchmark, please check [reference for taosBenchmark](/reference/taosbenchmark) - -## Experience query speed - -After using taosBenchmark to insert a number of rows data, you can execute queries from TDengine CLI to experience the lightning fast query speed. 
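- -For reference, the objects taosBenchmark creates are roughly equivalent to the following DDL — a sketch inferred from the column and tag description above (column and tag types are assumptions), not necessarily the exact statements taosBenchmark issues: - -```sql --- sketch: database and super table as described above (types inferred) -CREATE DATABASE test; -USE test; -CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -```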
- -query the total number of rows under super table "meters": - -```sql -taos> select count(*) from test.meters; -``` - -query the average, maximum, minimum of 100 million rows: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.meters; -``` - -query the total number of rows with location="California.SanFrancisco": - -```sql -taos> select count(*) from test.meters where location="California.SanFrancisco"; -``` - -query the average, maximum, minimum of all rows with groupId=10: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; -``` - -query the average, maximum, minimum for table d10 in 10 seconds time interval: - -```sql -taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); -``` diff --git a/docs-en/07-develop/01-connect/_connect_c.mdx b/docs-en/07-develop/01-connect/_connect_c.mdx deleted file mode 100644 index 174bf45c4e2f26bab8f57c098f9f8f00d2f5064d..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c title="Native Connection" -{{#include docs-examples/c/connect_example.c}} -``` diff --git a/docs-en/07-develop/01-connect/_connect_cs.mdx b/docs-en/07-develop/01-connect/_connect_cs.mdx deleted file mode 100644 index 52ea2d437123a26bd87e6f3fdc05a17141f9f835..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_cs.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```csharp title="Native Connection" -{{#include docs-examples/csharp/ConnectExample.cs}} -``` - -:::info -C# connector supports only native connection for now. - -::: diff --git a/docs-en/07-develop/01-connect/_connect_go.mdx b/docs-en/07-develop/01-connect/_connect_go.mdx deleted file mode 100644 index 1dd5d67e3533bba21960269e49e3d843b026efc8..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_go.mdx +++ /dev/null @@ -1,17 +0,0 @@ -#### Unified Database Access Interface - -```go title="Native Connection" -{{#include docs-examples/go/connect/cgoexample/main.go}} -``` - -```go title="REST Connection" -{{#include docs-examples/go/connect/restexample/main.go}} -``` - -#### Advanced Features - -The af package of driver-go can also be used to establish connection, with this way some advanced features of TDengine, like parameter binding and subscription, can be used. - -```go title="Establish native connection using af package" -{{#include docs-examples/go/connect/afconn/main.go}} -``` diff --git a/docs-en/07-develop/01-connect/_connect_java.mdx b/docs-en/07-develop/01-connect/_connect_java.mdx deleted file mode 100644 index 1c3e9326bf2ae597ffba683250dd43986e670469..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_java.mdx +++ /dev/null @@ -1,15 +0,0 @@ -```java title="Native Connection" -{{#include docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java}} -``` - -```java title="REST Connection" -{{#include docs-examples/java/src/main/java/com/taos/example/RESTConnectExample.java:main}} -``` - -When using REST connection, the feature of bulk pulling can be enabled if the size of resulting data set is huge. 
- -```java title="Enable Bulk Pulling" {4} -{{#include docs-examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}} -``` - -More configuration about connection,please refer to [Java Connector](/reference/connector/java) diff --git a/docs-en/07-develop/01-connect/_connect_node.mdx b/docs-en/07-develop/01-connect/_connect_node.mdx deleted file mode 100644 index 489b0386e991ee1e8ddd173205637b75ae5a0c95..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_node.mdx +++ /dev/null @@ -1,7 +0,0 @@ -```js title="Native Connection" -{{#include docs-examples/node/nativeexample/connect.js}} -``` - -```js title="REST Connection" -{{#include docs-examples/node/restexample/connect.js}} -``` diff --git a/docs-en/07-develop/01-connect/_connect_python.mdx b/docs-en/07-develop/01-connect/_connect_python.mdx deleted file mode 100644 index 44b7586fadbf618231fce7753d3b4b68853a7f57..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```python title="Native Connection" -{{#include docs-examples/python/connect_example.py}} -``` diff --git a/docs-en/07-develop/01-connect/_connect_r.mdx b/docs-en/07-develop/01-connect/_connect_r.mdx deleted file mode 100644 index 09c3d71ac35b1134d3089247daea9a13db4129e2..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_r.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```r title="Native Connection" -{{#include docs-examples/R/connect_native.r:demo}} -``` diff --git a/docs-en/07-develop/01-connect/_connect_rust.mdx b/docs-en/07-develop/01-connect/_connect_rust.mdx deleted file mode 100644 index aa19f58de6c9bab69df0663e5369402ab1a8f899..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/_connect_rust.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```rust title="Native Connection/REST Connection" -{{#include docs-examples/rust/nativeexample/examples/connect.rs}} -``` - -:::note -For Rust connector, the connection depends on the feature being used. If "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged. - -::: diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md deleted file mode 100644 index 720f8e2384c565d5494ce7d84d531188dae96fe0..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/01-connect/index.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -sidebar_label: Connect -title: Connect -description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import ConnJava from "./\_connect_java.mdx"; -import ConnGo from "./\_connect_go.mdx"; -import ConnRust from "./\_connect_rust.mdx"; -import ConnNode from "./\_connect_node.mdx"; -import ConnPythonNative from "./\_connect_python.mdx"; -import ConnCSNative from "./\_connect_cs.mdx"; -import ConnC from "./\_connect_c.mdx"; -import ConnR from "./\_connect_r.mdx"; -import InstallOnWindows from "../../14-reference/03-connector/\_linux_install.mdx"; -import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.mdx"; -import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx"; -import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx"; - -Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. 
For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. The TDengine community also provides connectors for the Lua and PHP languages. For details about the connectors, please refer to [Connectors](/reference/connector/). - -## Establish Connection - -There are two ways for a connector to establish connections to TDengine: - -1. Connection through the REST API provided by the taosAdapter component; this way is called a "REST connection" hereinafter. -2. Connection through the TDengine client driver (taosc); this way is called a "native connection" hereinafter. - -Key differences: - -1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. -2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions. -3. The REST connection is more accessible with cross-platform support; however, it results in about a 30% performance downgrade. - -## Install Client Driver taosc - -If you choose to use the native connection and the application is not on the same host as the TDengine server, the TDengine client driver taosc needs to be installed on the application host. If you choose to use the REST connection, or the application is on the same host as the TDengine server, this step can be skipped. It's better to use the same version of taosc as the TDengine server. - -### Install - - - - - - - - - - -### Verify - -After the above installation and configuration are done, and after making sure the TDengine service is started and in service, the TDengine command-line interface `taos` can be launched to access TDengine. - - - - - - - - - - -## Install Connectors - - - - -If `maven` is used to manage the project, you only need to add the dependency below in `pom.xml`. - -```xml -<dependency> - <groupId>com.taosdata.jdbc</groupId> - <artifactId>taos-jdbcdriver</artifactId> - <version>2.0.38</version> -</dependency> -``` - - - - -Install from PyPI using `pip`: - -``` -pip install taospy -``` - -Install from Git URL: - -``` -pip install git+https://github.com/taosdata/taos-connector-python.git -``` - - - - -You only need to add the `driver-go` dependency in `go.mod`. - -```go-mod title=go.mod -module goexample - -go 1.17 - -require github.com/taosdata/driver-go/v2 develop -``` - -:::note -`driver-go` uses `cgo` to wrap the APIs provided by taosc, and `cgo` needs `gcc` to compile C source code, so please make sure you have a working `gcc` on your system. - -::: - - - - -You only need to add the `libtaos` dependency in `Cargo.toml`. - -```toml title=Cargo.toml -[dependencies] -libtaos = { version = "0.4.2"} -``` - -:::info -The Rust connector uses different features to determine how the connection is established. To establish a REST connection, please enable the `rest` feature. - -```toml -libtaos = { version = "*", features = ["rest"] } -``` - -::: - - - - -The Node.js connector provides different packages for the different connection types. - -1. Install Node.js Native Connector - -``` -npm i td2.0-connector -``` - -:::note -It's recommended to use a Node.js version between `node-v12.8.0` and `node-v13.0.0`. -::: - -2.
Install Node.js REST Connector - -``` -npm i td2.0-rest-connector -``` - - - - -You only need to add a reference to [TDengine.Connector](https://www.nuget.org/packages/TDengine.Connector/) in the project configuration file. - -```xml title=csharp.csproj {12} -<Project Sdk="Microsoft.NET.Sdk"> - - <PropertyGroup> - <OutputType>Exe</OutputType> - <TargetFramework>net6.0</TargetFramework> - <ImplicitUsings>enable</ImplicitUsings> - <Nullable>enable</Nullable> - <StartupObject>TDengineExample.AsyncQueryExample</StartupObject> - </PropertyGroup> - - <ItemGroup> - <PackageReference Include="TDengine.Connector" /> - </ItemGroup> - -</Project> -``` - -Or add it via the `dotnet` command. - -``` -dotnet add package TDengine.Connector -``` - -:::note -The sample code below is based on .NET 6.0; it may need to be adjusted if your .NET version is different. - -::: - - - - -1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/). -2. Install the dependency package `RJDBC`: - -```R -install.packages("RJDBC") -``` - - - - -If the client driver (taosc) is already installed, then the C connector is already available. -
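To verify the C connector quickly, a minimal connection check like the sketch below can be compiled and run. This is an illustrative sketch only: it assumes a TDengine server reachable at `localhost:6030` with the default `root`/`taosdata` account, which you should adjust to your environment.

```c
// Minimal native-connection check: connect, report the result, clean up.
// Assumes TDengine is running locally with default credentials.
#include <stdio.h>
#include <taos.h>

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) {
    printf("failed to connect to TDengine\n");
    return 1;
  }
  printf("connected to TDengine successfully\n");
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```

On a host where the client driver is installed, something like `gcc connect_check.c -ltaos` should build it.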
- -
- - -**Download Source Code Package and Unzip:** - -```shell -curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ -&& mkdir php-tdengine \ -&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 -``` - -> The version number `v1.0.2` is only an example; it can be replaced with any newer version. Please check the available versions at [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases). - -**Non-Swoole Environment:** - -```shell -phpize && ./configure && make -j && make install -``` - -**Specify TDengine Location:** - -```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install -``` - -> `--with-tdengine-dir=` is followed by the TDengine installation location. -> This option is useful when the TDengine location can't be found automatically, or on macOS. - -**Swoole Environment:** - -```shell -phpize && ./configure --enable-swoole && make -j && make install -``` - -**Enable The Extension:** - -Option One: Add `extension=tdengine` in `php.ini` - -Option Two: Specify the extension on the CLI: `php -d extension=tdengine test.php`
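Whichever option you choose, a quick way to confirm that PHP can actually load the extension is PHP's standard `--ri` flag (shown here with the `-d` option in case `php.ini` was not modified):

```shell
php -d extension=tdengine --ri tdengine
```

If the extension was built and enabled correctly, this prints the extension's information block instead of an error.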
- - - -## Establish Connection - -Prior to establishing a connection, please make sure TDengine is already running and accessible. The following sample code assumes TDengine is running on the same host as the client program, with FQDN configured to "localhost" and serverPort configured to "6030". - - - - - - - - - - - - - - - - - - - - - - - - - - - - -:::tip -If the connection fails, in most cases it's caused by improper configuration of the FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq). - -::: diff --git a/docs-en/07-develop/02-model/index.mdx b/docs-en/07-develop/02-model/index.mdx deleted file mode 100644 index 86853aaaa3f7285fe042a892e2ec903d57894111..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/02-model/index.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Data Model ---- - -The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables, and you must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. - -## Create Database - -The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy, and others, which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed, and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file, and so on. Below is an example of the SQL statement to create a database. - -```sql -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; -``` - -In the above SQL statement: -- a database named "power" will be created -- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically -- a new data file will be created every 10 days -- the number of memory blocks is 6 -- data is allowed to be updated - -For more details, please refer to [Database](/taos-sql/database). - -After creating a database, the current database in use can be switched using the SQL command `USE`. For example, the SQL statement below switches the current database to `power`. Without the current database specified, a table name must be preceded with the corresponding database name. - -```sql -USE power; -``` - -:::note - -- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready. -- JOIN operations can't be performed on tables from two different databases. -- A timestamp needs to be specified when inserting rows or querying historical rows.
- -::: - -## Create STable - -In a time-series application, there may be multiple kinds of data collection points. For example, in an electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the SQL statement below can be used to create the super table. - -```sql -CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); -``` - -:::note -If you are using versions prior to 2.0.15, the `STable` keyword needs to be replaced with `TABLE`. - -::: - -Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage, and phase in the example) are the data collected. These columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string, etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, and manager ID are tags. Tags in the schema can be added, removed, or updated. Please refer to [STable](/taos-sql/stable) for more details. - -For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For an electrical power system, we need to create STables for meters, transformers, busbars, and switches respectively. There may be multiple kinds of data collection points on a single device; for example, there may be one data collection point for electrical data like current and voltage, and another data collection point for environmental data like temperature, humidity, and wind direction. Multiple STables are required for such devices. - -At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, and one or more STables can exist in a database. - -## Create Table - -A specific table needs to be created for each data collection point. Similar to an RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below. - -```sql -CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); -``` - -In the above SQL statement, "d1001" is the table name and "meters" is the STable name, followed by the value of tag "location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. - -In the TDengine system, it's recommended to create a table for a data collection point via a STable. A table created via a STable is called a subtable in some parts of the TDengine documentation.
All SQL commands applied on regular tables can be applied on subtables. - -:::warning -It's not recommended to create a table in a database while using a STable from another database as a template. - -::: - -:::tip -It's suggested to use the globally unique ID of a data collection point as the table name. For example, the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as a tag value. - -::: - -## Create Table Automatically - -In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. - -```sql -INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); -``` - -In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as a template with tag values `"California.SanFrancisco", 2`. - -For more details, please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting). - -## Single Column vs Multiple Column - -A multi-column data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. - -However, there is another kind of design, i.e. the single-column data model, in which a table is created for each metric. This means that a STable is required for each kind of metric. For example, in a single-column model, 3 STables would be required for current, voltage, and phase. - -It's recommended to use a multi-column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use a single-column data model. diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx deleted file mode 100644 index 397b1a14fd76c1372c79eb88575f2bf21cb62050..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -sidebar_label: Insert Using SQL -title: Insert Using SQL ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import JavaSQL from "./_java_sql.mdx"; -import JavaStmt from "./_java_stmt.mdx"; -import PySQL from "./_py_sql.mdx"; -import PyStmt from "./_py_stmt.mdx"; -import GoSQL from "./_go_sql.mdx"; -import GoStmt from "./_go_stmt.mdx"; -import RustSQL from "./_rust_sql.mdx"; -import RustStmt from "./_rust_stmt.mdx"; -import NodeSQL from "./_js_sql.mdx"; -import NodeStmt from "./_js_stmt.mdx"; -import CsSQL from "./_cs_sql.mdx"; -import CsStmt from "./_cs_stmt.mdx"; -import CSQL from "./_c_sql.mdx"; -import CStmt from "./_c_stmt.mdx"; - -## Introduction - -Application programs can execute `INSERT` statements through connectors to insert rows. The TDengine CLI can also be used to manually insert data. - -### Insert Single Row - -The SQL statement below is used to insert one row into table "d1001".
- -```sql -INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); -``` - -### Insert Multiple Rows - -Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001". - -```sql -INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); -``` - -### Insert into Multiple Tables - -Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". - -```sql -INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); -``` - -For more details about `INSERT`, please refer to [INSERT](/taos-sql/insert). - -:::info - -- Inserting in batches can improve performance. Normally, the larger the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB. -- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point, the performance may drop instead of improving. The optimal number of threads needs to be determined by testing in the target environment. - -::: - -:::warning - -- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. -- The timestamp to be inserted must be newer than the current time minus the value of parameter `KEEP`. If `KEEP` is set to 3650 days, then data older than 3650 days can't be inserted. The timestamp to be inserted also can't be newer than the current time plus the value of parameter `DAYS`. If `DAYS` is set to 2, data more than 2 days in the future can't be inserted. - -::: - -## Examples - -### Insert Using SQL - - - - - - - - - - - - - - - - - - - - - - - - - -:::note - -1. The above samples work with either a native connection or a REST connection. -2. Please note that `use db` can't be used with a REST connection because REST connections are stateless, so in the samples `dbName.tbName` is used to specify the table name. - -::: - -### Insert with Parameter Binding - -TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. Since versions 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly, boosting insert performance by avoiding the cost of parsing SQL statements. - -Parameter binding is available only with a native connection.
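To make the flow concrete, below is a minimal single-row binding sketch in C for the table d1001 used above. It is an illustration only: it assumes an already established native connection `taos`, and error checking is omitted for brevity.

```c
// Sketch: bind one row (ts, current, voltage, phase) and insert it into d1001.
// Assumes an existing native connection `taos`; error checks omitted.
TAOS_STMT *stmt = taos_stmt_init(taos);
taos_stmt_prepare(stmt, "INSERT INTO d1001 VALUES(?, ?, ?, ?)", 0);

int64_t ts = 1538548685000;
float   current = 10.3f;
int     voltage = 219;
float   phase = 0.31f;

TAOS_BIND params[4] = {0};
params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer        = &ts;
params[0].buffer_length = sizeof(ts);
params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
params[1].buffer        = &current;
params[1].buffer_length = sizeof(current);
params[2].buffer_type   = TSDB_DATA_TYPE_INT;
params[2].buffer        = &voltage;
params[2].buffer_length = sizeof(voltage);
params[3].buffer_type   = TSDB_DATA_TYPE_FLOAT;
params[3].buffer        = &phase;
params[3].buffer_length = sizeof(phase);

taos_stmt_bind_param(stmt, params);  // bind values to the ? placeholders
taos_stmt_add_batch(stmt);           // queue the bound row
taos_stmt_execute(stmt);             // send the batch to the server
taos_stmt_close(stmt);
```

The language-specific examples below follow this same prepare/bind/execute cycle.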
- - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs-en/07-develop/03-insert-data/_c_line.mdx b/docs-en/07-develop/03-insert-data/_c_line.mdx deleted file mode 100644 index 5ef2e9af774c54e9f090357286f83d2280c2ab11..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_c_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/line_example.c:main}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_c_opts_json.mdx b/docs-en/07-develop/03-insert-data/_c_opts_json.mdx deleted file mode 100644 index 22ad2e0122797248a372734aac0f3a16a1356530..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_c_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/json_protocol_example.c:main}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_c_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_c_opts_telnet.mdx deleted file mode 100644 index 508d7bc98a149f49766bcd0a474ffe226cbe30bb..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_c_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/telnet_line_example.c:main}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_c_sql.mdx b/docs-en/07-develop/03-insert-data/_c_sql.mdx deleted file mode 100644 index f4153fd2c427677a338d0c377663d0335f2672f0..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_c_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/insert_example.c}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_c_stmt.mdx b/docs-en/07-develop/03-insert-data/_c_stmt.mdx deleted file mode 100644 index 7f5ef23a849689c36e732b6fd374a131695c9090..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_c_stmt.mdx +++ /dev/null @@ -1,6 +0,0 @@ -```c title=Single Row Binding -{{#include docs-examples/c/stmt_example.c}} -``` -```c title=Multiple Row Binding 72:117 -{{#include docs-examples/c/multi_bind_example.c}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_cs_line.mdx b/docs-en/07-develop/03-insert-data/_cs_line.mdx deleted file mode 100644 index 9c275ee3d7c7a1e52fbb34dbae922004543ee3ce..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_cs_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/InfluxDBLineExample.cs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_cs_opts_json.mdx b/docs-en/07-develop/03-insert-data/_cs_opts_json.mdx deleted file mode 100644 index 3d538b8506b298241faecd8098f89571359135c9..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_cs_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/OptsJsonExample.cs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_cs_opts_telnet.mdx deleted file mode 100644 index c53bf3d7233115351e5af03b7d9e6318aa4a0da6..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_cs_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/OptsTelnetExample.cs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_cs_sql.mdx b/docs-en/07-develop/03-insert-data/_cs_sql.mdx deleted file mode 100644 index 
c7688bfbe77a1135424d829fe9b29fbb1bc93ae2..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_cs_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/SQLInsertExample.cs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_cs_stmt.mdx b/docs-en/07-develop/03-insert-data/_cs_stmt.mdx deleted file mode 100644 index 97c3b910ffeb9e0c88fc143a02014115e819c147..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_cs_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/StmtInsertExample.cs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_go_line.mdx b/docs-en/07-develop/03-insert-data/_go_line.mdx deleted file mode 100644 index cd225945b70e28bef2ca7fdaf0d9be0ad7ffc18c..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_go_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/line/main.go}} -``` diff --git a/docs-en/07-develop/03-insert-data/_go_opts_json.mdx b/docs-en/07-develop/03-insert-data/_go_opts_json.mdx deleted file mode 100644 index 0c0d3e5b6330e046988cdd02234285ec67e92f01..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_go_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/json/main.go}} -``` diff --git a/docs-en/07-develop/03-insert-data/_go_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_go_opts_telnet.mdx deleted file mode 100644 index d5ca40cc146e62412476289853e8e2739e0e9e4b..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_go_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/telnet/main.go}} -``` diff --git a/docs-en/07-develop/03-insert-data/_go_sql.mdx b/docs-en/07-develop/03-insert-data/_go_sql.mdx deleted file mode 100644 index 613a65add1741eb763a4b24e65d180d05f7d670f..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_go_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/insert/sql/main.go}} -``` diff --git a/docs-en/07-develop/03-insert-data/_go_stmt.mdx b/docs-en/07-develop/03-insert-data/_go_stmt.mdx deleted file mode 100644 index c32bc21fb9bcaf45059e4f47df73fb57f047ed1c..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_go_stmt.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```go -{{#include docs-examples/go/insert/stmt/main.go}} -``` - -:::tip -The `github.com/taosdata/driver-go/v2/wrapper` module in driver-go is the wrapper for the C API; it can be used to insert data with parameter binding.
- -::: diff --git a/docs-en/07-develop/03-insert-data/_java_line.mdx b/docs-en/07-develop/03-insert-data/_java_line.mdx deleted file mode 100644 index 2e59a5d4701b2a2ab04ec5711845dc5c80067a1e..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_java_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java}} -``` diff --git a/docs-en/07-develop/03-insert-data/_java_opts_json.mdx b/docs-en/07-develop/03-insert-data/_java_opts_json.mdx deleted file mode 100644 index 826a1a07d9405cb193849f9d21e5444f68517914..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_java_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java}} -``` diff --git a/docs-en/07-develop/03-insert-data/_java_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_java_opts_telnet.mdx deleted file mode 100644 index 954dcc1a482a150dea0b190e1e0593adbfbde796..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_java_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java}} -``` diff --git a/docs-en/07-develop/03-insert-data/_java_sql.mdx b/docs-en/07-develop/03-insert-data/_java_sql.mdx deleted file mode 100644 index a863378defe43b1f22c1f98087a34f053a7d6619..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_java_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java:insert}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_java_stmt.mdx b/docs-en/07-develop/03-insert-data/_java_stmt.mdx deleted file mode 100644 index 54443e535fa84bdf8dc9161ed4ad00f50b26266c..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_java_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java}} -``` diff --git a/docs-en/07-develop/03-insert-data/_js_line.mdx b/docs-en/07-develop/03-insert-data/_js_line.mdx deleted file mode 100644 index 172c9bc17b8cff8b2620720b235a9c8e69bd4197..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_js_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/influxdb_line_example.js}} -``` diff --git a/docs-en/07-develop/03-insert-data/_js_opts_json.mdx b/docs-en/07-develop/03-insert-data/_js_opts_json.mdx deleted file mode 100644 index 20ac9ec91e8dc6675828b16d7da0acb09afd3b5f..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_js_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/opentsdb_json_example.js}} -``` diff --git a/docs-en/07-develop/03-insert-data/_js_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_js_opts_telnet.mdx deleted file mode 100644 index c3c8c40bd642f4f443de88e3db006ad50724d514..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_js_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/opentsdb_telnet_example.js}} -``` diff --git a/docs-en/07-develop/03-insert-data/_js_sql.mdx b/docs-en/07-develop/03-insert-data/_js_sql.mdx deleted file mode 100644 index 
f5e17c76892a57a94192a95451b508b1c176c984..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_js_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/insert_example.js}} -``` diff --git a/docs-en/07-develop/03-insert-data/_js_stmt.mdx b/docs-en/07-develop/03-insert-data/_js_stmt.mdx deleted file mode 100644 index 964d7ddc11b90031b70936efb85fbaabe873ddbb..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_js_stmt.mdx +++ /dev/null @@ -1,12 +0,0 @@ -```js title=Single Row Binding -{{#include docs-examples/node/nativeexample/param_bind_example.js}} -``` - -```js title=Multiple Row Binding -{{#include docs-examples/node/nativeexample/multi_bind_example.js:insertData}} -``` - -:::info -Multiple row binding performs better than single row binding, but it can only be used with `INSERT` statements, while single row binding can also be used for SQL statements other than `INSERT`. - -::: diff --git a/docs-en/07-develop/03-insert-data/_py_line.mdx b/docs-en/07-develop/03-insert-data/_py_line.mdx deleted file mode 100644 index d3bb1ebb3403b53fa43bfc9d5d1a0de9764d7583..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_py_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/line_protocol_example.py}} -``` diff --git a/docs-en/07-develop/03-insert-data/_py_opts_json.mdx b/docs-en/07-develop/03-insert-data/_py_opts_json.mdx deleted file mode 100644 index cfbfe13ccfdb4f3f34b77300812863fdf70d0f59..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_py_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/json_protocol_example.py}} -``` diff --git a/docs-en/07-develop/03-insert-data/_py_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_py_opts_telnet.mdx deleted file mode 100644 index 14bc65a7a3da815abadf7f25c8deffeac666c8d7..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_py_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/telnet_line_protocol_example.py}} -``` diff --git a/docs-en/07-develop/03-insert-data/_py_sql.mdx b/docs-en/07-develop/03-insert-data/_py_sql.mdx deleted file mode 100644 index c0e15b8ec115b9244d50a47c9eafec04bcfdd70c..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_py_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/native_insert_example.py}} -``` diff --git a/docs-en/07-develop/03-insert-data/_py_stmt.mdx b/docs-en/07-develop/03-insert-data/_py_stmt.mdx deleted file mode 100644 index 16d98f54329ad0d3dfb463392f5c1d41c9aab25b..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_py_stmt.mdx +++ /dev/null @@ -1,12 +0,0 @@ -```py title=Single Row Binding -{{#include docs-examples/python/bind_param_example.py}} -``` - -```py title=Multiple Row Binding -{{#include docs-examples/python/multi_bind_example.py:bind_batch}} -``` - -:::info -Multiple row binding performs better than single row binding, but it can only be used with `INSERT` statements, while single row binding can also be used for SQL statements other than `INSERT`.
- -::: \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_rust_line.mdx b/docs-en/07-develop/03-insert-data/_rust_line.mdx deleted file mode 100644 index 696ddb7b854751b8dee01047066f97f74212933f..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_rust_line.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_rust_opts_json.mdx b/docs-en/07-develop/03-insert-data/_rust_opts_json.mdx deleted file mode 100644 index 97d9052dacd1894cc7548a59951ecfaad9caee87..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_rust_opts_json.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_rust_opts_telnet.mdx deleted file mode 100644 index 14021f43d8aff30c35dc30c5d278d4e51f375024..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_rust_opts_telnet.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_rust_sql.mdx b/docs-en/07-develop/03-insert-data/_rust_sql.mdx deleted file mode 100644 index 8e8013e4ad734efcc262ea2f750b82210a538e49..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_rust_sql.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/restexample/examples/insert_example.rs}} -``` diff --git a/docs-en/07-develop/03-insert-data/_rust_stmt.mdx b/docs-en/07-develop/03-insert-data/_rust_stmt.mdx deleted file mode 100644 index 590a7a0e717426ed0235331c49dfc578bc55b2f7..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/03-insert-data/_rust_stmt.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/nativeexample/examples/stmt_example.rs}} -``` diff --git a/docs-en/07-develop/04-query-data/_c.mdx b/docs-en/07-develop/04-query-data/_c.mdx deleted file mode 100644 index 76c9067e2f6af19465cf7c52c3e9b48bb868547d..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/query_example.c}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/04-query-data/_c_async.mdx b/docs-en/07-develop/04-query-data/_c_async.mdx deleted file mode 100644 index 09f3d3b3ff6d6644f837642ef41db459ba7c5753..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_c_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/async_query_example.c:demo}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/04-query-data/_cs.mdx b/docs-en/07-develop/04-query-data/_cs.mdx deleted file mode 100644 index 2ab52feb564eff0fe251bc9900ea2539171e5dba..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_cs.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/QueryExample.cs}} -``` diff --git a/docs-en/07-develop/04-query-data/_cs_async.mdx b/docs-en/07-develop/04-query-data/_cs_async.mdx deleted file mode 100644 index f868994b303e62016b5e2f9304275135855c6ae5..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_cs_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include 
docs-examples/csharp/AsyncQueryExample.cs}} -``` diff --git a/docs-en/07-develop/04-query-data/_go.mdx b/docs-en/07-develop/04-query-data/_go.mdx deleted file mode 100644 index 417c12315c06517e2f3de850ac9a379b7714b519..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_go.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/query/sync/main.go}} -``` diff --git a/docs-en/07-develop/04-query-data/_go_async.mdx b/docs-en/07-develop/04-query-data/_go_async.mdx deleted file mode 100644 index 72fff411b980a0dcbdcaf4274722c63e0351db6f..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_go_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/query/async/main.go}} -``` diff --git a/docs-en/07-develop/04-query-data/_java.mdx b/docs-en/07-develop/04-query-data/_java.mdx deleted file mode 100644 index 519b9266144486231caf3ee593e973d438941ee4..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_java.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java}} -``` diff --git a/docs-en/07-develop/04-query-data/_js.mdx b/docs-en/07-develop/04-query-data/_js.mdx deleted file mode 100644 index c5e4c4f3fc20d3940a2bc6e13e6a5dea8a15ff13..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_js.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/query_example.js}} -``` diff --git a/docs-en/07-develop/04-query-data/_js_async.mdx b/docs-en/07-develop/04-query-data/_js_async.mdx deleted file mode 100644 index c65d54ed12f6c4bbeb333e0de0ba9ca4638bff84..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_js_async.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/async_query_example.js}} -``` diff --git a/docs-en/07-develop/04-query-data/_py.mdx b/docs-en/07-develop/04-query-data/_py.mdx deleted file mode 100644 index aeae42a15e5c39b7e9d227afc424e77658109705..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_py.mdx +++ /dev/null @@ -1,11 +0,0 @@ -The result set is iterated row by row. - -```py -{{#include docs-examples/python/query_example.py:iter}} -``` - -The result set is retrieved as a whole; each row is converted to a dict and returned. - -```py -{{#include docs-examples/python/query_example.py:fetch_all}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/04-query-data/_py_async.mdx b/docs-en/07-develop/04-query-data/_py_async.mdx deleted file mode 100644 index ed6880ae64e59a860e7dc75a5d3c1ad5d2614d01..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_py_async.mdx +++ /dev/null @@ -1,8 +0,0 @@ -```py -{{#include docs-examples/python/async_query_example.py}} -``` - -:::note -This sample code can't be run on Windows systems for now.
- -::: diff --git a/docs-en/07-develop/04-query-data/_rust.mdx b/docs-en/07-develop/04-query-data/_rust.mdx deleted file mode 100644 index 742d70fd025ff44b573eedf78441c9d73defad45..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/_rust.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs-examples/rust/restexample/examples/query_example.rs}} -``` diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx deleted file mode 100644 index a212fa9529215fc24c55c95a166cfc1a407359b2..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/04-query-data/index.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -sidebar_label: Query data -title: Query data -description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import JavaQuery from "./_java.mdx"; -import PyQuery from "./_py.mdx"; -import GoQuery from "./_go.mdx"; -import RustQuery from "./_rust.mdx"; -import NodeQuery from "./_js.mdx"; -import CsQuery from "./_cs.mdx"; -import CQuery from "./_c.mdx"; -import PyAsync from "./_py_async.mdx"; -import NodeAsync from "./_js_async.mdx"; -import CsAsync from "./_cs_async.mdx"; -import CAsync from "./_c_async.mdx"; - -## Introduction - -SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through the REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - -- Query on single column or multiple columns -- Filter on tags or data columns: >, <, =, <\>, like -- Grouping of results: `Group By` -- Sorting of results: `Order By` -- Limit the number of results: `Limit/Offset` -- Arithmetic on columns of numeric types or aggregate results -- Join query with timestamp alignment -- Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff - -For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. - -```sql -select * from d1001 where voltage > 215 order by ts desc limit 2; -``` - -```title=Output -taos> select * from d1001 where voltage > 215 order by ts desc limit 2; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | -Query OK, 2 row(s) in set (0.001100s) -``` - -To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. - -For detailed query syntax, please refer to [Select](/taos-sql/select). - -## Aggregation among Tables - -In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points.
A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. the same type of data collection points. Aggregate functions applicable to tables can be used directly on STables; the syntax is exactly the same. - -In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables cannot be aggregated. - -### Example 1 - -In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. - -``` -taos> SELECT AVG(voltage) FROM meters GROUP BY location; - avg(voltage) | location | -============================================================= - 222.000000000 | California.LosAngeles | - 219.200000000 | California.SanFrancisco | -Query OK, 2 row(s) in set (0.002136s) -``` - -### Example 2 - -In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. - -``` -taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - count(*) | max(current) | -================================== - 5 | 13.4 | -Query OK, 1 row(s) in set (0.002136s) -``` - -Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked to indicate whether they support STables or not. - -## Down Sampling and Interpolation - -In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from table d1001. - -``` -taos> SELECT sum(current) FROM d1001 INTERVAL(10s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:00.000 | 10.300000191 | - 2018-10-03 14:38:10.000 | 24.900000572 | -Query OK, 2 row(s) in set (0.000883s) -``` - -Down sampling can also be used for STables. For example, the SQL statement below can be used to get the sum of current from all meters in California. - -``` -taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.000 | 10.199999809 | - 2018-10-03 14:38:05.000 | 32.900000572 | - 2018-10-03 14:38:06.000 | 11.500000000 | - 2018-10-03 14:38:15.000 | 12.600000381 | - 2018-10-03 14:38:16.000 | 36.000000000 | -Query OK, 5 row(s) in set (0.001538s) -``` - -Down sampling also supports time offsets. For example, the SQL statement below can be used to get the sum of current from all meters, but with each time window starting at a boundary offset by 500 milliseconds. - -``` -taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.500 | 11.189999809 | - 2018-10-03 14:38:05.500 | 31.900000572 | - 2018-10-03 14:38:06.500 | 11.600000000 | - 2018-10-03 14:38:15.500 | 12.300000381 | - 2018-10-03 14:38:16.500 | 35.000000000 | -Query OK, 5 row(s) in set (0.001521s) -``` - -In many use cases, it's hard to align the timestamp of the data collected by each collection point.
However, a lot of algorithms like FFT require the data to be aligned with the same time interval, and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. - -Interpolation can be performed in TDengine if there is no data in a time range. - -For more details, please refer to [Aggregate by Window](/taos-sql/interval). - -## Examples - -### Query - -In the section describing [Insert](/develop/insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. The sample code below demonstrates how to query the data in this STable. - - - - - - - - - - - - - - - - - - - - - - - - - -:::note - -1. The above sample code works with either a REST connection or a native connection. -2. Please note that `use db` can't be used with a REST connection because REST connections are stateless. - -::: - -### Asynchronous Query - -Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than the sync API. The async API works in non-blocking mode: a call can return before the operation finishes, so that the calling thread can switch to other work, improving the performance of the whole application. Async APIs perform especially well over poor networks. - -Please note that async query can only be used with a native connection. - - - - - - - - - - - - diff --git a/docs-en/07-develop/07-subscribe.mdx b/docs-en/07-develop/07-subscribe.mdx deleted file mode 100644 index 782fcdbaf221419dd231bd10958e26b8f4f856e5..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/07-subscribe.mdx +++ /dev/null @@ -1,259 +0,0 @@ ---- -sidebar_label: Data Subscription -description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients." -title: Data Subscription ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - -## Introduction - -Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue. - -A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance are performed on the client side. The client programs poll the server to check whether there is new data, and if so, the new data will be pushed to the client side. If the client program is restarted, where to start retrieving new data is up to the client side. - -There are 3 major APIs related to subscription provided in the TDengine client driver. - -```c -taos_subscribe -taos_consume -taos_unsubscribe -``` - -For more details about these APIs, please refer to [C/C++ Connector](/reference/connector/cpp).
Their usage will be introduced below using the meters use case, in which the schema of the STable and subtables from the previous section [Continuous Query](/develop/continuous-query) is used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). - -If we want to be notified and take some action when the current of some meters exceeds a threshold, like 10A, there are two ways: - -The first way is to query each subtable and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below. - -```sql -select * from D1001 where ts > {last_timestamp1} and current > 10; -select * from D1002 where ts > {last_timestamp2} and current > 10; -... -``` - -The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both the client side and the server side will become unacceptable once the number of meters grows large enough. - -A better way is to query on the STable; only one `select` is needed regardless of the number of meters, as below: - -```sql -select * from meters where ts > {last_timestamp} and current > 10; -``` - -However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database; sometimes the difference between them may be very large. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed. - -All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine. - -The first step is to create a subscription using `taos_subscribe`. - -```c -TAOS_SUB* tsub = NULL; -if (async) { -  // create an asynchronous subscription, the callback function will be called every 1s -  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); -} else { -  // create a synchronous subscription, need to call 'taos_consume' manually -  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); -} -``` - -The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input and is then used to create either an async or a sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time-consuming operations in the callback function. - -The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. For asynchronous subscription, the `taos_subscribe` function should be called exclusively by the current thread, to avoid unpredictable errors. - -The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions.
In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement: - -```sql -select * from meters where current > 10; -``` - -Please note that all the data will be processed because no start time is specified. If we only want to process data for the past day, a time-related condition can be added: - -```sql -select * from meters where ts > now - 1d and current > 10; -``` - -The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side. - -If the subscription named `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, the parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken. - -If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again. - -The last parameter of `taos_subscribe` is the polling interval in milliseconds. In sync mode, if the time difference between two consecutive invocations of `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations of the callback function. - -The second to last parameter of `taos_subscribe` is used to pass arguments to the callback function. `taos_subscribe` doesn't process this parameter and simply passes it to the callback function. This parameter is ignored in sync mode. - -After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`. - -```c -if (async) { -  getchar(); -} else while(1) { -  TAOS_RES* res = taos_consume(tsub); -  if (res == NULL) { -    printf("failed to consume data."); -    break; -  } else { -    print_result(res, blockFetch); -    getchar(); -  } -} -``` - -In the above sample code in the else condition, there is an infinite loop. Each time a carriage return is entered, `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`. - -```c -void print_result(TAOS_RES* res, int blockFetch) { -  TAOS_ROW row = NULL; -  int num_fields = taos_num_fields(res); -  TAOS_FIELD* fields = taos_fetch_fields(res); -  int nRows = 0; -  if (blockFetch) { -    nRows = taos_fetch_block(res, &row); -    for (int i = 0; i < nRows; i++) { -      char temp[256]; -      taos_print_row(temp, row + i, fields, num_fields); -      puts(temp); -    } -  } else { -    while ((row = taos_fetch_row(res))) { -      char temp[256]; -      taos_print_row(temp, row, fields, num_fields); -      puts(temp); -      nRows++; -    } -  } -  printf("%d rows consumed.\n", nRows); -} -``` - -In the above code, `taos_print_row` is used to process the data consumed. All matching rows are printed. - -In async mode, consuming data is simpler as shown below.
- -```c -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { -  print_result(res, *(int*)param); -} -``` - -`taos_unsubscribe` can be invoked to terminate a subscription. - -```c -taos_unsubscribe(tsub, keep); -``` - -The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_, under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on a Windows server, so you need to change the `DataDir` value to a corresponding existing directory.). The subscription will be restarted from the beginning if the corresponding progress file is removed. - -Now let's see the effect of the above sample code, assuming the prerequisites below have been met. - -- The sample code has been downloaded to the local system -- TDengine has been installed and launched properly on the same system -- The database, STable, and subtables required in the sample code are ready - -Launch the command below in the directory where the sample code resides to compile and start the program. - -```bash -make -./subscribe -sql='select * from meters where current > 10;' -``` - -After the program is started, open another terminal and launch TDengine CLI `taos`, then use the SQL commands below to insert a row whose current is 12A into table **D1001**. - -```sql -use test; -insert into D1001 values(now, 12, 220, 1); -``` - -Then, this row of data will be shown by the example program on the first terminal because its current exceeds 10A. More data can be inserted for you to observe the output of the example program. - -## Examples - -The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A.
- -### Prepare Data - -```bash -# create database "power" -taos> create database power; -# use "power" as the database in the following operations -taos> use power; -# create super table "meters" -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); -# create tables using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("California.SanFrancisco", 2); -taos> create table d1002 using meters tags ("California.LosAngeles", 2); -# insert some rows -taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); -taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# select the rows in which current is bigger than 10A -taos> select * from meters where current > 10; - ts | current | voltage | phase | location | groupid | -=========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | -Query OK, 5 row(s) in set (0.004896s) -``` - -### Example Programs - -The example programs for each supported connector are embedded here as tabbed includes. - -### Run the Examples - -The example programs first consume all historical data matching the criteria. - -```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -``` - -Next, use TDengine CLI to insert a new row. - -``` -# taos -taos> use power; -taos> insert into d1001 values(now, 12.4, 220, 1); -``` - -Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
- -``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 -``` diff --git a/docs-en/07-develop/_sub_c.mdx b/docs-en/07-develop/_sub_c.mdx deleted file mode 100644 index 95fef0042d0a277f9136e6e6f8c15558487232f9..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs-examples/c/subscribe_demo.c}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/_sub_cs.mdx b/docs-en/07-develop/_sub_cs.mdx deleted file mode 100644 index 80934aa4d014a076896dce7f41e520f06ffd735d..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_cs.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs-examples/csharp/SubscribeDemo.cs}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/_sub_go.mdx b/docs-en/07-develop/_sub_go.mdx deleted file mode 100644 index cd908fc12c3a35f49ca108ee56c3951c5388a95f..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_go.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs-examples/go/sub/main.go}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/_sub_java.mdx b/docs-en/07-develop/_sub_java.mdx deleted file mode 100644 index e65bc576ebed030d935ced6a4572289cd367ffac..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_java.mdx +++ /dev/null @@ -1,7 +0,0 @@ -```java -{{#include docs-examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} -``` -:::note -For now the Java connector doesn't provide asynchronous subscription, but `TimerTask` can be used to achieve a similar purpose. - -::: \ No newline at end of file diff --git a/docs-en/07-develop/_sub_node.mdx b/docs-en/07-develop/_sub_node.mdx deleted file mode 100644 index c93ad627ce9a77ca71a014b41d571089e6c1727b..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_node.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs-examples/node/nativeexample/subscribe_demo.js}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/_sub_python.mdx b/docs-en/07-develop/_sub_python.mdx deleted file mode 100644 index b817deeba6e283a3ba16fee0d580d3823c999536..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs-examples/python/subscribe_demo.py}} -``` \ No newline at end of file diff --git a/docs-en/07-develop/_sub_rust.mdx b/docs-en/07-develop/_sub_rust.mdx deleted file mode 100644 index 4750cf7a3b871db48c9e5a26b22ab4b8a03f11be..0000000000000000000000000000000000000000 --- a/docs-en/07-develop/_sub_rust.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rs -{{#include docs-examples/rust/nativeexample/examples/subscribe_demo.rs}} -``` \ No newline at end of file diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md deleted file mode 100644 index 674c92e2766a4eb304079140af19c8efea72d55e..0000000000000000000000000000000000000000 --- a/docs-en/10-cluster/02-cluster-mgmt.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -sidebar_label: Operation -title: Manage DNODEs --- - -The previous section, [Deployment](/cluster/deploy), showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary.
- -:::note -All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privileges. - -::: - -## Show DNODEs - -The command below can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode. - -```sql -SHOW DNODES; -``` - -Below is the example output of this command. - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | -Query OK, 1 row(s) in set (0.008298s) -``` - -## Show VGROUPs - -To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnodes is scheduled automatically by mnode based on the system resources of the dnodes. - -Launch TDengine CLI `taos` and execute the command below: - -```sql -USE SOME_DATABASE; -SHOW VGROUPS; -``` - -The example output is below: - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | -Query OK, 1 row(s) in set (0.008298s) - -taos> use db; -Database changed. - -taos> show vgroups; - vgId | tables | status | onlines | v1_dnode | v1_status | compacting | -========================================================================================== - 14 | 38000 | ready | 1 | 1 | master | 0 | - 15 | 38000 | ready | 1 | 1 | master | 0 | - 16 | 38000 | ready | 1 | 1 | master | 0 | - 17 | 38000 | ready | 1 | 1 | master | 0 | - 18 | 37001 | ready | 1 | 1 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | -Query OK, 8 row(s) in set (0.001154s) -``` - -## Add DNODE - -Launch TDengine CLI `taos` and execute the command below to add the end point of a new dnode into the EP (end point) list of the cluster. "fqdn:port" must be quoted using double quotes. - -```sql -CREATE DNODE "fqdn:port"; -``` - -The example output is as below: - -``` -taos> create dnode "localhost:7030"; -Query OK, 0 of 0 row(s) in database (0.008203s) - -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received | -Query OK, 2 row(s) in set (0.001017s) -``` - -It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below.
As can be seen, both dnodes are in "ready" status. - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 3 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 6 | 8 | ready | any | 2022-04-19 08:14:59.165 | | -Query OK, 2 row(s) in set (0.001316s) -``` - -## Drop DNODE - -Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. - -```sql -DROP DNODE "fqdn:port"; -``` - -or - -```sql -DROP DNODE dnodeId; -``` - -The example output is below: - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received | -Query OK, 2 row(s) in set (0.001017s) - -taos> drop dnode 2; -Query OK, 0 of 0 row(s) in database (0.000518s) - -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | -Query OK, 1 row(s) in set (0.001137s) -``` - -In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster. - -:::note - -- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs. -- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode from the TDengine cluster. Only after a dnode is dropped can the corresponding `taosd` process be stopped. -- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept requests from the dropped dnode. -- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication. - -::: - -## Move VNODE - -A vnode can be manually moved from one dnode to another. - -Launch TDengine CLI `taos` and execute the command below: - -```sql -ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>"; -``` - -In the above command, `source-dnodeId` is the original dnodeId where the vnode resides, `dest-dnodeId` specifies the target dnode, and vgId (vgroup ID) can be shown by `SHOW VGROUPS`. - -First `show vgroups` is executed to show the vgroup distribution.
- -``` -taos> show vgroups; - vgId | tables | status | onlines | v1_dnode | v1_status | compacting | -========================================================================================== - 14 | 38000 | ready | 1 | 3 | master | 0 | - 15 | 38000 | ready | 1 | 3 | master | 0 | - 16 | 38000 | ready | 1 | 3 | master | 0 | - 17 | 38000 | ready | 1 | 3 | master | 0 | - 18 | 37001 | ready | 1 | 3 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | -Query OK, 8 row(s) in set (0.001314s) -``` - -It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in dnode 1. Now we want to move vgId 18 from dnode 3 to dnode 1, so we execute the command below in `taos`. - -``` -taos> alter dnode 3 balance "vnode:18-dnode:1"; - -DB error: Balance already enabled (0.00755s) -``` - -However, the operation fails with the error message shown above, which means automatic load balancing has been enabled in the current database, so manual load balancing can't be performed. - -Shut down the cluster, set the `balance` parameter to 0 in all the dnodes, then restart the cluster, and execute `alter dnode` and `show vgroups` as below. - -``` -taos> alter dnode 3 balance "vnode:18-dnode:1"; -Query OK, 0 row(s) in set (0.000575s) - -taos> show vgroups; - vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting | -================================================================================================================= - 14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | -Query OK, 8 row(s) in set (0.001242s) -``` - -It can be seen from the above output that vgId 18 has been moved from dnode 3 to dnode 1. - -:::note - -- Manual load balancing can only be performed when automatic load balancing is disabled, i.e. `balance` is set to 0. -- Only a vnode in normal state, i.e. master or slave, can be moved. A vnode can't be moved when it's in offline, unsynced or syncing status. -- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk. - -::: diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md deleted file mode 100644 index bd718eef9f8dc181628132de831dbca2af59d158..0000000000000000000000000000000000000000 --- a/docs-en/10-cluster/03-ha-and-lb.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -sidebar_label: HA & LB -title: High Availability and Load Balancing --- - -## High Availability of Vnode - -High availability of vnode and mnode can be achieved through replicas in TDengine. - -A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability, since the data service is unavailable if the node holding it is down.
Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with the error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas. - -```sql -CREATE DATABASE demo replica 3; -``` - -The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data. - -There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. While in theory the cluster will provide data access for reading or inserting data as long as over half the vnodes in each vgroup are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly even if over half of the dnodes are online. - -## High Availability of Mnode - -Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using the system parameter `numOfMnodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously. - -There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine CLI `taos` to show the mnodes in the cluster. - -```sql -SHOW MNODES; -``` - -The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMnodes` is configured to 2, another mnode will be started when the second dnode is launched. - -For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMnodes` is set to 2 or higher. - -:::note -If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. - -::: - -## Load Balancing - -Load balancing will be triggered in 3 cases without manual intervention. - -- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically. -- When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically. -- When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes. - -:::tip -Automatic load balancing is controlled by the parameter `balance`: 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance).
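- -For example, disabling automatic load balancing so that vnodes can be moved manually corresponds to an entry like the following in `taos.cfg` (a hedged sketch of the config line; it must be set on every dnode and followed by a cluster restart, as in the manual load balancing example earlier): - -``` -balance 0 -```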
- -::: - -## Dnode Offline - -When a dnode is offline, it can be detected by the TDengine cluster. There are two cases: - -- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished. - -- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. - -:::note -If all the vnodes in a vgroup (or mnodes in an mnode group) are in offline or unsynced status, the master node can only be elected after all the vnodes or mnodes in the group become online and can exchange their status. Following this, the vgroup (or mnode group) is able to provide service. - -::: - -## Arbitrator - -The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2 or 4. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2 or 4. - -To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally. - -Normally, it's prudent to configure the replica number for each DB or the system parameter `numOfMnodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus the arbitrator component can be used to achieve both lower cost of storage space and high availability. - -The arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service. - -In the configuration file `taos.cfg` of each dnode, the parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. The arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number. - -The arbitrator can be shown by executing the command below in TDengine CLI `taos`; its role is shown as "arb". - -```sql -SHOW DNODES; -``` diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md deleted file mode 100644 index 80581b2f1bc7ce9cd046c18873d3f22b6804d8cf..0000000000000000000000000000000000000000 --- a/docs-en/12-taos-sql/02-database.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -sidebar_label: Database -title: Database -description: "create and drop database, show or change database parameters" --- - -## Create Database - -``` -CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; -``` - -:::info - -1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years.
The data will be deleted automatically once its age exceeds this threshold. -2. UPDATE specifies whether the data can be updated and how the data can be updated. - 1. UPDATE set to 0 means update operations are not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is. - 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL. - 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged. -3. The maximum length of a database name is 33 bytes. -4. The maximum length of a SQL statement is 65,480 bytes. -5. Below are the parameters that can be used when creating a database: - - cache: [Description](/reference/config/#cache) - - blocks: [Description](/reference/config/#blocks) - - days: [Description](/reference/config/#days) - - keep: [Description](/reference/config/#keep) - - minRows: [Description](/reference/config/#minrows) - - maxRows: [Description](/reference/config/#maxrows) - - wal: [Description](/reference/config/#wallevel) - - fsync: [Description](/reference/config/#fsync) - - update: [Description](/reference/config/#update) - - cacheLast: [Description](/reference/config/#cachelast) - - replica: [Description](/reference/config/#replica) - - quorum: [Description](/reference/config/#quorum) - - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb) - - comp: [Description](/reference/config/#comp) - - precision: [Description](/reference/config/#precision) -6. Please note that all of the parameters mentioned in this section are configured in the configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. - -::: - -## Show Current Configuration - -``` -SHOW VARIABLES; -``` - -## Specify The Database In Use - -``` -USE db_name; -``` - -:::note -This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or STable name. For example, to query the STable "meters" in database "test", the query would be "SELECT count(*) from test.meters". - -::: - -## Drop Database - -``` -DROP DATABASE [IF EXISTS] db_name; -``` - -:::note -All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command. - -::: - -## Change Database Configuration - -Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of a database, please refer to [Configuration Parameters](/reference/config/). - -``` -ALTER DATABASE db_name COMP 2; -``` - -The COMP parameter specifies whether the data is compressed and how the data is compressed. - -``` -ALTER DATABASE db_name REPLICA 2; -``` - -The REPLICA parameter specifies the number of replicas of the database. - -``` -ALTER DATABASE db_name KEEP 365; -``` - -The KEEP parameter specifies the number of days for which the data will be kept.
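- -The effect of an `ALTER DATABASE` command can be verified with `SHOW DATABASES`, which lists the current values of parameters such as `keep` for each database (a minimal sketch; the exact columns shown vary across TDengine versions): - -```sql -ALTER DATABASE db_name KEEP 365; -SHOW DATABASES; -- the keep column for db_name should now show 365 -```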
- -``` -ALTER DATABASE db_name QUORUM 2; -``` - -The QUORUM parameter specifies the necessary number of confirmations to determine whether the data is written successfully. - -``` -ALTER DATABASE db_name BLOCKS 100; -``` - -The BLOCKS parameter specifies the number of memory blocks used by each VNODE. - -``` -ALTER DATABASE db_name CACHELAST 0; -``` - -The CACHELAST parameter specifies whether and how the latest data of a subtable is cached. - -:::tip -The above parameters can be changed using the `ALTER DATABASE` command without restarting. For more details of all configuration parameters please refer to [Configuration Parameters](/reference/config/). - -::: - -## Show All Databases - -``` -SHOW DATABASES; -``` - -## Show The Create Statement of A Database - -``` -SHOW CREATE DATABASE db_name; -``` - -This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md deleted file mode 100644 index acfb0de0e1521fd8c6a068497a3df7a17941524c..0000000000000000000000000000000000000000 --- a/docs-en/12-taos-sql/08-interval.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -sidebar_label: Interval -title: Aggregate by Time Window --- - -Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. -Window-related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. - -## Time Window - -The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. - -![TDengine Database Time Window](./timewindow-1.webp) - -`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. - -``` -SELECT * FROM temp_tb_1 INTERVAL(1m); -``` - -The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
- -``` -SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); -``` - -When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range specified by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to the same value in the `taos.cfg` configuration file on the client side and server side. - -## Status Window - -When an integer, bool, or string is used to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. The status window is not applicable to STables for now. - -![TDengine Database Status Window](./timewindow-3.webp) - -`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: - -``` -SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); -``` - -## Session Window - -```sql -SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); -``` - -The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of the time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. - -![TDengine Database Session Window](./timewindow-2.webp) - -If the time interval between two consecutive rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise a new session window is started automatically. The session window is not supported on STables for now. - -## More On Window Aggregate - -### Syntax - -The full syntax of aggregate by window is as follows: - -```sql -SELECT function_list FROM tb_name - [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - -SELECT function_list FROM stb_name - [WHERE where_condition] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - [GROUP BY tags] -``` - -### Restrictions - -- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example, COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used. -- `LAST_ROW` can't be used together with window aggregate. -- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
-- The `WHERE` clause can be used to specify the starting and ending time and other filter conditions. - -- The `FILL` clause is used to specify how to fill when there is data missing in any window, including: - 1. NONE: No fill (the default fill mode) - 2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` - 3. PREV: Fill with the previous non-NULL value, `FILL(PREV)` - 4. NULL: Fill with NULL, `FILL(NULL)` - 5. LINEAR: Fill by linear interpolation based on the closest non-NULL values, `FILL(LINEAR)` - 6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)` - -:::info - -1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. -2. The result set is in ascending order of timestamp when you aggregate by time window. -3. If aggregate by window is used on a STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group. - -::: - -Aggregate by time window is also used in continuous queries; please refer to [Continuous Query](/develop/continuous-query). - -## Examples - -A table of intelligent meters can be created by the SQL statement below: - -```sql -CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -``` - -The average current, maximum current and median current in every 10-minute window for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. - -``` -SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters - WHERE ts>=NOW-1d and ts<=now - INTERVAL(10m) - FILL(PREV); -``` diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md deleted file mode 100644 index 7460a5e0ba3ce78ee7744569cda460c477cac19c..0000000000000000000000000000000000000000 --- a/docs-en/12-taos-sql/10-json.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: JSON Type --- - -## Syntax - -1. Tag of type JSON - - ```sql - create STable s1 (ts timestamp, v1 int) tags (info json); - - create table s1_1 using s1 tags ('{"k1": "v1"}'); - ``` - -2. "->" Operator of JSON - - ```sql - select * from s1 where info->'k1' = 'v1'; - - select info->'k1' from s1; - ``` - -3. "contains" Operator of JSON - - ```sql - select * from s1 where info contains 'k2'; - - select * from s1 where info contains 'k1'; - ``` - -## Applicable Operations - -1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used but `in` can't be used. - - ```sql - select * from s1 where info->'k1' match 'v*'; - - select * from s1 where info->'k1' like 'v%' and info contains 'k2'; - - select * from s1 where info is null; - - select * from s1 where info->'k1' is not null; - ``` - -2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and subqueries; for example `group by json->'key'` - -3. `Distinct` can be used with a tag of type JSON - - ```sql - select distinct info->'k1' from s1; - ``` - -4. Tag Operations - - The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this. - - The name of a JSON tag can be altered. A tag of JSON type can't be added or removed.
The column length of a JSON tag can't be changed. - -## Other Restrictions - -- The JSON type can only be used for a tag. There can be only one tag of JSON type, and it can't be used together with tags of other types. - -- The maximum length of keys in JSON is 256 bytes, and keys must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes. - -- JSON format: - - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-NULL string, bool or array. - - An object can be {}, and the entire JSON is empty if so. A key can be "", and it's ignored if so. - - A value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed, which means that the value of a key can't be JSON. - - If a key occurs twice in the JSON, only the first one is valid. - - Escape characters are not allowed in JSON. - -- NULL is returned when querying a key that doesn't exist in the JSON. - -- If a tag of JSON type is the result of an inner query, it can't be parsed and queried in the outer query. - -For example, the SQL statements below are not supported. - -```sql -select jtag->'key' from (select jtag from STable); -select jtag->'key' from (select jtag from STable) where jtag->'key'>0; -``` diff --git a/docs-en/12-taos-sql/12-keywords.md b/docs-en/12-taos-sql/12-keywords.md deleted file mode 100644 index 56a82a02a1fada712141f3572b761e0cd18576c6..0000000000000000000000000000000000000000 --- a/docs-en/12-taos-sql/12-keywords.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Keywords --- - -There are about 200 keywords reserved by TDengine; they can't be used as the name of a database, STable or table, whether in upper case, lower case or mixed case. - -**Keywords List** - -| | | | | | -| ----------- | ---------- | --------- | ---------- | ------------ | -| ABORT | CREATE | IGNORE | NULL | STAR | -| ACCOUNT | CTIME | IMMEDIATE | OF | STATE | -| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT | -| ADD | DATABASES | IN | OR | STATE_WINDOW | -| AFTER | DAYS | INITIALLY | ORDER | STORAGE | -| ALL | DBS | INSERT | PARTITIONS | STREAM | -| ALTER | DEFERRED | INSTEAD | PASS | STREAMS | -| AND | DELIMITERS | INT | PLUS | STRING | -| AS | DESC | INTEGER | PPS | SYNCDB | -| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE | -| ATTACH | DETACH | INTO | PREV | TABLES | -| BEFORE | DISTINCT | IS | PRIVILEGE | TAG | -| BEGIN | DIVIDE | ISNULL | QTIME | TAGS | -| BETWEEN | DNODE | JOIN | QUERIES | TBNAME | -| BIGINT | DNODES | KEEP | QUERY | TIMES | -| BINARY | DOT | KEY | QUORUM | TIMESTAMP | -| BITAND | DOUBLE | KILL | RAISE | TINYINT | -| BITNOT | DROP | LE | REM | TOPIC | -| BITOR | EACH | LIKE | REPLACE | TOPICS | -| BLOCKS | END | LIMIT | REPLICA | TRIGGER | -| BOOL | EQ | LINEAR | RESET | TSERIES | -| BY | EXISTS | LOCAL | RESTRICT | UMINUS | -| CACHE | EXPLAIN | LP | ROW | UNION | -| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED | -| CASCADE | FILE | LT | RSHIFT | UPDATE | -| CHANGE | FILL | MATCH | SCORES | UPLUS | -| CLUSTER | FLOAT | MAXROWS | SELECT | USE | -| COLON | FOR | MINROWS | SEMI | USER | -| COLUMN | FROM | MINUS | SESSION | USERS | -| COMMA | FSYNC | MNODES | SET | USING | -| COMP | GE | MODIFY | SHOW | VALUES | -| COMPACT | GLOB | MODULES | SLASH | VARIABLE | -| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES | -| CONFLICT | GROUP | NE | SLIMIT | VGROUPS | -| CONNECTION | GT | NONE | SMALLINT | VIEW | -| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | -| CONNS | ID | NOTNULL | STable | WAL | -| COPY | IF | NOW | STableS | WHERE | -| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
| _WSTOP | _WDURATION | | | | - -## Explanations - -### TBNAME -In a STable query, `TBNAME` can be considered a special tag that represents the name of the subtable. - -Get the table name and tag values of all subtables in a STable. -```mysql -SELECT TBNAME, location FROM meters; -``` - -Count the number of subtables in a STable. -```mysql -SELECT COUNT(TBNAME) FROM meters; -``` - -In the above two query statements, only filters on TAGS can be used in the WHERE clause. -```mysql -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.SanFrancisco | - d1003 | California.SanFrancisco | - d1002 | California.LosAngeles | - d1001 | California.LosAngeles | -Query OK, 4 row(s) in set (0.000881s) - -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) -``` -### _QSTART/_QSTOP/_QDURATION -The start, stop and duration of a query time window (Since version 2.6.0.0). - -### _WSTART/_WSTOP/_WDURATION -The start, stop and duration of an aggregate query by time window, like interval, session window, state window (Since version 2.6.0.0). - -### _c0 -The first column of a table or STable. \ No newline at end of file diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md deleted file mode 100644 index 33656338a7bba38dc55cf536bdba8e95309c5acf..0000000000000000000000000000000000000000 --- a/docs-en/12-taos-sql/index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: TDengine SQL -description: "The syntax supported by TDengine SQL" --- - -This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. - -TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. - -Syntax Specifications used in this chapter: - -- The content inside <\> needs to be input by the user, excluding <\> itself. -- \[ \] means optional input, excluding [] itself. -- | means one of a few options, excluding | itself. -- … means the item prior to it can be repeated multiple times. - -To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: - -```sql -taos> DESCRIBE meters; - Field | Type | Length | Note | -================================================================================= - ts | TIMESTAMP | 8 | | - current | FLOAT | 4 | | - voltage | INT | 4 | | - phase | FLOAT | 4 | | - location | BINARY | 64 | TAG | - groupid | INT | 4 | TAG | -``` - -The data set includes the data collected by 4 meters; the corresponding table names are d1001, d1002, d1003 and d1004, based on the data model of TDengine.
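- -Based on the `DESCRIBE` output above, the data model can be reproduced with SQL similar to the following (a minimal sketch: the schema mirrors the table shown above, the location tags follow the sample query output in the Keywords section, and the groupid values are illustrative assumptions chosen so that two subtables have groupid > 2, matching the example there): - -```sql -CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupid INT); -CREATE TABLE d1001 USING meters TAGS ('California.LosAngeles', 2); -- tag values are illustrative -CREATE TABLE d1002 USING meters TAGS ('California.LosAngeles', 2); -CREATE TABLE d1003 USING meters TAGS ('California.SanFrancisco', 3); -CREATE TABLE d1004 USING meters TAGS ('California.SanFrancisco', 3); -```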
diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx deleted file mode 100644 index 990af861961e9daf4ac775462e21d6d9852d17c1..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: REST API --- - -To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles, namely the REST API. To minimize the learning cost, and unlike the REST APIs of other database engines, TDengine allows SQL commands to be placed in the BODY of an HTTP POST request to operate the database. - -:::note -One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.) -::: - -## Installation - -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. - -## Verification - -If the TDengine server is already installed, it can be verified as follows: - -The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. - -The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. - -```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql -``` - -The following response indicates that the verification passed. - -```json -{ - "status": "succ", - "head": [ - "name", - "created_time", - "ntables", - "vgroups", - "replica", - "quorum", - "days", - "keep1,keep2,keep(D)", - "cache(MB)", - "blocks", - "minrows", - "maxrows", - "wallevel", - "fsync", - "comp", - "precision", - "status" - ], - "data": [ - [ - "log", - "2020-09-02 17:23:00.039", - 4, - 1, - 1, - 1, - 10, - "30,30,30", - 1, - 3, - 100, - 4096, - 1, - 3000, - 2, - "us", - "ready" - ] - ], - "rows": 1 -} -``` - -## HTTP request URL format - -``` -http://<fqdn>:<port>/rest/sql/[db_name] -``` - -Parameter Description: - -- fqdn: FQDN or IP address of any host in the cluster -- port: httpPort configuration item in the configuration file, default is 6041 -- db_name: Optional parameter that specifies the default database name for the executed SQL command. (supported since version 2.2.0.0) - -For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`. - -TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. - -- The custom authentication information is as follows. More details about "token" later.
- - ``` - Authorization: Taosd <TOKEN> - ``` - -- Basic authentication information is shown below: - - ``` - Authorization: Basic <TOKEN> - ``` - -The HTTP request's BODY is a complete SQL command, and the data table in the SQL statement should be provided with a database prefix, e.g., `db_name.tb_name`. If the table name does not have a database prefix and the database name is not specified in the URL, the system will return an error because the HTTP module is a simple forwarder and has no awareness of the current DB. - -Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax. - -```bash -curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <fqdn>:<port>/rest/sql/[db_name] -``` - -Or - -```bash -curl -u username:password -d '<SQL>' <fqdn>:<port>/rest/sql/[db_name] -``` - -where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`. - -## HTTP Return Format - -The return result is in JSON format, as follows: - -```json -{ - "status": "succ", - "head": ["ts", "current", ...], - "column_meta": [["ts",9,8],["current",6,4], ...], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, ...], - ["2018-10-03 14:38:15.000", 12.6, ...] - ], - "rows": 2 -} -``` - -Description: - -- status: tells you whether the operation succeeded or failed. -- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) - -- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. -- data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta. -- rows: Indicates how many rows of data there are. - -The column types in column_meta are described as follows: - -- 1: BOOL -- 2: TINYINT -- 3: SMALLINT -- 4: INT -- 5: BIGINT -- 6: FLOAT -- 7: DOUBLE -- 8: BINARY -- 9: TIMESTAMP -- 10: NCHAR - -## Custom Authorization Code - -HTTP requests require an authorization code (`<TOKEN>`) for identification purposes. The administrator usually provides the authorization code, and it can be obtained simply by sending an `HTTP GET` request as follows: - -```bash -curl http://<fqdn>:<port>/rest/login/<username>/<password> -``` - -Where `fqdn` is the FQDN or IP address of the TDengine server. `port` is the port number of the TDengine service. `username` is the database username. `password` is the database password. The return value is in `JSON` format, and the meaning of each field is as follows. - -- status: flag bit of the request result - -- code: return value code - -- desc: authorization code - -Example of getting an authorization code.
- -```bash -curl http://192.168.0.1:6041/rest/login/root/taosdata -``` - -Response body: - -```json -{ - "status": "succ", - "code": 0, - "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -} -``` - -## Examples - -- Query all records from table d1001 of database demo: - - ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql - ``` - - Response body: - - ```json - { - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, 219, 0.31], - ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] - ], - "rows": 2 - } - ``` - -- Create database demo: - - ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql - ``` - - Response body: - - ```json - { - "status": "succ", - "head": ["affected_rows"], - "column_meta": [["affected_rows", 4, 4]], - "data": [[1]], - "rows": 1 - } - ``` - -## Other Uses - -### Unix timestamps for result sets - -When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example: - -```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt -``` - -Response body: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - [1538548685000, 10.3, 219, 0.31], - [1538548695000, 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -### UTC format for the result set - -When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed in UTC format, for example: - -```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc -``` - -Response body: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], - ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -## Important configuration items - -Only some configuration parameters related to the RESTful interface are listed below. Please see the description in the configuration file for other system parameters. - -- The port number of the external RESTful service is bound to 6041 by default (the actual value is serverPort + 11, so it can be changed by modifying the setting of the serverPort parameter). -- httpMaxThreads: the number of threads to start, default is 2 (the default value is rounded down to half of the CPU cores with version 2.0.17.0 and later versions). -- restfulRowLimit: the maximum number of result sets (in JSON format) to return. The default value is 10240. -- httpEnableCompress: whether to support compression; not supported by default. Currently, TDengine only supports the gzip compression format. -- httpDebugFlag: logging switch, default is 131. 131: error and alarm messages only, 135: debug messages, 143: very detailed debug messages. -- httpDbNameMandatory: users must specify the default database name in the RESTful URL. The default is 0, which turns off this check. If set to 1, users must put a default database name in every RESTful URL.
Otherwise, it will return an execution error and reject the SQL statement, regardless of whether that statement actually requires a specified database. - -:::note -If you are using the REST API provided by taosd, you should write the above configuration in taosd's configuration file taos.cfg. If you use the REST API of taosAdapter, you need to refer to the [corresponding configuration method](/reference/taosadapter/) of taosAdapter. -::: diff --git a/docs-en/14-reference/03-connector/_verify_windows.mdx b/docs-en/14-reference/03-connector/_verify_windows.mdx deleted file mode 100644 index c3d6af84d8e8cdf8b75c8efc5bb36955df4884bd..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/03-connector/_verify_windows.mdx +++ /dev/null @@ -1,14 +0,0 @@ -Go to the `C:\TDengine` directory from `cmd` and execute the TDengine CLI program `taos.exe` directly to connect to the TDengine service and enter the TDengine CLI interface, for example, as follows: - -```text - C:\TDengine>taos - Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 - Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. - taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | - =================================================================================================================================================================================================================================================================== - test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | - log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | - Query OK, 2 row(s) in set (0.045000s) - taos> -``` diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx deleted file mode 100644 index 8a05f2d841bbcdbab2bdb7471691ca0ae49a4f6b..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/03-connector/go.mdx +++ /dev/null @@ -1,415 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 4 -sidebar_label: Go -title: TDengine Go Connector --- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -import Preparation from "./_preparation.mdx" -import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../07-develop/04-query-data/_go.mdx" - -`driver-go` is the official Go language connector for TDengine. It implements the driver interface of the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. - -`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and the bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter.
-
-This article describes how to install `driver-go`, connect to TDengine clusters, and perform basic operations such as data querying and data writing through `driver-go`.
-
-The source code of `driver-go` is hosted on [GitHub](https://github.com/taosdata/driver-go).
-
-## Supported Platforms
-
-Native connections are supported on the same platforms as the TDengine client driver.
-REST connections are supported on all platforms that can run Go.
-
-## Version support
-
-Please refer to [version support list](/reference/connector#version-support)
-
-## Supported features
-
-### Native connections
-
-A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are:
-
-* Normal queries
-* Continuous queries
-* Subscriptions
-* Schemaless interface
-* Parameter binding interface
-
-### REST connection
-
-A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported:
-
-* General queries
-* Continuous queries
-
-## Installation steps
-
-### Pre-installation
-
-- Install the Go development environment (Go 1.14 or above, GCC 4.8.5 or above)
-- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps
-
-Configure the environment variables and verify them with the following commands:
-
-* `go env`
-* `gcc -v`
-
-### Use go get to install
-
-```
-go get -u github.com/taosdata/driver-go/v2@develop
-```
-
-### Manage with go mod
-
-1. Initialize the project with the `go mod` command.
-
-   ```text
-   go mod init taos-demo
-   ```
-
-2. Import taosSql.
-
-   ```go
-   import (
-     "database/sql"
-     _ "github.com/taosdata/driver-go/v2/taosSql"
-   )
-   ```
-
-3. Update the dependency packages with `go mod tidy`.
-
-   ```text
-   go mod tidy
-   ```
-
-4. Run the program with `go run taos-demo` or compile the binary with the `go build` command.
-
-   ```text
-   go run taos-demo
-   go build
-   ```
-
-## Create a connection
-
-### Data source name (DSN)
-
-Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (square brackets indicate optional parts):
-
-``` text
-[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN]
-```
-
-The full form of a DSN:
-
-```text
-username:password@protocol(address)/dbname?param=value
-```
-
-### Connecting via connector
-
-
-
-_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply importing the driver.
-
-Use `taosSql` as `driverName` and a correct [DSN](#DSN) as `dataSourceName`. The DSN supports the following parameters:
-
-* configPath specifies the `taos.cfg` directory
-
-Example:
-
-```go
-package main
-
-import (
-    "database/sql"
-    "fmt"
-
-    _ "github.com/taosdata/driver-go/v2/taosSql"
-)
-
-func main() {
-    var taosUri = "root:taosdata@tcp(localhost:6030)/"
-    taos, err := sql.Open("taosSql", taosUri)
-    if err != nil {
-        fmt.Println("failed to connect TDengine, err:", err)
-        return
-    }
-    defer taos.Close()
-}
-```
-
-
-
-_taosRestful_ implements Go's `database/sql/driver` interface via an HTTP client. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply importing the driver.
-
-Use `taosRestful` as `driverName` and a correct [DSN](#DSN) as `dataSourceName`. The DSN supports the following parameters:
-
-* `disableCompression` whether to disable accepting compressed data; the default is true (compressed data is not accepted). Set it to false if data is transferred using gzip compression.
-* `readBufferSize` the size of the buffer for reading data; the default is 4 KB (4096 bytes). It can be increased when query results contain a lot of data.
-
-Example:
-
-```go
-package main
-
-import (
-    "database/sql"
-    "fmt"
-
-    _ "github.com/taosdata/driver-go/v2/taosRestful"
-)
-
-func main() {
-    var taosUri = "root:taosdata@http(localhost:6041)/"
-    taos, err := sql.Open("taosRestful", taosUri)
-    if err != nil {
-        fmt.Println("failed to connect TDengine, err:", err)
-        return
-    }
-    defer taos.Close()
-}
-```
-
-
-
-## Usage examples
-
-### Write data
-
-#### SQL Write
-
-<GoInsert />
-
-#### InfluxDB line protocol write
-
-<GoInfluxLine />
-
-#### OpenTSDB Telnet line protocol write
-
-<GoOpenTSDBTelnet />
-
-#### OpenTSDB JSON line protocol write
-
-<GoOpenTSDBJson />
-
-### Query data
-
-<GoQuery />
-
-### More sample programs
-
-* [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go)
-* [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
-
-## Usage limitations
-
-Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. change `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)`; otherwise it will report the error `[0x217] Database not specified or available`.
-
-You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writes against that db will report an error.
-
-The complete example is as follows.
-
-```go
-package main
-
-import (
-    "database/sql"
-    "fmt"
-    "time"
-
-    _ "github.com/taosdata/driver-go/v2/taosRestful"
-)
-
-func main() {
-    var taosDSN = "root:taosdata@http(localhost:6041)/test"
-    taos, err := sql.Open("taosRestful", taosDSN)
-    if err != nil {
-        fmt.Println("failed to connect TDengine, err:", err)
-        return
-    }
-    defer taos.Close()
-    taos.Exec("create database if not exists test")
-    taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
-    _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
-    if err != nil {
-        fmt.Println("failed to insert, err:", err)
-        return
-    }
-    rows, err := taos.Query("select * from tb1")
-    if err != nil {
-        fmt.Println("failed to select from table, err:", err)
-        return
-    }
-
-    defer rows.Close()
-    for rows.Next() {
-        var r struct {
-            ts time.Time
-            a  int
-        }
-        err := rows.Scan(&r.ts, &r.a)
-        if err != nil {
-            fmt.Println("scan error:\n", err)
-            return
-        }
-        fmt.Println(r.ts, r.a)
-    }
-}
-```
-
-## Frequently Asked Questions
-
-1. Cannot find the package `github.com/taosdata/driver-go/v2/taosRestful`
-
-   Change the `github.com/taosdata/driver-go/v2` line in the require block of the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`.
-
-2. The bind interface in database/sql crashes
-
-   REST does not support the parameter binding interface. It is recommended to use `db.Exec` and `db.Query` instead.
-3. Error `[0x217] Database not specified or available` after executing other statements following a `use db` statement
-
-   The execution of SQL commands through the REST interface is not contextual, so the `use db` statement does not work. See the usage limitations section above.
-
-4. `taosSql` works without error, but `taosRestful` reports the error `[0x217] Database not specified or available`
-
-   Because the REST interface is stateless, the `use db` statement does not take effect. See the usage limitations section above.
-
-5. Upgrade `github.com/taosdata/driver-go/v2/taosRestful`
-
-   Change the `github.com/taosdata/driver-go/v2` line in the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`.
-
-6. The `readBufferSize` parameter has no significant effect after being increased
-
-   Increasing `readBufferSize` reduces the number of `syscall` calls when fetching results. If the query result is small, modifying this parameter does not improve performance significantly. If you increase the value too much, the bottleneck becomes parsing the JSON data. To optimize query speed, adjust the value based on the actual situation to achieve the best performance.
-
-7. Query efficiency is reduced when the `disableCompression` parameter is set to `false`
-
-   When `disableCompression` is set to `false`, query results are compressed with `gzip` before transmission, so you have to decompress them with `gzip` after receiving them.
-
-8. The `go get` command cannot get the package, or times out while getting the package
-
-   Set the Go proxy: `go env -w GOPROXY=https://goproxy.cn,direct`.
-
-## Common APIs
-
-### database/sql API
-
-* `sql.Open(DRIVER_NAME string, dataSourceName string) (*DB, error)`
-
-  Use this API to open a DB, returning an object of type \*DB.
-
-:::info
-This API call succeeds without checking permissions; the user/password/host/port are validated only when you execute a Query or Exec.
-
-:::
-
-* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
-
-  A built-in method of the DB object returned by `sql.Open`, used to execute non-query SQL.
-
-* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
-
-  A built-in method of the DB object returned by `sql.Open`, used to execute query statements.
-
-### Advanced functions (af) API
-
-The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless writing, and parameter binding.
-
-#### Connection management
-
-* `af.Open(host, user, pass, db string, port int) (*Connector, error)`
-
-  This API creates a connection to taosd via cgo.
-
-* `func (conn *Connector) Close() error`
-
-  Closes the connection.
-
-#### Subscriptions
-
-* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)`
-
-  Subscribes to data.
-
-* `func (s *taosSubscriber) Consume() (driver.Rows, error)`
-
-  Consumes the subscription data, returning the `Rows` structure of the `database/sql/driver` package.
-
-* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)`
-
-  Unsubscribes from data.
-
-#### schemaless
-
-* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error`
-
-  Writes InfluxDB line protocol data.
-
-* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error`
-
-  Writes OpenTSDB telnet protocol data.
-
-* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error`
-
-  Writes OpenTSDB JSON protocol data.
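-
-The signatures above are enough to sketch a minimal schemaless write. In the sketch below, the host, credentials, database name, and sample line are illustrative assumptions (the documentation defaults), and the precision string "ns" is chosen to match the nanosecond timestamp in the sample line.
-
-```go
-package main
-
-import (
-    "log"
-
-    "github.com/taosdata/driver-go/v2/af"
-)
-
-func main() {
-    // Assumed local instance with default credentials and an existing database "test".
-    conn, err := af.Open("localhost", "root", "taosdata", "test", 6030)
-    if err != nil {
-        log.Fatalln("failed to connect TDengine:", err)
-    }
-    defer conn.Close()
-
-    // One InfluxDB line protocol record; the trailing value is a nanosecond timestamp.
-    lines := []string{
-        "meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219i,phase=0.31 1626006833639000000",
-    }
-    if err := conn.InfluxDBInsertLines(lines, "ns"); err != nil {
-        log.Fatalln("schemaless insert failed:", err)
-    }
-    log.Println("schemaless insert done")
-}
-```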
-
-#### parameter binding
-
-* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)`
-
-  Parameter-bound single row insert.
-
-* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)`
-
-  Parameter-bound query that returns the `Rows` structure of the `database/sql/driver` package.
-
-* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt`
-
-  Initializes a parameter binding insert statement.
-
-* `func (stmt *InsertStmt) Prepare(sql string) error`
-
-  Parameter binding preprocessing of the SQL statement.
-
-* `func (stmt *InsertStmt) SetTableName(name string) error`
-
-  Parameter binding to set the table name.
-
-* `func (stmt *InsertStmt) SetSubTableName(name string) error`
-
-  Parameter binding to set the sub table name.
-
-* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error`
-
-  Parameter binding of multiple rows of data.
-
-* `func (stmt *InsertStmt) AddBatch() error`
-
-  Adds to the parameter-bound batch.
-
-* `func (stmt *InsertStmt) Execute() error`
-
-  Executes a parameter binding.
-
-* `func (stmt *InsertStmt) GetAffectedRows() int`
-
-  Gets the number of affected rows inserted by the parameter binding.
-
-* `func (stmt *InsertStmt) Close() error`
-
-  Closes the parameter binding.
-
-## API Reference
-
-For the full API, see the [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v2).
diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx
deleted file mode 100644
index ff15acf1a9c5dbfd74e6f3101459cfc7bdeda515..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/03-connector/java.mdx
+++ /dev/null
@@ -1,845 +0,0 @@
----
-toc_max_heading_level: 4
-sidebar_position: 2
-sidebar_label: Java
-title: TDengine Java Connector
-description: TDengine Java connector based on the JDBC API, providing both native and REST connections
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and the bind interface. The other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementations of the REST connection and the native connection have slight differences in features.
-
-![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp)
-
-The preceding diagram shows two ways for a Java application to access TDengine via the connector:
-
-- JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call the client driver (`libtaos.so` or `taos.dll`) APIs directly, sending write and query requests to the taosd instance located on physical node 2 (pnode2).
-- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver and sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to the TDengine server and returns the result.
-
-The REST connection, which does not rely on the TDengine client driver, is more convenient and flexible, in addition to being cross-platform. However, its performance is about 30% lower than that of the native connection.
-
-:::info
-TDengine's JDBC driver implementation is as consistent as possible with relational database drivers. Still, there are differences between the use scenarios and technical characteristics of TDengine and those of relational databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind:
-
-- TDengine does not currently support delete operations for individual data records.
-- Transactional operations are not currently supported.
-
-:::
-
-## Supported platforms
-
-Native connections support the same platforms as the TDengine client driver.
-REST connections support all platforms that can run Java.
-
-## Version support
-
-Please refer to [Version Support List](/reference/connector#version-support).
-
-## TDengine DataType vs. Java DataType
-
-TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Java are as follows:
-
-| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
-| ----------------- | ---------------------------------- | ----------------------------------- |
-| TIMESTAMP         | java.lang.Long                     | java.sql.Timestamp                  |
-| INT               | java.lang.Integer                  | java.lang.Integer                   |
-| BIGINT            | java.lang.Long                     | java.lang.Long                      |
-| FLOAT             | java.lang.Float                    | java.lang.Float                     |
-| DOUBLE            | java.lang.Double                   | java.lang.Double                    |
-| SMALLINT          | java.lang.Short                    | java.lang.Short                     |
-| TINYINT           | java.lang.Byte                     | java.lang.Byte                      |
-| BOOL              | java.lang.Boolean                  | java.lang.Boolean                   |
-| BINARY            | java.lang.String                   | byte array                          |
-| NCHAR             | java.lang.String                   | java.lang.String                    |
-| JSON              | -                                  | java.lang.String                    |
-
-**Note**: Only TAGs support the JSON type.
-
-## Installation steps
-
-### Pre-installation preparation
-
-Before using the Java connector to connect to the database, the following conditions are required:
-
-- Java 1.8 or above runtime environment and Maven 3.6 or above installed
-- TDengine client driver installed (required for native connections, not required for REST connections); please refer to [Installing Client Driver](/reference/connector#Install-Client-Driver)
-
-### Install the connectors
-
-
-
-- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
-- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
-- [maven.aliyun](https://maven.aliyun.com/mvn/search)
-
-Add the following dependency in the `pom.xml` file of your Maven project:
-
-```xml
-<dependency>
-  <groupId>com.taosdata.jdbc</groupId>
-  <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.**</version>
-</dependency>
-```
-
-
-
-You can build the Java connector from source code after cloning its repository:
-
-```
-git clone https://github.com/taosdata/taos-connector-jdbc.git
-cd taos-connector-jdbc
-mvn clean install -Dmaven.test.skip=true
-```
-
-After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
-
-
-
-## Establish a connection
-
-TDengine's JDBC URL specification format is:
-`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
-
-For establishing connections, native connections differ slightly from REST connections.
-
-
-
-```java
-Class.forName("com.taosdata.jdbc.TSDBDriver");
-String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
-Connection conn = DriverManager.getConnection(jdbcUrl);
-```
-
-In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to the hostname `taosdemo.com`, port `6030` (the default port for TDengine), and the database named `test`. In this URL, the user name `user` is specified as `root`, and the `password` is `taosdata`.
-
-Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows).
-
-The configuration parameters in the URL are as follows:
-
-- user: the user name for logging in to TDengine. The default value is 'root'.
-- password: the user login password. The default value is 'taosdata'.
-- cfgdir: client configuration file directory path; the default is '/etc/taos' on Linux and 'C:/TDengine/cfg' on Windows.
-- charset: the character set used by the client; the default is the system character set.
-- locale: the client locale; by default, the system's current locale is used.
-- timezone: the time zone used by the client; the default is the system's current time zone.
-- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is false. Enabling batch pulling can improve query performance when the query data volume is large.
-- batchErrorIgnore: true: when executing Statement's executeBatch, if one SQL statement fails in the middle, the following statements are still executed; false: no statements after the failed SQL are executed. The default value is false.
-
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
-
-**Connect using the TDengine client driver configuration file**
-
-When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster, as below:
-
-1. Do not specify hostname and port in Java applications.
-
-   ```java
-   public Connection getConn() throws Exception{
-     Class.forName("com.taosdata.jdbc.TSDBDriver");
-     String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
-     Properties connProps = new Properties();
-     connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
-     connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
-     connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-     Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
-     return conn;
-   }
-   ```
-
-2. Specify the firstEp and the secondEp in the configuration file taos.cfg.
-
-   ```shell
-   # first fully qualified domain name (FQDN) for TDengine system
-   firstEp cluster_node1:6030
-
-   # second fully qualified domain name (FQDN) for TDengine system, for cluster only
-   secondEp cluster_node2:6030
-
-   # default system charset
-   # charset UTF-8
-
-   # system locale
-   # locale en_US.UTF-8
-   ```
-
-In the above example, JDBC uses the client's configuration file to establish a connection to the hostname `cluster_node1`, port 6030, and the database named `test`. When the firstEp node in the cluster fails, JDBC attempts to connect to the cluster using secondEp.
-
-In TDengine, as long as one of the nodes in firstEp and secondEp is valid, the connection to the cluster can be established normally.
-
-:::note
-The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located: `/etc/taos/taos.cfg` by default on Linux, and `C:/TDengine/cfg/taos.cfg` by default on Windows.
-
-:::
-
-
-
-```java
-Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
-String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
-Connection conn = DriverManager.getConnection(jdbcUrl);
-```
-
-In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to the database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`.
-
-There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required:
-
-1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver".
-2. jdbcUrl starting with "jdbc:TAOS-RS://".
-3. use 6041 as the connection port.
-
-The configuration parameters in the URL are as follows:
-
-- user: the user name for logging in to TDengine; default value 'root'.
-- password: the user login password; default value 'taosdata'.
-- batchfetch: true: pull the result set in batches when executing the query; false: pull the result set row by row. The default value is false. Row-by-row pulling uses HTTP for data transfer. The JDBC REST connection supports bulk data pulling in taos-jdbcdriver-2.0.38 and TDengine 2.4.0.12 and later versions, in which taos-jdbcdriver and TDengine transfer data via a WebSocket connection. Compared with HTTP, WebSocket enables the JDBC REST connection to support large data volume querying and improves query performance.
-- charset: specifies the charset used to parse strings; this parameter is valid only when batchfetch is set to true.
-- batchErrorIgnore: true: when executing Statement's executeBatch, if one SQL statement fails in the middle, the following statements are still executed; false: no statements after the failed SQL are executed. The default value is false.
-
-**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
-
-:::note
-
-- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of tables and super tables in SQL. For example:
-
-```sql
-INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
-```
-
-- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the following SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
-
-:::
-
-
-
-### Specify the URL and Properties to get the connection
-
-In addition to getting the connection from the specified URL, you can use Properties to specify parameters when the connection is established.
-
-**Note**:
-
-- The client parameters set in the application are process-level. If you want to update the client's parameters, you need to restart the application. This is because client parameters are global parameters that take effect only the first time they are set by the application.
-- The following sample code is based on taos-jdbcdriver-2.0.36.
-
-```java
-public Connection getConn() throws Exception{
-  Class.forName("com.taosdata.jdbc.TSDBDriver");
-  String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
-  Properties connProps = new Properties();
-  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
-  connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
-  connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-  connProps.setProperty("debugFlag", "135");
-  connProps.setProperty("maxSQLLength", "1048576");
-  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
-  return conn;
-}
-
-public Connection getRestConn() throws Exception{
-  Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
-  String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
-  Properties connProps = new Properties();
-  connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
-  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
-  return conn;
-}
-```
-
-In the above example, a connection is established to `taosdemo.com`, port 6030/6041, and the database named `test`. The connection specifies the user name as `root` and the password as `taosdata` in the URL, and specifies the character set, locale, time zone, and whether to enable bulk fetching in connProps.
-
-The configuration parameters in properties are as follows:
-
-- TSDBDriver.PROPERTY_KEY_USER: the user name for logging in to TDengine; default value 'root'.
-- TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password; default value 'taosdata'.
-- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batches when executing a query; false: pull the result set row by row. The default value is false.
-- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing Statement's executeBatch, if one SQL statement fails in the middle, the following statements are still executed; false: no statements after the failed SQL are executed. The default value is false.
-- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using a JDBC native connection. Client configuration file directory path; default value `/etc/taos` on Linux, `C:/TDengine/cfg` on Windows.
-- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; the default is the system character set.
-- TSDBDriver.PROPERTY_KEY_LOCALE: only takes effect when using a JDBC native connection. The client locale; the default is the system's current locale.
-- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using a JDBC native connection. The time zone used by the client; the default is the system's current time zone.
-  For JDBC native connections, you can specify other parameters, such as log level and SQL length, via the URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
-
-### Priority of configuration parameters
-
-If a configuration parameter is duplicated in the URL, Properties, or client configuration file, the priority of the parameters, from highest to lowest, is as follows:
-
-1. JDBC URL parameters, as described above, can be specified in the parameters of the JDBC URL.
-2. Properties connProps
-3. The configuration file taos.cfg of the TDengine client driver when using a native connection
-
-For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection.
-
-## Usage examples
-
-### Create database and tables
-
-```java
-Statement stmt = conn.createStatement();
-
-// create database
-stmt.executeUpdate("create database if not exists db");
-
-// use database
-stmt.executeUpdate("use db");
-
-// create table
-stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
-```
-
-> **Note**: If you do not use `use db` to specify the database, all subsequent operations on a table need the database name as a prefix, such as db.tb.
-
-### Insert data
-
-```java
-// insert data
-int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
-
-System.out.println("insert " + affectedRows + " rows.");
-```
-
-> now is an internal function; the default is the current time of the client's computer.
-> `now + 1s` represents the current time of the client plus 1 second; the number is followed by a unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
-
-### Querying data
-
-```java
-// query data
-ResultSet resultSet = stmt.executeQuery("select * from tb");
-
-Timestamp ts = null;
-int temperature = 0;
-float humidity = 0;
-while(resultSet.next()){
-
-    ts = resultSet.getTimestamp(1);
-    temperature = resultSet.getInt(2);
-    humidity = resultSet.getFloat("humidity");
-
-    System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
-}
-```
-
-> Querying is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
-
-### Handling exceptions
-
-After an error is reported, the error message and error code can be obtained through SQLException.
-
-```java
-try (Statement statement = connection.createStatement()) {
-    // executeQuery
-    ResultSet resultSet = statement.executeQuery(sql);
-    // print result
-    printResult(resultSet);
-} catch (SQLException e) {
-    System.out.println("ERROR Message: " + e.getMessage());
-    System.out.println("ERROR Code: " + e.getErrorCode());
-    e.printStackTrace();
-}
-```
-
-There are three types of error codes that the JDBC connector can report:
-
-- Error codes of the JDBC driver itself (error codes between 0x2301 and 0x2350)
-- Error codes of the native connection method (error codes between 0x2351 and 0x2400)
-- Error codes of other TDengine function modules
-
-For specific error codes, please refer to:
-
-- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
-- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h)
-
-### Writing data via parameter binding
-
-TDengine's native JDBC connection implementation has significantly improved its support for data writing (INSERT) scenarios via the bind interface with version 2.1.2.0 and later versions. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
-
-**Note**:
-
-- JDBC REST connections do not currently support the bind interface
-- The following sample code is based on taos-jdbcdriver-2.0.36
-- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
-- Both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
-
-```java
-import com.taosdata.jdbc.TSDBPreparedStatement;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Random;
-
-public class ParameterBindingDemo {
-
-    private static final String host = "127.0.0.1";
-    private static final Random random = new Random(System.currentTimeMillis());
-    private static final int BINARY_COLUMN_SIZE = 20;
-    private static final String[] schemaList = {
-            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
-            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
-            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
-            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
-            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
-    };
-    private static final int numOfSubTable = 10, numOfRow = 10;
-
-    public static void main(String[] args) throws SQLException {
-
-        String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
-        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
-
-        init(conn);
-
-        bindInteger(conn);
-
-        bindFloat(conn);
-
-        bindBoolean(conn);
-
-        bindBytes(conn);
-
-        bindString(conn);
-
-        conn.close();
-    }
-
-    private static void init(Connection conn) throws SQLException {
-        try (Statement stmt = conn.createStatement()) {
-            stmt.execute("drop database if exists test_parabind");
-            stmt.execute("create database if not exists test_parabind");
-            stmt.execute("use test_parabind");
-            for (int i = 0; i < schemaList.length; i++) {
-                stmt.execute(schemaList[i]);
-            }
-        }
-    }
-
-    private static void bindInteger(Connection conn) throws SQLException {
-        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
values(?,?,?,?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t1_" + i); - // set tags - pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); - pstmt.setTagLong(3, random.nextLong()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); - pstmt.setByte(1, f1List); - - ArrayList f2List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); - pstmt.setShort(2, f2List); - - ArrayList f3List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f3List.add(random.nextInt(Integer.MAX_VALUE)); - pstmt.setInt(3, f3List); - - ArrayList f4List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f4List.add(random.nextLong()); - pstmt.setLong(4, f4List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute column - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindFloat(Connection conn) throws SQLException { - String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; - - TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t2_" + i); - // set tags - pstmt.setTagFloat(0, random.nextFloat()); - pstmt.setTagDouble(1, random.nextDouble()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextFloat()); - pstmt.setFloat(1, f1List); - - ArrayList f2List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f2List.add(random.nextDouble()); - pstmt.setDouble(2, f2List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - // close if no try-with-catch statement is used - pstmt.close(); - } - - private static void bindBoolean(Connection conn) throws SQLException { - String sql = "insert into ? using stable3 tags(?) 
values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t3_" + i); - // set tags - pstmt.setTagBoolean(0, random.nextBoolean()); - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - f1List.add(random.nextBoolean()); - pstmt.setBoolean(1, f1List); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindBytes(Connection conn) throws SQLException { - String sql = "insert into ? using stable4 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t4_" + i); - // set tags - pstmt.setTagString(0, new String("abc")); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add(new String("abc")); - } - pstmt.setString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } - - private static void bindString(Connection conn) throws SQLException { - String sql = "insert into ? using stable5 tags(?) values(?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("t5_" + i); - // set tags - pstmt.setTagNString(0, "California-abc"); - - // set columns - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - ArrayList f1List = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) { - f1List.add("California-abc"); - } - pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); - - // add column - pstmt.columnDataAddBatch(); - } - // execute - pstmt.columnDataExecuteBatch(); - } - } -} -``` - -The methods to set TAGS values: - -```java -public void setTagNull(int index, int type) -public void setTagBoolean(int index, boolean value) -public void setTagInt(int index, int value) -public void setTagByte(int index, byte value) -public void setTagShort(int index, short value) -public void setTagLong(int index, long value) -public void setTagTimestamp(int index, long value) -public void setTagFloat(int index, float value) -public void setTagDouble(int index, double value) -public void setTagString(int index, String value) -public void setTagNString(int index, String value) -``` - -The methods to set VALUES columns: - -```java -public void setInt(int columnIndex, ArrayList list) throws SQLException -public void setFloat(int columnIndex, ArrayList list) throws SQLException -public void setTimestamp(int columnIndex, ArrayList list) throws SQLException -public void setLong(int columnIndex, ArrayList list) throws SQLException -public void setDouble(int columnIndex, ArrayList list) throws SQLException -public void setBoolean(int columnIndex, 
-public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
-public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
-public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
-public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
-```
-
-### Schemaless Writing
-
-Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's line protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.
-
-**Note**:
-
-- JDBC REST connections do not currently support schemaless writes
-- The following sample code is based on taos-jdbcdriver-2.0.36
-
-```java
-public class SchemalessInsertTest {
-    private static final String host = "127.0.0.1";
-    private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
-    private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
-    private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
-
-    public static void main(String[] args) throws SQLException {
-        final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
-        try (Connection connection = DriverManager.getConnection(url)) {
-            init(connection);
-
-            SchemalessWriter writer = new SchemalessWriter(connection);
-            writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
-            writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
-            writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
-        }
-    }
-
-    private static void init(Connection connection) throws SQLException {
-        try (Statement stmt = connection.createStatement()) {
-            stmt.executeUpdate("drop database if exists test_schemaless");
-            stmt.executeUpdate("create database if not exists test_schemaless");
-            stmt.executeUpdate("use test_schemaless");
-        }
-    }
-}
-```
-
-### Subscriptions
-
-The TDengine Java Connector supports subscription functionality with the following application API.
-
-#### Create subscriptions
-
-```java
-TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false);
-```
-
-The three parameters of the `subscribe()` method have the following meanings:
-
-- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription.
-- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and data can be queried only in temporal order.
-- restart: if the subscription already exists, whether to restart it or continue the previous subscription
-
-The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
-
-#### Subscribe to consume data
-
-```java
-int total = 0;
-while(true) {
-    TSDBResultSet rs = sub.consume();
-    int count = 0;
-    while(rs.next()) {
-        count++;
-    }
-    total += count;
-    System.out.printf("%d rows consumed, total %d\n", count, total);
-    Thread.sleep(1000);
-}
-```
-
-The `consume()` method returns a result set containing all new data since the last `consume()`. Be sure to choose a reasonable frequency for calling `consume()` as needed (e.g. `Thread.sleep(1000)` in the example); otherwise, it will put unnecessary stress on the server side.
-
-#### Close subscriptions
-
-```java
-sub.close(true);
-```
-
-The `close()` method closes a subscription. If its argument is `true`, the subscription progress information is retained, and a subscription with the same name can be created later to continue consuming data; if it is `false`, the subscription progress is not retained.
-
-### Closing resources
-
-```java
-resultSet.close();
-stmt.close();
-conn.close();
-```
-
-> **Be sure to close the connection**; otherwise, there will be a connection leak.
-
-### Use with connection pool
-
-#### HikariCP
-
-Example usage is as follows.
-
-```java
-public static void main(String[] args) throws SQLException {
-    HikariConfig config = new HikariConfig();
-    // jdbc properties
-    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
-    config.setUsername("root");
-    config.setPassword("taosdata");
-    // connection pool configurations
-    config.setMinimumIdle(10);           // minimum number of idle connections
-    config.setMaximumPoolSize(10);       // maximum number of connections in the pool
-    config.setConnectionTimeout(30000);  // maximum wait in milliseconds to get a connection from the pool
-    config.setMaxLifetime(0);            // maximum lifetime for each connection
-    config.setIdleTimeout(0);            // maximum idle time before an idle connection is recycled
-    config.setConnectionTestQuery("select server_status()"); // validation query
-
-    HikariDataSource ds = new HikariDataSource(config); // create datasource
-
-    Connection connection = ds.getConnection(); // get connection
-    Statement statement = connection.createStatement(); // get statement
-
-    // query or insert
-    // ...
-
-    connection.close(); // put back into the connection pool
-}
-```
-
-> After calling getConnection(), you need to call the close() method when you finish using the connection; it does not close the connection, it just puts it back into the connection pool.
-> For more questions about using HikariCP, please see the [official instructions](https://github.com/brettwooldridge/HikariCP).
-
-#### Druid
-
-Example usage is as follows.
-
-```java
-public static void main(String[] args) throws Exception {
-
-    DruidDataSource dataSource = new DruidDataSource();
-    // jdbc properties
-    dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
-    dataSource.setUrl(url);
-    dataSource.setUsername("root");
-    dataSource.setPassword("taosdata");
-    // pool configurations
-    dataSource.setInitialSize(10);
-    dataSource.setMinIdle(10);
-    dataSource.setMaxActive(10);
-    dataSource.setMaxWait(30000);
-    dataSource.setValidationQuery("select server_status()");
-
-    Connection connection = dataSource.getConnection(); // get connection
-    Statement statement = connection.createStatement(); // get statement
-    // query or insert
-    // ...
-
-    connection.close(); // put back into the connection pool
-}
-```
-
-> For more questions about using Druid, please see the [official instructions](https://github.com/alibaba/druid).
-
-**Caution:**
-
-- TDengine `v1.6.4.1` provides a special function `select server_status()` for heartbeat detection, so it is recommended to use `select server_status()` as the validation query when using connection pooling.
-
-As you can see below, `select server_status()` returns `1` on successful execution.
-
-```sql
-taos> select server_status();
-server_status()|
-================
-1              |
-Query OK, 1 row(s) in set (0.000141s)
-```
-
-### More sample programs
-
-The source code of the sample applications is under `TDengine/examples/JDBC`:
-
-- JDBCDemo: JDBC sample source code.
-- JDBCConnectorChecker: JDBC installation checker source and jar package.
-- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, and c3p0.
-- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
-- mybatisplus-demo: using taos-jdbcdriver in Spring Boot + MyBatis.
-
-Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC)
-
-## Recent update logs
-
-| taos-jdbcdriver version | major changes                                |
-| :---------------------: | :------------------------------------------: |
-| 2.0.38                  | JDBC REST connections add bulk pull function |
-| 2.0.37                  | Added support for JSON tags                  |
-| 2.0.36                  | Added support for schemaless writing         |
-
-## Frequently Asked Questions
-
-1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform batch data writing/updates?
-
-   **Cause**: In TDengine's JDBC implementation, SQL statements submitted by the `addBatch()` method are executed sequentially in the order they are added, which does not reduce the number of interactions with the server and does not bring performance improvements.
-
-   **Solution**: 1. splice multiple values into a single insert statement; 2. use multi-threaded concurrent insertion; 3. use parameter-bound writing
-
-2. java.lang.UnsatisfiedLinkError: no taos in java.library.path
-
-   **Cause**: The program did not find the dependent native library `taos`.
-
-   **Solution**: On Windows, you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory; on Linux, creating the following soft link will work: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
-
-3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on an IA 32-bit platform
-
-   **Cause**: Currently, TDengine only supports 64-bit JDK.
-
-   **Solution**: Reinstall the 64-bit JDK.
-
-For other questions, please refer to [FAQ](/train-faq/faq)
-
-## API Reference
-
-[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
diff --git a/docs-en/14-reference/03-connector/php.mdx b/docs-en/14-reference/03-connector/php.mdx
deleted file mode 100644
index 839a5c8c3cd27f39b234b51aab4d41ad05e93fbc..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/03-connector/php.mdx
+++ /dev/null
@@ -1,150 +0,0 @@
----
-sidebar_position: 1
-sidebar_label: PHP
-title: PHP Connector
----
-
-`php-tdengine` is the TDengine PHP connector provided by the TDengine community. In particular, it supports Swoole coroutines.
-
-The PHP connector relies on the TDengine client driver.
-
-Project Repository: <https://github.com/Yurunsoft/php-tdengine>
-
-After the TDengine client or server is installed, `taos.h` is located at:
-
-- Linux: `/usr/local/taos/include`
-- Windows: `C:\TDengine\include`
-
-The TDengine client driver is located at:
-
-- Linux: `/usr/local/taos/driver/libtaos.so`
-- Windows: `C:\TDengine\taos.dll`
-
-## Supported Platforms
-
-- Windows, Linux, macOS
-
-- PHP >= 7.4
-
-- TDengine >= 2.0
-
-- Swoole >= 4.8 (Optional)
-
-## Supported Versions
-
-Because the version of the TDengine client driver is tightly associated with that of the TDengine server, it is strongly suggested to use a client driver of the same version as the TDengine server, even though the client driver can work with a TDengine server as long as the first three sections of their version numbers match.
-
-## Installation
-
-### Install TDengine Client Driver
-
-Regarding how to install the TDengine client driver, please refer to [Install Client Driver](/reference/connector#installation-steps)
-
-### Install php-tdengine
-
-**Download the source code package and unzip it:**
-
-```shell
-curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
-&& mkdir php-tdengine \
-&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
-```
-
-> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
-
-**Non-Swoole Environment:**
-
-```shell
-phpize && ./configure && make -j && make install
-```
-
-**Specify the TDengine location:**
-
-```shell
-phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
-```
-
-> `--with-tdengine-dir=` is followed by the TDengine location.
-> It is useful when the TDengine installation location cannot be found automatically, or on macOS.
-
-**Swoole Environment:**
-
-```shell
-phpize && ./configure --enable-swoole && make -j && make install
-```
-
-**Enable Extension:**
-
-Option One: Add `extension=tdengine` in `php.ini`.
-
-Option Two: Use the CLI `php -dextension=tdengine test.php`.
-
-## Sample Programs
-
-In this section, a few sample programs that use the TDengine PHP connector to access a TDengine cluster are demonstrated.
-
-> Any error will throw the exception `TDengine\Exception\TDengineException`.
-
-### Establish Connection
-
-<details>
-<summary>Establish Connection</summary>
-
-```php
-{{#include docs-examples/php/connect.php}}
-```
-
-</details>
- -### Insert Data - -
-<details>
-<summary>Insert Data</summary>
-
-```php
-{{#include docs-examples/php/insert.php}}
-```
-
-</details>
- -### Synchronous Query - -
-<details>
-<summary>Synchronous Query</summary>
-
-```php
-{{#include docs-examples/php/query.php}}
-```
-
-</details>
- -### Parameter Binding - -
-<details>
-<summary>Parameter Binding</summary>
-
-```php
-{{#include docs-examples/php/insert_stmt.php}}
-```
-
-</details>
-
-## Constants
-
-| Constant                            | Description |
-| ----------------------------------- | ----------- |
-| `TDengine\TSDB_DATA_TYPE_NULL`      | null        |
-| `TDengine\TSDB_DATA_TYPE_BOOL`      | bool        |
-| `TDengine\TSDB_DATA_TYPE_TINYINT`   | tinyint     |
-| `TDengine\TSDB_DATA_TYPE_SMALLINT`  | smallint    |
-| `TDengine\TSDB_DATA_TYPE_INT`       | int         |
-| `TDengine\TSDB_DATA_TYPE_BIGINT`    | bigint      |
-| `TDengine\TSDB_DATA_TYPE_FLOAT`     | float       |
-| `TDengine\TSDB_DATA_TYPE_DOUBLE`    | double      |
-| `TDengine\TSDB_DATA_TYPE_BINARY`    | binary      |
-| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp   |
-| `TDengine\TSDB_DATA_TYPE_NCHAR`     | nchar       |
-| `TDengine\TSDB_DATA_TYPE_UTINYINT`  | utinyint    |
-| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint   |
-| `TDengine\TSDB_DATA_TYPE_UINT`      | uint        |
-| `TDengine\TSDB_DATA_TYPE_UBIGINT`   | ubigint     |
diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx
deleted file mode 100644
index 58b94f13ae0f08404cef328834ef1c925c307816..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/03-connector/python.mdx
+++ /dev/null
@@ -1,345 +0,0 @@
----
-sidebar_position: 3
-sidebar_label: Python
-title: TDengine Python Connector
-description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. taospy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of taospy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-
-`taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
-In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). This makes it easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
-
-The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
-
-The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
-
-## Supported Platforms
-
-- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
-- REST connections are supported on all platforms that can run Python.
-
-## Version selection
-
-We recommend using the latest version of `taospy`, regardless of the version of TDengine.
-
-## Supported features
-
-- Native connections support all the core features of TDengine, including connection management, SQL execution, the bind interface, subscriptions, and schemaless writing.
-- REST connections support features such as connection management and SQL execution. (SQL execution allows you to manage databases, tables, and supertables, write data, query data, create continuous queries, etc.)
-
-## Installation
-
-### Preparation
-
-1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
-2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to the [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
-
-If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
-
-### Install via pip
-
-#### Uninstalling an older version
-
-If you have installed an older version of the Python connector, please uninstall it beforehand.
-
-```
-pip3 uninstall taos taospy
-```
-
-:::note
-Earlier TDengine client software included the Python connector. If the Python connector was installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it doesn't matter if the package doesn't exist.
-
-:::
-
-#### To install `taospy`
-
-
-
-Install the latest version:
-
-```
-pip3 install taospy
-```
-
-You can also specify a particular version to install:
-
-```
-pip3 install taospy==2.3.0
-```
-
-
-
-```
-pip3 install git+https://github.com/taosdata/taos-connector-python.git
-```
-
-
-
-### Installation verification
-
-
-
-For a native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type:
-
-```python
-import taos
-```
-
-
-
-For REST connections, verify that the `taosrest` module can be imported successfully. In the Python Interactive Shell, type:
-
-```python
-import taosrest
-```
-
-
-
-:::tip
-If you have multiple versions of Python on your system, you may have various `pip` commands. Be sure to use the correct path for the `pip` command. Above, we used the `pip3` command, which rules out the possibility of using the `pip` corresponding to Python 2.x versions. However, if you have more than one version of Python 3.x on your system, you still need to check that the installation path is correct. The easiest way to verify this is to type `pip3 install taospy` again on the command line; it will print out the exact location of `taospy`, for example, on Windows:
-
-```
-C:\> pip3 install taospy
-Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
-Requirement already satisfied: taospy in c:\users\username\appdata\local\programs\python\python310\lib\site-packages (2.3.0)
-```
-
-:::
-
-## Establish connection
-
-### Connectivity testing
-
-Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
-
-
-
-Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a standalone version) can be resolved locally, by testing with the `ping` command:
-
-```
-ping <FQDN>
-```
-
-Then test if the cluster can be appropriately connected with the TDengine CLI:
-
-```
-taos -h <FQDN> -P <PORT>
-```
-
-The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the serverPort corresponding to this dnode.
-
-For REST connections, make sure that the cluster and the taosAdapter component are running. This can be tested using the following `curl` command:
-
-```
-curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
-```
-
-The FQDN above is the FQDN of the machine running taosAdapter, and the PORT is the port taosAdapter is listening on (default `6041`).
-If the test is successful, it will output the server version information, e.g.
-
-```json
-{
-  "status": "succ",
-  "head": ["server_version()"],
-  "column_meta": [["server_version()", 8, 8]],
-  "data": [["2.4.0.16"]],
-  "rows": 1
-}
-```
-
-### Using connectors to establish connections
-
-The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
-
-```python
-{{#include docs-examples/python/connect_native_reference.py}}
-```
-
-All arguments of the `connect()` function are optional keyword arguments. The connection parameters are as follows:
-
-- `host` : The FQDN of the node to connect to. There is no default value. If this parameter is not provided, the firstEP in the client configuration file will be used.
-- `user` : The TDengine user name. The default value is `root`.
-- `password` : The TDengine user password. The default value is `taosdata`.
-- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which only takes effect if the host parameter is provided.
-- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux systems.
-- `timezone` : The timezone used to convert the TIMESTAMP data in the query results to Python `datetime` objects. The default is the local timezone.
-
-:::warning
-`config` and `timezone` are both process-level configurations. We recommend that all connections made by a process use the same parameter values. Otherwise, unpredictable errors may occur.
-:::
-
-:::tip
-The `connect()` function returns a `taos.TaosConnection` instance. In client-side multi-threaded scenarios, we recommend that each thread request a separate connection instance rather than sharing a connection between multiple threads.
-
-:::
-
-```python
-{{#include docs-examples/python/connect_rest_examples.py:connect}}
-```
-
-All arguments to the `connect()` function are optional keyword arguments. The connection parameters are as follows:
-
-- `url`: The URL of the taosAdapter REST service. The default is `http://localhost:6041`.
-- `user`: TDengine user name. The default is `root`.
-- `password`: TDengine user password. The default is `taosdata`.
-- `timeout`: HTTP request timeout in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
-
-## Sample program
-
-### Basic Usage
-
-##### TaosConnection class
-
-The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
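-
-Before the rendered examples below, here is a minimal sketch of the typical flow using these extension methods. The database and table names are illustrative only and are not part of the official examples.
-
-```python
-import taos
-
-# assumes a local TDengine instance with the default credentials
-conn = taos.connect(host="localhost", user="root", password="taosdata")
-conn.execute("CREATE DATABASE IF NOT EXISTS demo")
-conn.execute("CREATE TABLE IF NOT EXISTS demo.tb (ts TIMESTAMP, v INT)")
-conn.execute("INSERT INTO demo.tb VALUES (now, 1)")
-
-result = conn.query("SELECT * FROM demo.tb")
-print(result.fetch_all())  # results can be fetched only once, see the tip below
-
-conn.close()
-```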
-
-```python title="execute method"
-{{#include docs-examples/python/connection_usage_native_reference.py:insert}}
-```
-
-```python title="query method"
-{{#include docs-examples/python/connection_usage_native_reference.py:query}}
-```
-
-:::tip
-The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
-:::
-
-##### Use of the TaosResult class
-
-In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). These two methods are more efficient when a query returns a large amount of data.
-
-```python title="blocks_iter method"
-{{#include docs-examples/python/result_set_examples.py}}
-```
-
-##### Use of the TaosCursor class
-
-The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class.
-
-```python title="Use of TaosCursor"
-{{#include docs-examples/python/cursor_usage_native_reference.py}}
-```
-
-:::note
-The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, a cursor instance must remain exclusive to a single thread and cannot be shared across threads; otherwise, the returned results may be incorrect.
-
-:::
-
-##### Use of the TaosRestCursor class
-
-The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
-
-```python title="Use of TaosRestCursor"
-{{#include docs-examples/python/connect_rest_examples.py:basic}}
-```
-
-- `cursor.execute` : Used to execute arbitrary SQL statements.
-- `cursor.rowcount` : For write operations, returns the number of rows successfully written. For query operations, returns the number of rows in the result set.
-- `cursor.description` : Returns the description of the fields. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
-
-##### Use of the RestClient class
-
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
-
-```python title="Use of RestClient"
-{{#include docs-examples/python/rest_client_example.py}}
-```
-
-For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
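-
-As a quick illustration, a minimal sketch might look like the following; it assumes taosAdapter is reachable at the default URL, and the constructor arguments mirror the `connect()` parameters described above.
-
-```python
-from taosrest import RestClient
-
-# assumes taosAdapter is running locally with default credentials
-client = RestClient("http://localhost:6041", user="root", password="taosdata")
-res = client.sql("SELECT server_version()")
-print(res)  # a dict mirroring the REST response: status, head, data, rows, ...
-```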
-
-### Used with pandas
-
-```python
-{{#include docs-examples/python/conn_native_pandas.py}}
-```
-
-```python
-{{#include docs-examples/python/conn_rest_pandas.py}}
-```
-
-### Other sample programs
-
-| Example program links | Example program content |
-| -------------------------------------------------------------------------------------------------------------- | --------------------------------------------- |
-| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
-| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at a time |
-| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
-| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
-| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | Asynchronous subscription |
-| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | Synchronous subscription |
-
-## Other notes
-
-### Exception handling
-
-All errors from database operations are thrown directly as exceptions, and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
-
-```python
-{{#include docs-examples/python/handle_exception.py}}
-```
-
-### About nanoseconds
-
-Because Python's nanosecond support is still incomplete (see the links below), the current implementation returns integers for nanosecond-precision timestamps instead of the `datetime` objects produced for the `ms` and `us` precisions. Application developers need to handle the conversion themselves; pandas' `to_datetime()` is recommended. The Python connector may modify this interface in the future if Python officially adds full nanosecond support.
-
-1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
-2. https://www.python.org/dev/peps/pep-0564/
-
-## Frequently Asked Questions
-
-Welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues).
-
-## Important Update
-
-| Connector version | Important Update | Release date |
-| ----------------- | ------------------------------------------------------------------------------ | ------------ |
-| 2.3.1 | 1. support TDengine REST API; 2. remove support for Python versions below 3.6 | 2022-04-28 |
-| 2.2.5 | support timezone option when connecting | 2022-04-13 |
-| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |
-
-[Release Notes](https://github.com/taosdata/taos-connector-python/releases)
-
-## API Reference
-
-- [taos](https://docs.taosdata.com/api/taospy/taos/)
-- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)
diff --git a/docs-en/14-reference/03-connector/rust.mdx b/docs-en/14-reference/03-connector/rust.mdx
deleted file mode 100644
index a5cbaeac8077cda42690d9cc232062a685a51f41..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/03-connector/rust.mdx
+++ /dev/null
@@ -1,384 +0,0 @@
----
-toc_max_heading_level: 4
-sidebar_position: 5
-sidebar_label: Rust
-title: TDengine Rust Connector
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-import Preparation from "./_preparation.mdx"
-import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
-import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
-import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
-import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
-import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
-
-`libtaos` is the official Rust language connector for TDengine. Rust developers can use it to develop applications that access TDengine instance data.
-
-`libtaos` provides two ways to establish connections. One is the **native connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **REST connection**, which connects to TDengine instances via taosAdapter's REST interface.
-
-The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs).
-
-## Supported platforms
-
-The platforms supported by native connections are the same as those supported by the TDengine client driver.
-REST connections are supported on all platforms that can run Rust.
-
-## Version support
-
-Please refer to the [version support list](/reference/connector#version-support).
-
-The Rust connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
-
-## Installation
-
-### Pre-installation
-
-* Install the Rust development toolchain.
-* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver).
-
-### Adding libtaos dependencies
-
-Add the [libtaos][libtaos] dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
-
-Add [libtaos][libtaos] to the `Cargo.toml` file.
-
-```toml
-[dependencies]
-# use default feature
-libtaos = "*"
-```
-
-Add [libtaos][libtaos] to the `Cargo.toml` file and enable the `rest` feature.
-
-```toml
-[dependencies]
-# use rest feature
-libtaos = { version = "*", features = ["rest"]}
-```
-
-### Using connection pools
-
-Please enable the `r2d2` feature in `Cargo.toml`.
-
-```toml
-[dependencies]
-# with taosc
-libtaos = { version = "*", features = ["r2d2"] }
-# or rest
-libtaos = { version = "*", features = ["rest", "r2d2"] }
-```
-
-## Create a connection
-
-The [TaosCfgBuilder] provides the user with an API in the form of a constructor for the subsequent creation of connections or use of connection pools.
-
-```rust
-let cfg: TaosCfg = TaosCfgBuilder::default()
-    .ip("127.0.0.1")
-    .user("root")
-    .pass("taosdata")
-    .db("log") // do not set if a default database is not required
-    .port(6030u16)
-    .build()
-    .expect("TaosCfg builder error");
-```
-
-You can now use this object to create the connection.
-
-```rust
-let conn = cfg.connect()?;
-```
-
-More than one connection object can be created from the same configuration.
-
-```rust
-let conn = cfg.connect()?;
-let conn2 = cfg.connect()?;
-```
-
-You can use connection pools in applications.
-
-```rust
-let pool = r2d2::Pool::builder()
-    .max_size(10000) // max connections
-    .build(cfg)?;
-
-// ...
-// Use the pool to get a connection
-let conn = pool.get()?;
-```
-
-After that, you can perform the following operations on the database.
-
-```rust
-async fn demo() -> Result<(), Error> {
-    // get connection ...
-
-    // create database
-    conn.exec("create database if not exists demo").await?;
-    // change database context
-    conn.exec("use demo").await?;
-    // create table
-    conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?;
-    // insert
-    conn.exec("insert into tb1 values(now, 1)").await?;
-    // query
-    let rows = conn.query("select * from tb1").await?;
-    for row in rows.rows {
-        // join() requires `use itertools::Itertools;`
-        println!("{}", row.into_iter().join(","));
-    }
-    Ok(())
-}
-```
-
-## Usage examples
-
-### Write data
-
-#### SQL Write
-
-#### InfluxDB line protocol write
-
-#### OpenTSDB Telnet line protocol write
-
-#### OpenTSDB JSON line protocol write
-
-### Query data
-
-### More sample programs
-
-| Program Path | Program Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------ |
-| [demo.rs] | Basic API usage examples |
-| [bailongma-rs] | Using TDengine as the storage backend for the Prometheus remote storage API adapter, using the r2d2 connection pool |
-
-## API Reference
-
-### Connection constructor API
-
-The [Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) constructor pattern is Rust's solution for handling complex data types or optional configuration types. The [libtaos] implementation uses the connection constructor [TaosCfgBuilder] as the entry point for the TDengine Rust connector. The [TaosCfgBuilder] provides optional configuration of servers, ports, databases, usernames, passwords, etc.
-
-Using the `default()` method, you can construct a [TaosCfg] with default parameters for subsequent connections to the database or establishing connection pools.
-
-```rust
-let cfg = TaosCfgBuilder::default().build()?;
-```
-
-Using the constructor pattern, the user can set options on demand.
-
-```rust
-let cfg = TaosCfgBuilder::default()
-    .ip("127.0.0.1")
-    .user("root")
-    .pass("taosdata")
-    .db("log")
-    .port(6030u16)
-    .build()?;
-```
-
-Create a TDengine connection using the [TaosCfg] object.
-
-```rust
-let conn: Taos = cfg.connect()?;
-```
-
-### Connection pooling
-
-In complex applications, we recommend enabling connection pools. The connection pool for [libtaos] is implemented using [r2d2].
-
-As follows, a connection pool with default parameters can be generated.
-
-```rust
-let pool = r2d2::Pool::new(cfg)?;
-```
-
-You can set the same connection pool parameters using the connection pool's constructor.
-
-```rust
-use std::time::Duration;
-let pool = r2d2::Pool::builder()
-    .max_size(5000) // max connections
-    .max_lifetime(Some(Duration::from_secs(100 * 60))) // lifetime of each connection
-    .min_idle(Some(1000)) // minimum idle connections
-    .connection_timeout(Duration::from_secs(2 * 60))
-    .build(cfg);
-```
-
-In the application code, use `pool.get()?` to get a connection object [Taos].
-
-```rust
-let taos = pool.get()?;
-```
-
-The [Taos] structure is the connection manager in [libtaos] and provides two main APIs.
-
-1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc.
-
-   ```rust
-   taos.exec("create database if not exists demo").await?;
-   ```
-
-2. `query`: Execute the query statement and return the [TaosQueryData] object.
-
-   ```rust
-   let q = taos.query("select * from log.logs").await?;
-   ```
-
-   The [TaosQueryData] object stores the query result data and basic information about the returned columns (column name, type, length).
-
-   Column information is stored using [ColumnMeta].
-
-   ```rust
-   let cols = &q.column_meta;
-   for col in cols {
-       println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes);
-   }
-   ```
-
-   Data is fetched row by row.
-
-   ```rust
-   for (i, row) in q.rows.iter().enumerate() {
-       for (j, cell) in row.iter().enumerate() {
-           println!("cell({}, {}) data: {}", i, j, cell);
-       }
-   }
-   ```
-
-Note that Rust asynchronous functions and an asynchronous runtime are required.
-
-[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks.
-
-- `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure.
-- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
-- `.use_database(database: &str)`: Executes the `USE` statement.
-
-In addition, this structure is also the entry point for the [Parameter Binding](#Bind Interface) and [Line Protocol Interface](#Line protocol interface). Please refer to the specific API descriptions for usage.
-
-### Bind Interface
-
-Similar to the C interface, Rust provides a wrapper around the bind interface. First, create a bind object [Stmt] for a SQL statement from the [Taos] object.
-
-```rust
-let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?;
-```
-
-The bind object provides a set of interfaces for implementing parameter binding.
-
-##### `.set_tbname(tbname: impl ToCString)`
-
-To bind table names.
-
-##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
-
-Bind sub-table names and tag values when the SQL statement uses a super table.
-
-```rust
-let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?, ?)")?;
-// tags can be created with any supported type; here is an example using JSON
-let v = Field::Json(serde_json::from_str("{\"tag1\":\"one, two, three, four, five, six, seven, eight, nine, ten\"}").unwrap());
-stmt.set_tbname_tags("tb0", [&v])?;
-```
-
-##### `.bind(params: impl IntoParams)`
-
-Bind value types. Use the [Field] structure to construct the desired type and bind.
-
-```rust
-let ts = Field::Timestamp(Timestamp::now());
-let value = Field::Float(0.0);
-stmt.bind(vec![ts, value].iter())?;
-```
-
-##### `.execute()`
-
-Execute the SQL statement. A [Stmt] object can be reused, re-bound, and executed again after execution.
-
-```rust
-stmt.execute()?;
-
-// next bind cycle.
-// stmt.set_tbname()?;
-// stmt.bind()?;
-// stmt.execute()?;
-```
-
-### Line protocol interface
-
-The line protocol interface supports multiple protocols and timestamp precisions. The constants used to select them are defined in the schemaless module and need to be imported.
-
-```rust
-use libtaos::*;
-use libtaos::schemaless::*;
-```
-
-- InfluxDB line protocol
-
-  ```rust
-  let lines = [
-      "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000",
-      "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000",
-  ];
-  taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?;
-  ```
-
-- OpenTSDB Telnet Protocol
-
-  ```rust
-  let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
-  taos.schemaless_insert(&lines, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
-  ```
-
-- OpenTSDB JSON protocol
-
-  ```rust
-  let lines = [r#"
-    {
-        "metric": "st",
-        "timestamp": 1626006833,
-        "value": 10,
-        "tags": {
-            "t1": true,
-            "t2": false,
-            "t3": 10,
-            "t4": "123_abc_.! @#$%^&*:;,. /? |+-=()[]{}<>"
-        }
-    }"#];
-  taos.schemaless_insert(&lines, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
-  ```
-
-For usage instructions on other related structures and APIs, please see the Rust documentation hosting page: <https://docs.rs/libtaos>.
-
-[libtaos]: https://github.com/taosdata/libtaos-rs
-[tdengine]: https://github.com/taosdata/TDengine
-[bailongma-rs]: https://github.com/taosdata/bailongma-rs
-[r2d2]: https://crates.io/crates/r2d2
-[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
-[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
-[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
-[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
-[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
-[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
-[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md
deleted file mode 100644
index 3264124655e7040e1d94b43500a0b582d95cb5a1..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/04-taosadapter.md
+++ /dev/null
@@ -1,337 +0,0 @@
----
-title: "taosAdapter"
-description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine."
-sidebar_label: "taosAdapter"
----
-
-import Prometheus from "./_prometheus.mdx"
-import CollectD from "./_collectd.mdx"
-import StatsD from "./_statsd.mdx"
-import Icinga2 from "./_icinga2.mdx"
-import TCollector from "./_tcollector.mdx"
-
-taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface that allows InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
-
-taosAdapter provides the following features.
-
-- RESTful interface
-- InfluxDB v1 compliant write interface
-- Compatible with OpenTSDB JSON and telnet format writes
-- Seamless connection to Telegraf
-- Seamless connection to collectd
-- Seamless connection to StatsD
-- Supports Prometheus remote_read and remote_write
-
-## taosAdapter architecture diagram
-
-![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp)
-
-## taosAdapter Deployment Method
-
-### Install taosAdapter
-
-taosAdapter has been part of the TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need any additional steps to install taosAdapter. You can download the TDengine server installation package from the [TDengine official website](https://tdengine.com/all-downloads/) (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
-
-### Start/Stop taosAdapter
-
-On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and the command `systemctl stop taosadapter` to stop it.
-
-### Remove taosAdapter
-
-Use the command `rmtaos` to remove the TDengine server software, including taosAdapter, if you installed from the tar.gz package. If you installed using a .deb or .rpm package, use the corresponding command for your package manager, such as apt or rpm, to remove the TDengine server, including taosAdapter.
-
-### Upgrade taosAdapter
-
-taosAdapter and the TDengine server need to use the same version. Please upgrade taosAdapter by upgrading the TDengine server.
-A taosAdapter deployed separately from the TDengine server must be upgraded by upgrading the TDengine server package on its own server.
-
-## taosAdapter parameter list
-
-taosAdapter is configurable via command-line arguments, environment variables, and configuration files. The default configuration file is /etc/taos/taosadapter.toml on Linux.
-
-Command-line arguments take precedence over environment variables, which take precedence over configuration files. The command-line usage is arg=val, e.g., taosadapter -p=30000 --debug=true. The detailed list is as follows:
-
-```shell
-Usage of taosAdapter:
-      --collectd.db string                 collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
-      --collectd.enable                    enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
-      --collectd.password string           collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
-      --collectd.port int                  collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
-      --collectd.user string               collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
-      --collectd.worker int                collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-  -c, --config string                      config path default /etc/taos/taosadapter.toml
-      --cors.allowAllOrigins               cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
-      --cors.allowCredentials              cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
-      --cors.allowHeaders stringArray      cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
-      --cors.allowOrigins stringArray      cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
-      --cors.allowWebSockets               cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
-      --cors.exposeHeaders stringArray     cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
-      --debug                              enable debug mode. Env "TAOS_ADAPTER_DEBUG"
-      --help                               Print this help message and exit
-      --influxdb.enable                    enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
-      --log.path string                    log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
-      --log.rotationCount uint             log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
-      --log.rotationSize string            log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
-      --log.rotationTime duration          log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
-      --logLevel string                    log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
-      --monitor.collectDuration duration   Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s)
-      --monitor.identity string            The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY"
-      --monitor.incgroup                   Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP"
-      --monitor.password string            TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata")
-      --monitor.pauseAllMemoryThreshold float     Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
-      --monitor.pauseQueryMemoryThreshold float   Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
-      --monitor.user string                TDengine user. Env "TAOS_MONITOR_USER" (default "root")
-      --monitor.writeInterval duration     Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s)
-      --monitor.writeToTD                  Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true)
-      --node_exporter.caCertFile string    node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
-      --node_exporter.certFile string      node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
-      --node_exporter.db string            node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
-      --node_exporter.enable               enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
-      --node_exporter.gatherDuration duration     node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
-      --node_exporter.httpBearerTokenString string   node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
-      --node_exporter.httpPassword string  node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
-      --node_exporter.httpUsername string  node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
-      --node_exporter.insecureSkipVerify   node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
-      --node_exporter.keyFile string       node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
-      --node_exporter.password string      node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
-      --node_exporter.responseTimeout duration    node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
-      --node_exporter.urls strings         node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
-      --node_exporter.user string          node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
-      --opentsdb.enable                    enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
-      --opentsdb_telnet.dbs strings        opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
-      --opentsdb_telnet.enable             enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
-      --opentsdb_telnet.maxTCPConnections int     max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
-      --opentsdb_telnet.password string    opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
-      --opentsdb_telnet.ports ints         opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
-      --opentsdb_telnet.tcpKeepAlive       enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
-      --opentsdb_telnet.user string        opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
-      --pool.idleTimeout duration          Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
-      --pool.maxConnect int                max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
-      --pool.maxIdle int                   max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
-  -P, --port int                           http port. Env "TAOS_ADAPTER_PORT" (default 6041)
-      --prometheus.enable                  enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
-      --restfulRowLimit int                restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
-      --ssl.certFile string                ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE"
-      --ssl.enable                         enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE"
-      --ssl.keyFile string                 ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE"
-      --statsd.allowPendingMessages int    statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
-      --statsd.db string                   statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
-      --statsd.deleteCounters              statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
-      --statsd.deleteGauges                statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
-      --statsd.deleteSets                  statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
-      --statsd.deleteTimings               statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
-      --statsd.enable                      enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
-      --statsd.gatherInterval duration     statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
-      --statsd.maxTCPConnections int       statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
-      --statsd.password string             statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
-      --statsd.port int                    statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
-      --statsd.protocol string             statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
-      --statsd.tcpKeepAlive                enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
-      --statsd.user string                 statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
-      --statsd.worker int                  statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
-      --taosConfigDir string               load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
-      --version                            Print the version and exit
-```
-
-Note:
-Please set the following Cross-Origin Resource Sharing (CORS) parameters according to the actual situation when making interface calls from a browser.
-
-```text
-AllowAllOrigins
-AllowOrigins
-AllowHeaders
-ExposeHeaders
-AllowCredentials
-AllowWebSockets
-```
-
-If you do not make interface calls through a browser, you do not need to worry about these configurations.
-
-For details on the CORS protocol, please refer to: [https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) or [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS).
-
-See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml) for sample configuration files.
-
-## Feature List
-
-- Compatible with RESTful interfaces [REST API](/reference/rest-api/)
-- Compatible with InfluxDB v1 write interface
-  [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
-- Compatible with OpenTSDB JSON and telnet format writes
-- Seamless connection to collectd
-  collectd is a system statistics collection daemon. Please visit [https://collectd.org/](https://collectd.org/) for more information.
-- Seamless connection with StatsD
-  StatsD is a simple yet powerful daemon for aggregating statistical information. Please visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information.
-- Seamless connection with icinga2
-  icinga2 is a software that collects inspection result metrics and performance data. Please visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information.
-- Seamless connection to TCollector
-  TCollector is a client process that collects data from a local collector and pushes the data to OpenTSDB. Please visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information.
-- Seamless connection to node_exporter
-  node_exporter is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
-- Support for Prometheus remote_read and remote_write
-  remote_read and remote_write are interfaces for Prometheus to read and write data from/to other data storage solutions. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
-
-## Interfaces
-
-### TDengine RESTful interface
-
-You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/`. See the [official documentation](/reference/connector#restful) for details. The following EndPoints are supported.
-
-```text
-/rest/sql
-/rest/sqlt
-/rest/sqlutc
-```
-
-### InfluxDB
-
-You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows:
-
-```text
-/influxdb/v1/write
-```
-
-The following InfluxDB query parameters are supported.
-
-- `db` Specifies the database name used by TDengine
-- `precision` The time precision used by TDengine
-- `u` TDengine user name
-- `p` TDengine password
-
-Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
-
-### OpenTSDB
-
-You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in OpenTSDB compatible format to TDengine.
-
-```text
-/opentsdb/v1/put/json/:db
-/opentsdb/v1/put/telnet/:db
-```
-
-### collectd
-
-### StatsD
-
-### icinga2 OpenTSDB writer
-
-### TCollector
-
-### node_exporter
-
-node_exporter is an exporter of hardware and OS metrics exposed by \*NIX kernels and used by Prometheus.
-
-- Enable the taosAdapter configuration `node_exporter.enable`
-- Set the configuration of the node_exporter
-- Restart taosAdapter
-
-### Prometheus
-
-## Memory usage optimization methods
-
-taosAdapter monitors its memory usage during operation and adjusts its behavior with two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.
-
-- pauseQueryMemoryThreshold
-- pauseAllMemoryThreshold
-
-taosAdapter stops processing query requests when the `pauseQueryMemoryThreshold` threshold is exceeded.
-
-HTTP response content:
-
-- code 503
-- body "query memory exceeds threshold"
-
-taosAdapter stops processing all write and query requests when the `pauseAllMemoryThreshold` threshold is exceeded.
-
-HTTP response content:
-
-- code 503
-- body "memory exceeds threshold"
-
-The corresponding functions are resumed when memory usage falls back below the thresholds.
-
-Status check interface `http://<fqdn>:6041/-/ping`
-
-- Normally returns `code 200`
-- Without parameters: returns `code 503` if memory exceeds `pauseAllMemoryThreshold`
-- With the request parameter `action=query`: returns `code 503` if memory exceeds `pauseQueryMemoryThreshold` or `pauseAllMemoryThreshold`
-
-Corresponding configuration parameters:
-
-```text
-  monitor.collectDuration            monitoring interval                                              environment variable `TAOS_MONITOR_COLLECT_DURATION` (default value 3s)
-  monitor.incgroup                   whether to run in cgroup (set to true for running in container)  environment variable `TAOS_MONITOR_INCGROUP`
-  monitor.pauseAllMemoryThreshold    memory threshold for no more inserts and queries                 environment variable `TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD` (default 80)
-  monitor.pauseQueryMemoryThreshold  memory threshold for no more queries                             environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70)
-```
-
-You should adjust these parameters based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface.
-
-## taosAdapter Monitoring Metrics
-
-taosAdapter collects HTTP-related metrics, CPU percentage, and memory percentage.
-
-### HTTP interface
-
-Provides an interface conforming to [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md).
-
-```text
-http://<fqdn>:6041/metrics
-```
-
-### Write to TDengine
-
-taosAdapter supports writing the collected HTTP monitoring metrics, CPU percentage, and memory percentage to TDengine.
-
-The related configuration parameters are as follows.
-
-| **Configuration items** | **Description** | **Default values** |
-| ----------------------- | ---------------------------------------------------------------------------- | ------------------ |
-| monitor.collectDuration | CPU and memory collection interval | 3s |
-| monitor.identity | Identity of the current instance; `hostname:port` is used if it is not set | |
-| monitor.incgroup | Whether it is running in a cgroup (set to true when running in a container) | false |
-| monitor.writeToTD | Whether to write to TDengine | true |
-| monitor.user | TDengine connection username | root |
-| monitor.password | TDengine connection password | taosdata |
-| monitor.writeInterval | Write to TDengine interval | 30s |
-
-## Limit the number of results returned
-
-taosAdapter controls the number of results returned through the parameter `restfulRowLimit`; -1 means no limit, and the default is no limit.
-
-This parameter controls the number of results returned by the following interfaces:
-
-- `http://<fqdn>:6041/rest/sql`
-- `http://<fqdn>:6041/rest/sqlt`
-- `http://<fqdn>:6041/rest/sqlutc`
-- `http://<fqdn>:6041/prometheus/v1/remote_read/:db`
-
-## Troubleshooting
-
-You can check the taosAdapter running status with the `systemctl status taosadapter` command.
-
-You can also adjust the level of the taosAdapter log output by setting the `--logLevel` parameter or the environment variable `TAOS_ADAPTER_LOG_LEVEL`. Valid values are: panic, fatal, error, warn, warning, info, debug and trace.
-
-## How to migrate from older TDengine versions to taosAdapter
-
-In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. Some configuration parameters and behaviors are different between the two. See the following table for details.
-
-| **#** | **embedded httpd** | **taosAdapter** | **comment** |
-| ----- | ------------------- | ------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | httpEnableRecordSql | --logLevel=debug | |
-| 2 | httpMaxThreads | n/a | taosAdapter automatically manages thread pools without this parameter |
-| 3 | telegrafUseFieldNum | See the taosAdapter telegraf configuration method | |
-| 4 | restfulRowLimit | restfulRowLimit | Embedded httpd outputs 10240 rows of data by default, with a maximum allowed of 102400. taosAdapter also provides restfulRowLimit, but it is not limited by default. You can configure it according to the actual scenario. |
-| 5 | httpDebugFlag | Not applicable | httpDebugFlag does not work for taosAdapter |
-| 6 | httpDBNameMandatory | N/A | taosAdapter requires the database name to be specified in the URL |
diff --git a/docs-en/14-reference/05-taosbenchmark.md b/docs-en/14-reference/05-taosbenchmark.md
deleted file mode 100644
index 7cf1f95eb116b5f87b3bc1e05b647b9b0da3c544..0000000000000000000000000000000000000000
--- a/docs-en/14-reference/05-taosbenchmark.md
+++ /dev/null
@@ -1,434 +0,0 @@
----
-title: taosBenchmark
-sidebar_label: taosBenchmark
-toc_max_heading_level: 4
-description: "taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine."
----
-
-## Introduction
-
-taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products.
-taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number of databases, supertables, and sub-tables, the number and types of tag and data columns, the amount of data per sub-table, the time interval for inserting data, the number of working threads, and whether and how to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users.
-
-## Installation
-
-There are two ways to install taosBenchmark:
-
-- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details.
-
-- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
-
-## Run
-
-### Configuration and running methods
-
-taosBenchmark needs to be executed from an operating system terminal. It supports two mutually exclusive configuration methods: [command-line arguments](#Command-line arguments in detail) and a [JSON configuration file](#Configuration file parameters in detail). Users can use `-f <configfile>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, all configuration must be done through the other parameters, and the `-f` parameter cannot be used. In addition, taosBenchmark offers a special way of running without any parameters.
-
-taosBenchmark supports complete performance testing of TDengine in three function categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. Note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
-
-**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
-
-### Run without command-line arguments
-
-Execute the following commands to quickly experience taosBenchmark's default-configuration write performance testing of TDengine.
-
-```bash
-taosBenchmark
-```
-
-When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 sub-tables under the super table, with 10,000 records written to each sub-table. Note that if there is already a database named `test`, this command will delete it first and create a new database.
-
-### Run with command-line configuration parameters
-
-The `-f <configfile>` argument cannot be used when running taosBenchmark with command-line parameters to control its behavior. Users must specify all configuration parameters on the command line. The following is an example of testing taosBenchmark writing performance using the command-line approach.
-
-```bash
-taosBenchmark -I stmt -n 200 -t 100
-```
-
-Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table, and insert 200 records into each sub-table using parameter binding.
-
-### Run with the configuration file
-
-A sample configuration file is provided in the taosBenchmark installation package under `<install_directory>/examples/taosbenchmark-json`.
-
-Use the following command line to run taosBenchmark and control its behavior via a configuration file.
-
-```bash
-taosBenchmark -f <configfile>
-```
-
-**Here are a few examples of configuration files:**
-
-#### Example of inserting a scenario JSON configuration file
-insert.json - -```json -{{#include /taos-tools/example/insert.json}} -``` - -
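-
-If the included file is not at hand, a stripped-down insert configuration might look like the sketch below. The field names follow the parameter reference later in this document; treat the values as illustrative only and consult the bundled insert.json for the authoritative schema.
-
-```json
-{
-  "filetype": "insert",
-  "cfgdir": "/etc/taos",
-  "host": "localhost",
-  "port": 6030,
-  "user": "root",
-  "password": "taosdata",
-  "thread_count": 8,
-  "databases": [
-    {
-      "dbinfo": { "name": "test", "drop": "yes" },
-      "super_tables": [
-        {
-          "name": "meters",
-          "child_table_count": 100,
-          "child_table_prefix": "d",
-          "insert_mode": "taosc",
-          "insert_rows": 1000,
-          "timestamp_step": 10,
-          "start_timestamp": "2022-01-01 00:00:00.000",
-          "columns": [{ "type": "FLOAT" }, { "type": "INT" }],
-          "tags": [{ "type": "INT" }]
-        }
-      ]
-    }
-  ]
-}
-```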
- -#### Query Scenario JSON Profile Example - -
-query.json - -```json -{{#include /taos-tools/example/query.json}} -``` - -
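-
-Similarly, a skeletal query configuration might look like the following sketch; the exact key set should be verified against the bundled query.json.
-
-```json
-{
-  "filetype": "query",
-  "host": "localhost",
-  "port": 6030,
-  "user": "root",
-  "password": "taosdata",
-  "databases": "test",
-  "query_times": 2,
-  "specified_table_query": {
-    "concurrent": 3,
-    "sqls": [
-      { "sql": "select last_row(*) from meters", "result": "./query_res0.txt" }
-    ]
-  }
-}
-```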
- -#### Subscription JSON configuration example - -
-subscribe.json - -```json -{{#include /taos-tools/example/subscribe.json}} -``` - -
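-
-And a skeletal subscription configuration in the same spirit; again, the key names are given only as an orientation and should be verified against the bundled subscribe.json.
-
-```json
-{
-  "filetype": "subscribe",
-  "host": "localhost",
-  "port": 6030,
-  "user": "root",
-  "password": "taosdata",
-  "databases": "test",
-  "specified_table_query": {
-    "concurrent": 1,
-    "mode": "sync",
-    "interval": 1000,
-    "restart": "yes",
-    "keepProgress": "yes",
-    "sqls": [
-      { "sql": "select * from meters", "result": "./subscribe_res0.txt" }
-    ]
-  }
-}
-```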
-
-## Command-line arguments in detail
-
-- **-f/--file**: specify the JSON configuration file to use. This file includes all parameters, and users should not use this parameter together with other parameters on the command line. There is no default value.
-
-- **-c/--config-dir**: specify the directory where the TDengine cluster configuration files are located. The default path is `/etc/taos`.
-
-- **-h/--host**: specify the FQDN of the TDengine server to connect to. The default value is localhost.
-
-- **-P/--port**: the port number of the TDengine server to connect to. The default value is 6030.
-
-- **-I/--interface**: insert mode. Options are taosc, rest, stmt, sml, and sml-rest, corresponding to normal writing, RESTful interface writing, parameter binding interface writing, schemaless interface writing, and RESTful schemaless interface writing (provided by taosAdapter). The default value is taosc.
-
-- **-u/--user**: user name to connect to the TDengine server. The default is root.
-
-- **-p/--password**: password to connect to the TDengine server. The default value is `taosdata`.
-
-- **-o/--output**: specify the path of the result output file. The default value is `./output.txt`.
-
-- **-T/--thread**: the number of threads to insert data. The default is 8.
-
-- **-B/--interlace-rows**: enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table completely before the next sub-table is inserted.
-
-- **-i/--insert-interval**: specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. It means that after inserting interlaced rows for each child table, the data insertion with multiple threads will wait for the interval specified by this value before proceeding to the next round of writes.
-
-- **-r/--rec-per-req**: the number of rows of records written to TDengine per request. The default value is 30000.
-
-- **-t/--tables**: specify the number of sub-tables. The default is 10000.
-
-- **-S/--timestampstep**: timestamp step for inserting data into each child table in ms. The default is 1.
-
-- **-n/--records**: the number of records inserted into each sub-table. The default value is 10000.
-
-- **-d/--database**: the name of the database used. The default value is `test`.
-
-- **-b/--data-type**: specify the types of the data columns of the super table. It defaults to three columns of type FLOAT, INT, and FLOAT if not used.
-
-- **-l/--columns**: specify the number of data columns in the super table. If both this parameter and `-b/--data-type` are set, the final number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column types default to INT; for example, `-l 5 -b float,double` results in the columns `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, the result is the columns and types specified by `-b/--data-type`; e.g., `-l 3 -b float,double,float,bigint` results in the columns `FLOAT,DOUBLE,FLOAT,BIGINT`.
-
-- **-A/--tag-type**: the tag column types of the super table.
-nchar and binary types can both set the length, for example:
-
-```
-taosBenchmark -A INT,DOUBLE,NCHAR,BINARY(16)
-```
-
-If users do not set the tag types, the default is two tags whose types are INT and BINARY(16).
-Note: in some shells, such as bash, "()" needs to be escaped, so the above command should be
-
-```
-taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
-```
-
-- **-w/--binwidth**: specify the default length for nchar and binary types. The default value is 64.
-
-- **-m/--table-prefix**: the prefix of the sub-table names. The default value is "d".
-
-- **-E/--escape-character**: switch parameter specifying whether to use escape characters in the super table and sub-table names. It is not used by default.
-
-- **-C/--chinese**: switch parameter specifying whether to use Unicode Chinese characters in nchar and binary data. It is not used by default.
-
-- **-N/--normal-table**: this parameter indicates that taosBenchmark will create only normal tables instead of super tables. The default value is false. It can be used if the insert mode is taosc, stmt, or rest.
-
-- **-M/--random**: this parameter indicates writing data with random values. The default is false. If users use this parameter, taosBenchmark will generate random values. For tag/data columns of numeric types, the value is a random value within the range of values of that type. For NCHAR and BINARY type tag/data columns, the value is a random string within the specified length range.
-
-- **-x/--aggr-func**: switch parameter indicating that aggregation functions are queried after insertion. The default value is false.
-
-- **-y/--answer-yes**: switch parameter requiring the user to confirm at the prompt to continue. The default value is false.
-
-- **-O/--disorder**: specify the percentage probability of disordered data, with a value range of [0,50]. The default is 0, i.e., there is no disordered data.
-
-- **-R/--disorder-range**: specify the timestamp range for disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
-
-- **-F/--prepare_rand**: specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
-
-- **-a/--replica**: specify the number of replicas when creating the database. The default value is 1.
-
-- **-V/--version**: show version information only. Users should not use it with other parameters.
-
-- **-?/--help**: show help information and exit. Users should not use it with other parameters.
-
-## Configuration file parameters in detail
-
-### General configuration parameters
-
-The parameters listed in this section apply to all function modes.
-
-- **filetype**: the function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file.
-
-- **cfgdir**: specify the directory of the TDengine cluster configuration files. The default path is /etc/taos.
-
-- **host**: specify the FQDN of the TDengine server to connect to. The default value is `localhost`.
-
-- **port**: the port number of the TDengine server to connect to. The default value is `6030`.
-
-- **user**: the user name to connect to the TDengine server. The default is `root`.
-
-- **password**: the password to connect to the TDengine server. The default value is `taosdata`.
-
-### Insert scenario configuration parameters
-
-`filetype` must be set to `insert` in the insertion scenario. See [General configuration parameters](#General configuration parameters).
-
-#### Database related configuration parameters
-
-The parameters related to database creation are configured in `dbinfo` in the JSON configuration file, as follows. These parameters correspond to the database parameters specified when executing `create database` in TDengine.
-
-- **name**: specify the name of the database.
-
-- **drop**: indicate whether to delete the database before inserting. The default is true.
-
-- **replica**: specify the number of replicas when creating the database.
-
-- **days**: specify the time span for storing data in a single data file. The default is 10.
-
-- **cache**: specify the size of the cache blocks in MB. The default value is 16.
-
-- **blocks**: specify the number of cache blocks in each vnode. The default is 6.
-
-- **precision**: specify the database time precision. The default value is "ms".
-
-- **keep**: specify the number of days to keep the data. The default value is 3650.
-
-- **minRows**: specify the minimum number of records in a file block. The default value is 100.
-
-- **maxRows**: specify the maximum number of records in a file block. The default value is 4096.
-
-- **comp**: specify the file compression level. The default value is 2.
-
-- **walLevel**: specify the WAL level. The default is 1.
-
-- **cacheLast**: indicate whether to allow the last record of each table to be kept in memory. The default value is 0. The value can be 0, 1, 2, or 3.
-
-- **quorum**: specify the number of write acknowledgments in multi-replica mode. The default value is 1.
-
-- **fsync**: specify the interval of fsync in ms when WAL is set to 2. The default value is 3000.
-
-- **update**: indicate whether to support data updates. The default value is 0; the optional values are 0, 1, and 2.
-
-#### Super table related configuration parameters
-
-The parameters for creating super tables are configured in `super_tables` in the JSON configuration file, as shown below.
-
-- **name**: the super table name. Mandatory, no default value.
-
-- **child_table_exists**: whether the child tables already exist. The default value is "no"; the optional values are "yes" or "no".
-
-- **child_table_count**: the number of child tables. The default value is 10.
-
-- **child_table_prefix**: the prefix of the child table names. Mandatory configuration item, no default value.
-
-- **escape_character**: whether the super table and child table names contain escape characters. The value can be "yes" or "no". The default is "no".
-
-- **auto_create_table**: only takes effect when insert_mode is taosc, rest, or stmt, and child_table_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting.
-
-- **batch_create_tbl_num**: the number of tables per batch when creating sub-tables. The default is 10. Note: the actual number of batches may not be the same as this value; if the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating.
-
-- **data_source**: specify the source of the generated data. The default is "rand", i.e., data randomly generated by taosBenchmark. Users can configure it as "rand" or "sample".
When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. - -- **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, restful schemaless interface write (provided by taosAdapter). The default value is taosc. - -- **non_stop_mode**: Specify whether to keep writing. If "yes", insert_rows will be disabled, and writing will not stop until Ctrl + C stops the program. The default value is "no", i.e., taosBenchmark will stop the writing after the specified number of rows are written. Note: insert_rows must be configured as a non-zero positive integer even if it fails in continuous write mode. - -- **line_protocol**: Insert data using line protocol. Only works when insert_mode is sml or sml-rest. The value can be `line`, `telnet`, or `json`. - -- **tcp_transfer**: Communication protocol in telnet mode only takes effect when insert_mode is sml-rest, and line_protocol is telnet. If not configured, the default protocol is http. - -- **insert_rows** : The number of inserted rows per child table, default is 0. - -- **childtable_offset**: Effective only if childtable_exists is yes, specifies the offset when fetching the list of child tables from the super table, i.e., starting from the first child table. - -- **childtable_limit**: Effective only when childtable_exists is yes, specifies the upper limit for fetching the list of child tables from the super table. - -- **interlace_rows**: Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Staggered insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted. - -- **insert_interval** : Specifies the insertion interval in ms for interleaved insertion mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. - -- **partial_col_num**: If this value is a positive number n, only the first n columns are written to, only if insert_mode is taosc and rest, or all columns if n is 0. - -- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data in the value range [0,50]. The default is 0, which means there is no disorder data. - -- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The disordered timestamp is generated by subtracting a random value in this range, from the timestamp that would be used in the non-disorder case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. - -- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For e.g. if the `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1. - -- **start_timestamp** : The timestamp start value of each sub-table, the default value is now. - -- **sample_format**: The type of the sample data file; for now only "csv" is supported. - -- **sample_file**: Specify a CSV format file as the data source. 
It only works when data_source is "sample". If the number of rows in the CSV file is less than or equal to prepare_rand, then taosBenchmark will read the CSV file data cyclically until the row count equals prepare_rand; otherwise, taosBenchmark will read only the first prepare_rand rows. The final number of rows of data generated is the smaller of the two. - -- **use_sample_ts**: effective only when data_source is `sample`, indicates whether the first column of the CSV file specified by sample_file is a timestamp column. The default is no. If set to yes, the first column of the CSV file is used as the `timestamp`. Since the timestamp of the same sub-table cannot be repeated, the amount of data generated is determined by the number of rows in the CSV file, and insert_rows is ignored. - -- **tags_file**: only works when insert_mode is taosc or rest. The final tag value is related to childtable_count. If the tag data rows in the CSV file are fewer than the given number of child tables, taosBenchmark will read the CSV file data cyclically until the number of child tables specified by childtable_count is generated; otherwise, taosBenchmark will read only childtable_count rows of tag data. The final number of child tables generated is the smaller of the two. - -#### Tag and Data Column Configuration Parameters - -The configuration parameters for specifying super table tag columns and data columns are in `columns` and `tags` in `super_tables`, respectively. - -- **type**: Specify the column type. For optional values, please refer to the data types supported by TDengine. -  Note: the JSON data type is special and can only be used for tags. When using the JSON type as a tag, there is and can only be this one tag. In that case, `count` and `len` represent the number of key-value pairs within the JSON tag and the length of the value of each key-value pair, respectively. The value is a string by default. - -- **len**: Specifies the length of this data type, valid for NCHAR, BINARY, and JSON data types. If this parameter is configured for other data types, a value of 0 means that the column is always written with a null value; if it is not 0, it is ignored. - -- **count**: Specifies the number of consecutive occurrences of the column type, e.g., "count": 4096 generates 4096 columns of the specified type. - -- **name**: The name of the column. If used together with count, e.g. "name": "current", "count": 3, then the names of the 3 columns are current, current_2, and current_3. - -- **min**: The minimum value of the column/tag of the data type. - -- **max**: The maximum value of the column/tag of the data type. - -- **values**: The value pool of the nchar/binary column/tag; values will be chosen randomly from it. - -#### Insertion behavior configuration parameters - -- **thread_count**: specify the number of threads to insert data. The default is 8. - -- **create_table_thread_count**: The number of threads used to create tables, default is 8. - -- **connection_pool_size**: The number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified. - -- **result_file**: The path to the result output file, the default value is ./output.txt. - -- **confirm_parameter_prompt**: Switch parameter that requires the user to confirm at the prompt to continue. The default value is false. - -- **interlace_rows**: Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables are inserted. The default value is 0, which means that data will be inserted into the following child table only after data is inserted into one child table. -  This parameter can also be configured in `super_tables`, and if so, the configuration in `super_tables` takes precedence and overrides the global setting. - -- **insert_interval**: Specifies the insertion interval in ms for interleaved insertion mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. It means that after inserting interlace rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. -  This parameter can also be configured in `super_tables`, and if so, the configuration in `super_tables` takes precedence and overrides the global setting. - -- **num_of_records_per_req**: The number of rows of data to be written per request to TDengine, the default value is 30000. If it is set too large, the TDengine client driver will return an error message; in that case, lower this parameter until writes succeed. - -- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are the same. The default value is 10000.
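- -To show how the pieces fit together, below is a minimal, hypothetical insert-scenario configuration assembled from the parameters described above. The database name, table counts, and column definitions are illustrative values, and the enclosing `databases` array follows the layout of the example files shipped with taosBenchmark, so treat the exact nesting as an assumption: - -``` -{ -  "filetype": "insert", -  "host": "localhost", -  "port": 6030, -  "user": "root", -  "password": "taosdata", -  "thread_count": 8, -  "result_file": "./output.txt", -  "databases": [{ -    "dbinfo": { "name": "test", "drop": "yes", "precision": "ms" }, -    "super_tables": [{ -      "name": "meters", -      "child_table_count": 10, -      "child_table_prefix": "d", -      "insert_mode": "taosc", -      "insert_rows": 10000, -      "timestamp_step": 1, -      "start_timestamp": "now", -      "columns": [ -        { "type": "FLOAT", "name": "current", "count": 3 }, -        { "type": "INT", "min": 0, "max": 100 } -      ], -      "tags": [{ "type": "INT" }, { "type": "BINARY", "len": 16 }] -    }] -  }] -} -```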
- -### Query scenario configuration parameters - -`filetype` must be set to `query` in the query scenario. See [General Configuration Parameters](#general-configuration-parameters) for details of this parameter and other general parameters. - -#### Configuration parameters for executing the specified query statement - -The configuration parameters for querying the sub-tables or the normal tables are set in `specified_table_query`. - -- **query_interval**: The query interval in seconds, the default value is 0. - -- **threads**: The number of threads to execute the query SQL, the default value is 1. - -- **sqls**: -  - **sql**: the SQL command to be executed. -  - **result**: the file to save the query result. If it is unspecified, taosBenchmark will not save the result. - -#### Configuration parameters of the super table query - -The configuration parameters of the super table query are set in `super_table_query`. - -- **stblname**: Specify the name of the super table to be queried, required. - -- **query_interval**: The query interval in seconds, the default value is 0. - -- **threads**: The number of threads to execute the query SQL, the default value is 1. - -- **sqls**: -  - **sql**: The SQL command to be executed. For the query SQL of a super table, keep "xxxx" in the SQL command; the program will automatically replace it with all the sub-table names of the super table. -  - **result**: The file to save the query result. If not specified, taosBenchmark will not save the result.
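- -A corresponding minimal query-scenario file might look like the sketch below; the database key, table names, and SQL statements are illustrative assumptions modeled on the example files shipped with taosBenchmark: - -``` -{ -  "filetype": "query", -  "host": "localhost", -  "databases": "test", -  "specified_table_query": { -    "query_interval": 1, -    "threads": 1, -    "sqls": [{ "sql": "select count(*) from meters", "result": "./query_res0.txt" }] -  }, -  "super_table_query": { -    "stblname": "meters", -    "threads": 1, -    "sqls": [{ "sql": "select last_row(*) from xxxx", "result": "./query_res1.txt" }] -  } -} -```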
- -### Subscription scenario configuration parameters - -`filetype` must be set to `subscribe` in the subscription scenario. See [General Configuration Parameters](#general-configuration-parameters) for details of this and other general parameters. - -#### Configuration parameters for executing the specified subscription statement - -The configuration parameters for subscribing to a sub-table or a normal table are set in `specified_table_query`. - -- **threads**: The number of threads to execute SQL, default is 1. - -- **interval**: The time interval to execute the subscription, in seconds, default is 0. - -- **restart**: "yes" means start a new subscription, "no" means continue the previous subscription, the default value is "no". - -- **keepProgress**: "yes" means keep the progress of the subscription, "no" means don't keep it, and the default value is "no". - -- **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again, "no" means continue the previous subscription, and the default value is "no". - -- **sqls**: -  - **sql**: The SQL command to be executed, required. -  - **result**: The file to save the query result; if not specified, the result will not be saved. - -#### Configuration parameters for subscribing to supertables - -The configuration parameters for subscribing to a super table are set in `super_table_query`. - -- **stblname**: The name of the super table to subscribe to. - -- **threads**: The number of threads to execute SQL, default is 1. - -- **interval**: The time interval to execute the subscription, in seconds, default is 0. - -- **restart**: "yes" means start a new subscription, "no" means continue the previous subscription, the default value is "no". - -- **keepProgress**: "yes" means keep the progress of the subscription, "no" means don't keep it, and the default value is "no". - -- **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again, "no" means continue the previous subscription, and the default value is "no". - -- **sqls**: -  - **sql**: SQL command to be executed, required; for the query SQL of the super table, keep "xxxx" in the SQL command, and the program will replace it with all the sub-table names of the super table automatically. -  - **result**: The file to save the query result; if not specified, it will not be saved. diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md deleted file mode 100644 index 5403e40925f633ce62795cc6037fc8c8f7aad07a..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/06-taosdump.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: taosdump -description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster." ---- - -## Introduction - -taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster. - -taosdump can back up a database, a super table, or a normal table as a logical data unit, or back up data records in the database, super tables, and normal tables. When using taosdump, you can specify the directory path for data backup. If you do not specify a directory, taosdump will back up the data to the current directory by default. - -If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup. - -Please be careful if you see such a prompt, and ensure that you follow best practices and relevant SOPs for data integrity, backup, and data security.
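- -For example, a hypothetical backup of one database into a dedicated, empty directory (the database name and path are placeholders; `-D` and `-o` are described in the parameter list below) could look like: - -``` -taosdump -D db1 -o /var/backups/taos/db1 -```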
- -Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. - -## Installation - -There are two ways to install taosdump: - -- Install the official taosTools installer. Please find taosTools on the [All download links](https://www.tdengine.com/all-downloads) page, then download and install it. - -- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. - -## Common usage scenarios - -### taosdump backup data - -1. Back up all databases: specify the `-A` or `--all-databases` parameter. -2. Back up multiple specified databases: use the `-D db1,db2,...` parameter. -3. Back up some super or normal tables in the specified database: use `dbname stbname1 stbname2 tbname1 tbname2 ...` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. -4. Back up the system log database: TDengine clusters usually contain a system database named `log`. This database holds the data that TDengine generates during its own operation, and taosdump will not back it up by default. If users need to back up the log database, they can use the `-a` or `--allow-sys` command-line parameter. -5. Loose mode backup: taosdump version 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data without using escape characters ("loose" mode), which can be used if table names, column names, and tag names do not use escape characters. This can also reduce the backup time and the backup data footprint. If you are unsure whether the `-n` and `-L` conditions are met, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. - -:::tip -- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will parse the schema only. -- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. - -::: - -### taosdump recover data - -Restore the data files in the specified path: use the `-i` parameter plus the path to the data files. You should not use the same directory to back up different data sets, and you should not back up the same data set multiple times in the same path. Otherwise, the backup data will be overwritten or backed up multiple times. - -:::tip -taosdump internally uses the TDengine stmt binding API to write recovery data, with a default batch size of 16384 for better data recovery performance. If the backup data has many columns, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter. - -::: - -## Detailed command-line parameter list - -The following is a detailed list of taosdump command-line arguments. - -``` -Usage: taosdump [OPTION...] dbname [tbname ...]
- or: taosdump [OPTION...] --databases db1,db2,... - or: taosdump [OPTION...] --all-databases - or: taosdump [OPTION...] -i inpath - or: taosdump [OPTION...] -o outpath - - -h, --host=HOST Server host from which to dump data. Default is - localhost. - -p, --password User password to connect to server. Default is - taosdata. - -P, --port=PORT Port to connect - -u, --user=USER User name used to connect to server. Default is - root. - -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos - -i, --inpath=INPATH Input file path. - -o, --outpath=OUTPATH Output file path. - -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. - -a, --allow-sys Allow to dump system database - -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump listed databases. Use comma to separate - database names. - -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump table schemas. - -y, --answer-yes Input yes for prompt. It will skip data file - checking! - -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, - and lzma. - -S, --start-time=START_TIME Start time to dump. Either epoch or - ISO8601/RFC3339 format is acceptable. ISO8601 - format example: 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00:000+0800 or '2017-10-01 - 00:00:00.000+0800' - -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 - format is acceptable. ISO8601 format example: - 2017-10-01T00:00:00.000+0800 or - 2017-10-0100:00:00.000+0800 or '2017-10-01 - 00:00:00.000+0800' - -B, --data-batch=DATA_BATCH Number of data per query/insert statement when - backup/restore. Default value is 16384. If you see - 'error actual dump .. batch ..' when backup or if - you see 'WAL size exceeds limit' error when - restore, please adjust the value to a smaller one - and try. The workable value is related to the - length of the row and type of table schema. - -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Use loose mode if the table name and column name - use letter and number only. Default is NOT. - -n, --no-escape No escape char '`'. Default is using it. - -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is - 5. - -g, --debug Print debug info. - -?, --help Give this help list - --usage Give a short usage message - -V, --version Print program version - -Mandatory or optional arguments to long options are also mandatory or optional -for any corresponding short options. - -Report bugs to . 
-``` diff --git a/docs-en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs-en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json deleted file mode 100644 index f651983528ca824b4e6b14586aac5a5bfb4ecab8..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json +++ /dev/null @@ -1,3191 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_TDENGINE", - "label": "TDengine", - "description": "", - "type": "datasource", - "pluginId": "tdengine-datasource", - "pluginName": "TDengine" - } - ], - "__requires": [ - { - "type": "panel", - "id": "gauge", - "name": "Gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "7.5.10" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "" - }, - { - "type": "panel", - "id": "piechart", - "name": "Pie chart v2", - "version": "" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "" - }, - { - "type": "datasource", - "id": "tdengine-datasource", - "name": "TDengine", - "version": "3.1.0" - }, - { - "type": "panel", - "id": "text", - "name": "Text", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "description": "TDengine nodes metrics.", - "editable": true, - "gnetId": 15146, - "graphTooltip": 0, - "id": null, - "iteration": 1635263227798, - "links": [], - "panels": [ - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 57, - "panels": [], - "title": "Cluster Status", - "type": "row" - }, - { - "datasource": null, - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 1 - }, - "id": 32, - "options": { - "content": "

TDengine Cluster Dashboard

>\n", - "mode": "markdown" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "mnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "-- OVERVIEW --", - "transformations": [ - { - "id": "calculateField", - "options": { - "binary": { - "left": "Time", - "operator": "+", - "reducer": "sum", - "right": "" - }, - "mode": "binary", - "reduce": { - "reducer": "sum" - } - } - } - ], - "type": "text" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 8, - "x": 0, - "y": 4 - }, - "id": 28, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Master MNode", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "master" - } - }, - "fieldName": "role" - } - ], - "match": "all", - "type": "include" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["dnodes"] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 7, - "x": 8, - "y": 4 - }, - "id": 70, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "/^Time$/", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Master MNode Create Time", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "master" - } - }, - "fieldName": "role" - } - ], - "match": "all", - "type": "include" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["Time"] - } - } - }, - { - "id": "calculateField", - "options": { - "mode": "reduceRow", - "reduce": { - "reducer": "min" - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": null, - 
"filterable": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 9, - "x": 15, - "y": 4 - }, - "id": 29, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show variables", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Variables", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["value", "name"] - } - } - }, - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": ".*" - } - }, - "fieldName": "name" - } - ], - "match": "all", - "type": "include" - } - } - ], - "type": "table" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 0, - "y": 7 - }, - "id": 33, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "/.*/", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "select server_version()", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Server Version", - "transformations": [], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 2, - "y": 7 - }, - "id": 27, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of MNodes", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "greater", - "options": { - "value": 0 - } - }, - "fieldName": "id" - } - ], - "match": "any", - "type": "include" - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["count"] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["id"] - } - } - } - ], - "type": "stat" - }, - { - 
"datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 5, - "y": 7 - }, - "id": 41, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "value" - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Dnodes", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["count"] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["id"] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 1 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 7, - "y": 7 - }, - "id": 31, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "value" - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Offline Dnodes", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "ready" - } - }, - "fieldName": "status" - } - ], - "match": "all", - "type": "exclude" - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["count"] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["id"] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 7 - }, - "id": 65, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of Databases", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - 
"mode": "reduceFields", - "reducers": ["count"] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["name"] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 7 - }, - "id": 69, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Number of Vgroups", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["vgroups"] - } - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["sum"] - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "displayMode": "auto", - "filterable": true - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "role" - }, - "properties": [ - { - "id": "mappings", - "value": [ - { - "from": "", - "id": 1, - "text": "", - "to": "", - "type": 2, - "value": "" - } - ] - } - ] - } - ] - }, - "gridPos": { - "h": 3, - "w": 9, - "x": 0, - "y": 10 - }, - "id": 67, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of DNodes for each Role", - "transformations": [ - { - "id": "groupBy", - "options": { - "fields": { - "end_point": { - "aggregations": ["count"], - "operation": "aggregate" - }, - "role": { - "aggregations": [], - "operation": "groupby" - } - } - } - }, - { - "id": "filterFieldsByName", - "options": {} - }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": {}, - "renameByName": { - "end_point (count)": "Number of DNodes", - "role": "Dnode Role" - } - } - } - ], - "type": "table" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 10 - }, - "id": 55, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - 
"repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show connections", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of Connections", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["count"] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["connId"] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 10 - }, - "id": 68, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Number of Tables", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["ntables"] - } - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["sum"] - } - } - ], - "type": "stat" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 24, - "panels": [], - "title": "Dnodes Status", - "type": "row" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "displayMode": "auto", - "filterable": true - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "status" - }, - "properties": [ - { - "id": "custom.width", - "value": 86 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "vnodes" - }, - "properties": [ - { - "id": "custom.width", - "value": 77 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "role" - }, - "properties": [ - { - "id": "custom.width", - "value": 84 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "cores" - }, - "properties": [ - { - "id": "custom.width", - "value": 75 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "end_point" - }, - "properties": [ - { - "id": "custom.width", - "value": 205 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "id" - }, - "properties": [ - { - "id": "custom.width", - "value": 78 - } - ] - } - ] - }, - "gridPos": { - "h": 5, - "w": 16, - "x": 0, - "y": 14 - }, - "id": 36, - "options": { - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - 
"timeFrom": null, - "timeShift": null, - "title": "DNodes Status", - "type": "table" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 14 - }, - "id": 40, - "options": { - "displayLabels": [], - "legend": { - "displayMode": "table", - "placement": "right", - "values": ["value"] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": ["lastNotNull"], - "fields": "/.*/", - "values": false - }, - "text": { - "titleSize": 6 - } - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "title": "Offline Reasons", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "ready" - } - }, - "fieldName": "status" - } - ], - "match": "all", - "type": "exclude" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": ["offline reason", "end_point"] - } - } - }, - { - "id": "groupBy", - "options": { - "fields": { - "Time": { - "aggregations": ["count"], - "operation": "aggregate" - }, - "end_point": { - "aggregations": ["count"], - "operation": "aggregate" - }, - "offline reason": { - "aggregations": [], - "operation": "groupby" - } - } - } - } - ], - "type": "piechart" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 19 - }, - "id": 22, - "panels": [], - "title": "Mnodes Status", - "type": "row" - }, - { - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "filterable": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 38, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes;", - "target": "select metric", - "type": "timeserie" - } - ], - "title": "Mnodes Status", - "type": "table" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 25 - }, - "id": 20, - "panels": [], - "repeat": "fqdn", - "title": "节点资源占用 [ $fqdn ]", - "type": "row" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "decmbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 0, - "y": 26 - }, - "id": 66, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["mean"], - "fields": "/^taosd$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "memory", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select 
last(mem_taosd) as taosd, last(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Current Memory Usage of taosd", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "taosd max memery last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 0.5 - }, - { - "color": "red", - "value": 0.8 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "last(cpu_taosd)" - }, - "properties": [ - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 5, - "y": 26 - }, - "id": 45, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["mean"], - "fields": "/^last\\(cpu_taosd\\)$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "mem_taosd", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(cpu_taosd) from log.dn where fqdn = '$fqdn'", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Current CPU Usage of taosd", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "avg band speed last one minute", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 4, - "x": 10, - "y": 26 - }, - "id": 14, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "band_speed", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select avg(band_speed) from log.dn where fqdn='$fqdn' and ts >= now-5m and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "band speed", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 14, - "y": 26 - }, - "id": 48, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - 
"targets": [ - { - "alias": "", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(io_read) as io_read, last(io_write) as io_write from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "IO Rate", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 1, - "min": 0, - "thresholds": { - "mode": "percentage", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 75 - }, - { - "color": "red", - "value": 80 - }, - { - "color": "dark-red", - "value": 95 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 19, - "y": 26 - }, - "id": 51, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "/^disk_used_percent$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(disk_used) as used from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "disk_total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select last(disk_total) as total from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "disk_used_percent", - "expression": "A/B", - "formatType": "Time series", - "hide": false, - "queryType": "Arithmetic", - "refId": "C", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk Used", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": ["lastNotNull"] - } - } - ], - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "taosd max memery last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "decmbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 0, - "y": 32 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["mean"], - "fields": "/^taosd$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "memory", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(mem_taosd) as taosd, max(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max Memory Usage of taosd in Last 5 minute", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "taosd max memery last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - 
"max": 1, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 0.5 - }, - { - "color": "red", - "value": 0.8 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 5, - "y": 32 - }, - "id": 43, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["mean"], - "fields": "", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "mem_taosd", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(cpu_taosd) from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max CPU Usage of taosd in Last 5 minute", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "avg band speed last one minute", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 4, - "x": 10, - "y": 32 - }, - "id": 50, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "band_speed", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(band_speed) from log.dn where fqdn = '$fqdn' and ts >= now-1h", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max band speed in last hour", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 14, - "y": 32 - }, - "id": 49, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(io_read) as io_read, max(io_write) as io_write from log.dn where fqdn = '$fqdn'", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max IO Rate in last hour", - "type": "gauge" - }, - { - "datasource": "${DS_TDENGINE}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": 
"red", - "value": 6554 - } - ] - }, - "unit": "cpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 19, - "y": 32 - }, - "id": 52, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": ["last"], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "alias": "req-http", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select sum(req_http) as req_http from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req-inserts", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select sum(req_insert) as req_insert from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req-selects", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select sum(req_select) as req_select from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Requests in last Minute", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "description": "monitor system cpu", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 38 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "cpu_system", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(cpu_system) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "cpu_taosd", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(cpu_taosd) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "CPU 资源占用情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "percent", - "label": "使用占比", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": 
"${DS_TDENGINE}", - "description": "monitor system cpu", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 38 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 42, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "system", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(mem_system) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "taosd", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(mem_taosd) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select avg(mem_total) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "内存资源占用情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "decmbytes", - "label": "使用占比", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 49 - }, - "hiddenSeries": false, - "id": 54, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "percent", - "yaxis": 2 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "disk_total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select 
avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "percent", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "D", - "sql": "select avg(disk_used)/avg(disk_total) * 100 from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk Used Percent", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "gbytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "percent", - "label": "Disk Used", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 49 - }, - "hiddenSeries": false, - "id": 64, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select derivative(value, 1m, 0) from (select avg(disk_used) as value from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m))", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk Used Increasing Rate per Minute", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "percentunit", - "label": "Disk Used", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "description": "total select request per minute last hour", - "fieldConfig": { - "defaults": { - "unit": "cpm" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 58 - }, - "hiddenSeries": false, - "id": 8, - "interval": null, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, 
- "lines": true, - "linewidth": 1, - "links": [], - "maxDataPoints": 100, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "req_select", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select sum(req_select) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "req_insert", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select sum(req_insert) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req_http", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select sum(req_http) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Requets Count per Minutes $fqdn", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "cpm", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "description": "io", - "fieldConfig": { - "defaults": { - "links": [], - "unit": "Kbits" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 58 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 47, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "io-read", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "io-write", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "io-read-last-hour", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-2h and ts < now - 1h interval(1m)", - "target": "select 
metric", - "timeshift": { - "period": 1, - "unit": "hours" - }, - "type": "timeserie" - }, - { - "alias": "io-write-last-hour", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "D", - "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "timeshift": { - "period": 1, - "unit": "hours" - }, - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "IO", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "Kbits", - "label": "IO Rate", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 67 - }, - "id": 63, - "panels": [], - "title": "Login History", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TDENGINE}", - "fieldConfig": { - "defaults": { - "displayName": "Logins Per Minute", - "unit": "cpm" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 68 - }, - "hiddenSeries": false, - "id": 61, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "logins", - "nullPointMode": "null as zero" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "logins", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select count(*) from log.log where ts >= $from and ts < $to interval (1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Login Counts per Minute", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "cpm", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "1m", - "schemaVersion": 27, - "style": "dark", - "tags": ["TDengine", "multiple"], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "TDengine", - "value": "TDengine" - }, - "description": "TDengine Data Source Selector", - "error": null, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "ds", - "options": [], - "query": "tdengine-datasource", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": null, - 
"current": {}, - "datasource": "${DS_TDENGINE}", - "definition": "select fqdn from log.dn", - "description": "TDengine Nodes FQDN (Hostname)", - "error": null, - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "fqdn", - "options": [], - "query": "select fqdn from log.dn", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"] - }, - "timezone": "", - "title": "Multiple TDengines Monitoring", - "uid": "tdengine-multiple", - "version": 4 -} diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs-en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json deleted file mode 100644 index b4254c428b28a0084e54b5e3c509dd2e0ec651b9..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json +++ /dev/null @@ -1,3358 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "description": "TDengine nodes metrics.", - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 3, - "iteration": 1634275785625, - "links": [], - "panels": [ - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 57, - "panels": [], - "title": "Cluster Status", - "type": "row" - }, - { - "datasource": null, - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 1 - }, - "id": 32, - "options": { - "content": "
TDengine Cluster Dashboard
>\n", - "mode": "markdown" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "mnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "-- OVERVIEW --", - "transformations": [ - { - "id": "calculateField", - "options": { - "binary": { - "left": "Time", - "operator": "+", - "reducer": "sum", - "right": "" - }, - "mode": "binary", - "reduce": { - "reducer": "sum" - } - } - } - ], - "type": "text" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 8, - "x": 0, - "y": 4 - }, - "id": 28, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Master MNode", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "master" - } - }, - "fieldName": "role" - } - ], - "match": "all", - "type": "include" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "dnodes" - ] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 7, - "x": 8, - "y": 4 - }, - "id": 70, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "/^Time$/", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Master MNode Create Time", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "master" - } - }, - "fieldName": "role" - } - ], - "match": "all", - "type": "include" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "Time" - ] - } - } - }, - { - "id": "calculateField", - "options": { - "mode": "reduceRow", - "reduce": { - "reducer": "min" - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": null, - 
"filterable": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 9, - "x": 15, - "y": 4 - }, - "id": 29, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show variables", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Variables", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "value", - "name" - ] - } - } - }, - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": ".*" - } - }, - "fieldName": "name" - } - ], - "match": "all", - "type": "include" - } - } - ], - "type": "table" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 0, - "y": 7 - }, - "id": 33, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "/.*/", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "select server_version()", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Server Version", - "transformations": [], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 2, - "y": 7 - }, - "id": 27, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of MNodes", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "greater", - "options": { - "value": 0 - } - }, - "fieldName": "id" - } - ], - "match": "any", - "type": "include" - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "count" - ] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "id" - ] - } - } - } - ], - "type": "stat" - }, - { 
- "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 5, - "y": 7 - }, - "id": 41, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "value" - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Dnodes", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "count" - ] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "id" - ] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 1 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 7, - "y": 7 - }, - "id": 31, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "value" - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Offline Dnodes", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "ready" - } - }, - "fieldName": "status" - } - ], - "match": "all", - "type": "exclude" - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "count" - ] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "id" - ] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 7 - }, - "id": 65, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of Databases", - "transformations": [ - { - "id": "reduce", - "options": { - 
"includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "count" - ] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "name" - ] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 7 - }, - "id": 69, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Number of Vgroups", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "vgroups" - ] - } - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "sum" - ] - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "displayMode": "auto", - "filterable": true - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "role" - }, - "properties": [ - { - "id": "mappings", - "value": [ - { - "from": "", - "id": 1, - "text": "", - "to": "", - "type": 2, - "value": "" - } - ] - } - ] - } - ] - }, - "gridPos": { - "h": 3, - "w": 9, - "x": 0, - "y": 10 - }, - "id": 67, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of DNodes for each Role", - "transformations": [ - { - "id": "groupBy", - "options": { - "fields": { - "end_point": { - "aggregations": [ - "count" - ], - "operation": "aggregate" - }, - "role": { - "aggregations": [], - "operation": "groupby" - } - } - } - }, - { - "id": "filterFieldsByName", - "options": {} - }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": {}, - "renameByName": { - "end_point (count)": "Number of DNodes", - "role": "Dnode Role" - } - } - } - ], - "type": "table" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 10 - }, - "id": 55, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" 
- }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show connections", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of Connections", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "count" - ] - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "connId" - ] - } - } - } - ], - "type": "stat" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 10 - }, - "id": 68, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "7.5.10", - "repeatDirection": "h", - "targets": [ - { - "alias": "dnodes", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "show databases;", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Number of Tables", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "ntables" - ] - } - } - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "sum" - ] - } - } - ], - "type": "stat" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 24, - "panels": [], - "title": "Dnodes Status", - "type": "row" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "displayMode": "auto", - "filterable": true - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "status" - }, - "properties": [ - { - "id": "custom.width", - "value": null - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "vnodes" - }, - "properties": [ - { - "id": "custom.width", - "value": null - } - ] - } - ] - }, - "gridPos": { - "h": 5, - "w": 16, - "x": 0, - "y": 14 - }, - "id": 36, - "options": { - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "DNodes Status", - "type": "table" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 14 - }, - "id": 40, - "options": { - 
"displayLabels": [], - "legend": { - "displayMode": "table", - "placement": "right", - "values": [ - "value" - ] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "/.*/", - "values": false - }, - "text": { - "titleSize": 6 - } - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show dnodes", - "target": "select metric", - "type": "timeserie" - } - ], - "title": "Offline Reasons", - "transformations": [ - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "regex", - "options": { - "value": "ready" - } - }, - "fieldName": "status" - } - ], - "match": "all", - "type": "exclude" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "offline reason", - "end_point" - ] - } - } - }, - { - "id": "groupBy", - "options": { - "fields": { - "Time": { - "aggregations": [ - "count" - ], - "operation": "aggregate" - }, - "end_point": { - "aggregations": [ - "count" - ], - "operation": "aggregate" - }, - "offline reason": { - "aggregations": [], - "operation": "groupby" - } - } - } - } - ], - "type": "piechart" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 19 - }, - "id": 22, - "panels": [], - "title": "Mnodes Status", - "type": "row" - }, - { - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "filterable": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 38, - "options": { - "showHeader": true - }, - "pluginVersion": "7.5.10", - "targets": [ - { - "formatType": "Table", - "queryType": "SQL", - "refId": "A", - "sql": "show mnodes;", - "target": "select metric", - "type": "timeserie" - } - ], - "title": "Mnodes Status", - "type": "table" - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 25 - }, - "id": 20, - "panels": [], - "repeat": "fqdn", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "title": "节点资源占用 [ $fqdn ]", - "type": "row" - }, - { - "datasource": "${ds}", - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "decmbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 0, - "y": 26 - }, - "id": 66, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "/^taosd$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "memory", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(mem_taosd) as taosd, last(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - 
"title": "Current Memory Usage of taosd", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "taosd max memery last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 0.5 - }, - { - "color": "red", - "value": 0.8 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "last(cpu_taosd)" - }, - "properties": [ - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 5, - "y": 26 - }, - "id": 45, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "/^last\\(cpu_taosd\\)$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "mem_taosd", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(cpu_taosd) from log.dn where fqdn = '$fqdn'", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Current CPU Usage of taosd", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "avg band speed last one minute", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 4, - "x": 10, - "y": 26 - }, - "id": 14, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "band_speed", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select avg(band_speed) from log.dn where fqdn='$fqdn' and ts >= now-5m and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "band speed", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 14, - "y": 26 - }, - "id": 48, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - 
"pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(io_read) as io_read, last(io_write) as io_write from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "IO Rate", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 1, - "min": 0, - "thresholds": { - "mode": "percentage", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 75 - }, - { - "color": "red", - "value": 80 - }, - { - "color": "dark-red", - "value": 95 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 19, - "y": 26 - }, - "id": 51, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "/^disk_used_percent$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select last(disk_used) as used from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "disk_total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select last(disk_total) as total from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "disk_used_percent", - "expression": "A/B", - "formatType": "Time series", - "hide": false, - "queryType": "Arithmetic", - "refId": "C", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk Used", - "transformations": [ - { - "id": "reduce", - "options": { - "includeTimeField": false, - "mode": "reduceFields", - "reducers": [ - "lastNotNull" - ] - } - } - ], - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "taosd max memery last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "decmbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 0, - "y": 32 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "/^taosd$/", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "memory", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(mem_taosd) as taosd, max(mem_total) as total from log.dn where fqdn = '$fqdn' and 
ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max Memory Usage of taosd in Last 5 Minutes", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "taosd max memory last 10 minutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "max": 1, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 0.5 - }, - { - "color": "red", - "value": 0.8 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 5, - "y": 32 - }, - "id": 43, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": true, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "mem_taosd", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(cpu_taosd) from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max CPU Usage of taosd in Last 5 Minutes", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "avg band speed last one minute", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 4, - "x": 10, - "y": 32 - }, - "id": 50, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "band_speed", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(band_speed) from log.dn where fqdn = '$fqdn' and ts >= now-1h", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max band speed in last hour", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "Kbits" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 14, - "y": 32 - }, - "id": 49, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": 
"huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select max(io_read) as io_read, max(io_write) as io_write from log.dn where fqdn = '$fqdn'", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Max IO Rate in last hour", - "type": "gauge" - }, - { - "datasource": "${ds}", - "description": "io read/write rate", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "max": 8192, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "#EAB839", - "value": 4916 - }, - { - "color": "red", - "value": 6554 - } - ] - }, - "unit": "cpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 19, - "y": 32 - }, - "id": 52, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true, - "text": {} - }, - "pluginVersion": "7.5.10", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "targets": [ - { - "alias": "req-http", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select sum(req_http) as req_http from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req-inserts", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select sum(req_insert) as req_insert from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req-selects", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select sum(req_select) as req_select from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Requests in last Minute", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "description": "monitor system cpu", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 38 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "cpu_system", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(cpu_system) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - 
"alias": "cpu_taosd", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(cpu_taosd) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "CPU 资源占用情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:58", - "decimals": null, - "format": "percent", - "label": "使用占比", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:59", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "description": "monitor system cpu", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 38 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 42, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "system", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(mem_system) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "taosd", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(mem_taosd) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select avg(mem_total) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "内存资源占用情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:58", - "decimals": null, - "format": "decmbytes", - "label": "使用占比", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:59", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 
10, - "dashes": false, - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "unit": "percent" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 49 - }, - "hiddenSeries": false, - "id": 54, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [ - { - "$$hashKey": "object:249", - "alias": "disk_used", - "hiddenSeries": true - }, - { - "$$hashKey": "object:256", - "alias": "disk_total", - "hiddenSeries": true - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "disk_total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "percent", - "expression": "A/B * 100", - "formatType": "Time series", - "hide": false, - "queryType": "Arithmetic", - "refId": "C", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk Used Percent", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:456", - "format": "percent", - "label": null, - "logBase": 1, - "max": "100", - "min": "0", - "show": true - }, - { - "$$hashKey": "object:457", - "format": "percentunit", - "label": "Disk Used", - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 49 - }, - "hiddenSeries": false, - "id": 64, - "legend": { - "alignAsTable": false, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [ - { - "$$hashKey": "object:834", - "alias": "percent", - "yaxis": 2 - } - ], - "spaceLength": 10, - 
"stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "disk_used", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "disk_total", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "percent", - "expression": "A/B", - "formatType": "Time series", - "hide": false, - "queryType": "Arithmetic", - "refId": "C", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk Used", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:456", - "format": "decgbytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:457", - "format": "percentunit", - "label": "Disk Used", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "description": "total select request per minute last hour", - "fieldConfig": { - "defaults": { - "unit": "cpm" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 58 - }, - "hiddenSeries": false, - "id": 8, - "interval": null, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "maxDataPoints": 100, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "req_select", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select sum(req_select) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "timeshift": { - "period": null - }, - "type": "timeserie" - }, - { - "alias": "req_insert", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select sum(req_insert) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "req_http", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select sum(req_http) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - 
"timeShift": null, - "title": "Requets Count per Minutes $fqdn", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:127", - "format": "cpm", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:128", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "description": "io", - "fieldConfig": { - "defaults": { - "links": [], - "unit": "Kbits" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 58 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 47, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "fqdn": { - "selected": true, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "io-read", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "A", - "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "io-write", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "B", - "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "type": "timeserie" - }, - { - "alias": "io-read-last-hour", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "C", - "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-2h and ts < now - 1h interval(1m)", - "target": "select metric", - "timeshift": { - "period": 1, - "unit": "hours" - }, - "type": "timeserie" - }, - { - "alias": "io-write-last-hour", - "formatType": "Time series", - "hide": false, - "queryType": "SQL", - "refId": "D", - "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", - "target": "select metric", - "timeshift": { - "period": 1, - "unit": "hours" - }, - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "30s", - "title": "IO", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:58", - "decimals": null, - "format": "Kbits", - "label": "使用占比", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:59", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { 
- "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 67 - }, - "id": 63, - "panels": [], - "title": "Login History", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${ds}", - "fieldConfig": { - "defaults": { - "displayName": "Logins Per Minute", - "unit": "cpm" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 68 - }, - "hiddenSeries": false, - "id": 61, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.5.10", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "$$hashKey": "object:756", - "alias": "logins", - "nullPointMode": "null as zero" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "logins", - "formatType": "Time series", - "queryType": "SQL", - "refId": "A", - "sql": "select count(*) from log.log where ts >= $from and ts < $to interval (1m)", - "target": "select metric", - "type": "timeserie" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Login Counts per Minute", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:585", - "format": "cpm", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:586", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "1m", - "schemaVersion": 27, - "style": "dark", - "tags": [ - "TDengine" - ], - "templating": { - "list": [ - { - "current": { - "selected": true, - "text": "TDengine", - "value": "TDengine" - }, - "description": "TDengine Data Source Selector", - "error": null, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "ds", - "options": [], - "query": "tdengine-datasource", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": null, - "current": { - "selected": false, - "text": "huolinhe-TM1701:6030", - "value": "huolinhe-TM1701:6030" - }, - "datasource": "${ds}", - "definition": "select fqdn from log.dn", - "description": "TDengine Nodes FQDN (Hostname)", - "error": null, - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "fqdn", - "options": [], - "query": "select fqdn from log.dn", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "TDengine", - "uid": "tdengine", - "version": 8 -} \ No newline at end of file diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md deleted file mode 100644 index 
cebfafa225e6e8de75ff84bb51fa664784177910..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/07-tdinsight/index.md +++ /dev/null @@ -1,428 +0,0 @@ ---- -title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine -sidebar_label: TDinsight ---- - -TDinsight is a solution for monitoring TDengine using the built-in native monitoring database and [Grafana]. - -After TDengine starts, it automatically creates a monitoring database named `log` and writes many metrics into it at specific intervals. The metrics include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, and slow queries, as well as important system operations (user login, database creation, database deletion, etc.) and error alarms. With [Grafana] and the [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode/dnode/mnode status, exception alerts, and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real time. This article guides users through installing the Grafana server, automatically installing the TDengine data source plugin, and deploying the TDinsight visualization panel using the `TDinsight.sh` installation script. - -## System Requirements - -To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 or above, with the `log` database enabled (`monitor = 1`). - -## Installing Grafana - -We recommend using the latest [Grafana] version 7 or 8. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana installation instructions](https://grafana.com/docs/grafana/latest/installation/). - -### Installing Grafana on Debian or Ubuntu - -For Debian or Ubuntu operating systems, we recommend adding the Grafana package repository and installing from scratch with the following commands. - -```bash -sudo apt-get install -y apt-transport-https -sudo apt-get install -y software-properties-common wget -wget -q -O - https://packages.grafana.com/gpg.key |\ - sudo apt-key add - -echo "deb https://packages.grafana.com/oss/deb stable main" |\ - sudo tee -a /etc/apt/sources.list.d/grafana.list -sudo apt-get update -sudo apt-get install grafana -``` - -### Install Grafana on CentOS / RHEL - -You can install it from its official YUM repository. - -```bash -sudo tee /etc/yum.repos.d/grafana.repo << EOF -[grafana] -name=grafana -baseurl=https://packages.grafana.com/oss/rpm -repo_gpgcheck=1 -enabled=1 -gpgcheck=1 -gpgkey=https://packages.grafana.com/gpg.key -sslverify=1 -sslcacert=/etc/pki/tls/certs/ca-bundle.crt -EOF -sudo yum install grafana -``` - -Or install it with the RPM package. - -```bash -wget https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm -sudo yum install grafana-7.5.11-1.x86_64.rpm -# or -sudo yum install \ - https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm -``` - -## Automated deployment of TDinsight - -We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) that configures the installation automatically and quickly.
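- -Before downloading the script, you may want to confirm that the monitoring prerequisites above are in place; here is a minimal pre-flight sketch, assuming the TDengine CLI runs on the same host with the default configuration path and credentials: - -```bash -# Hedged sketch: check that the built-in monitoring database exists and that -# monitoring is enabled; the path and credentials below are the defaults. -taos -s "show databases;" | grep -w log -grep -E "^\s*monitor" /etc/taos/taos.cfg -```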
- -You can download the script via `wget` or other tools: - -```bash -wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -chmod +x TDinsight.sh -./TDinsight.sh -``` - -This script automatically downloads the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and the [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167), and writes the configured command-line options into the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration files, so that deployment and updates are automated. With the alert options provided by this script, you can also get built-in support for Alibaba Cloud SMS alert notifications. - -Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open Grafana in the browser to see the TDinsight dashboard. - -The following is a description of TDinsight.sh usage. - -```text -Usage: - ./TDinsight.sh - ./TDinsight.sh -h|--help - ./TDinsight.sh -n <ds-name> -a <api-url> -u <user> -p <password> - -Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 systems. - --h, -help, --help Display help - --V, -verbose, --verbose Run script in verbose mode. Will print out each step of execution. - --v, --plugin-version TDengine datasource plugin version, [default: latest] - --P, --grafana-provisioning-dir Grafana provisioning directory, [default: /etc/grafana/provisioning/] --G, --grafana-plugins-dir Grafana plugins directory, [default: /var/lib/grafana/plugins] --O, --grafana-org-id Grafana organization id. [default: 1] - --n, --tdengine-ds-name TDengine datasource name, no space. [default: TDengine] --a, --tdengine-api TDengine REST API endpoint. [default: http://127.0.0.1:6041] --u, --tdengine-user TDengine user name. [default: root] --p, --tdengine-password TDengine password. [default: taosdata] - --i, --tdinsight-uid Replace with a non-space ASCII code as the dashboard id. [default: tdinsight] --t, --tdinsight-title Dashboard title. [default: TDinsight] --e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false] - --E, --external-notifier Apply external notifier uid to TDinsight dashboard. - -Alibaba Cloud SMS as Notifier: --s, --sms-enabled To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook. --N, --sms-notifier-name Provisioning notifier name. [default: TDinsight Builtin SMS] --U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default. --D, --sms-notifier-is-default Set notifier as default. --I, --sms-access-key-id Alibaba Cloud SMS access key id --K, --sms-access-key-secret Alibaba Cloud SMS access key secret --S, --sms-sign-name Sign name --C, --sms-template-code Template code --T, --sms-template-param Template param, an escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' --B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx" --L, --sms-listen-addr [default: 127.0.0.1:9100] -``` - -Most command-line options can equivalently be set via environment variables. - -| Short Options | Long Options | Environment Variables | Description | -| ------ | -------------------------- | ---------------------------- | ----------------------------------------------------------------------------- | -| -v | --plugin-version | TDENGINE_PLUGIN_VERSION | The TDengine data source plugin version; the latest version is used by default. |
-| -P | --grafana-provisioning-dir | GF_PROVISIONING_DIR | The Grafana provisioning directory, defaults to `/etc/grafana/provisioning/`. | -| -G | --grafana-plugins-dir | GF_PLUGINS_DIR | The Grafana plugin directory, defaults to `/var/lib/grafana/plugins`. | -| -O | --grafana-org-id | GF_ORG_ID | The Grafana organization ID, default is 1. | -| -n | --tdengine-ds-name | TDENGINE_DS_NAME | The name of the TDengine data source, defaults to TDengine. | -| -a | --tdengine-api | TDENGINE_API | The TDengine REST API endpoint, defaults to `http://127.0.0.1:6041`. | -| -u | --tdengine-user | TDENGINE_USER | TDengine user name. [default: root] | -| -p | --tdengine-password | TDENGINE_PASSWORD | TDengine password. [default: taosdata] | -| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | The `uid` of the TDinsight dashboard. [default: tdinsight] | -| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [default: TDinsight] | -| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | Whether the provisioned dashboard is editable. [default: false] | -| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply an external notifier uid to the TDinsight dashboard. | -| -s | --sms-enabled | SMS_ENABLED | Enable the Alibaba Cloud SMS webhook built into the tdengine-datasource plugin. | -| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | The name of the provisioned notifier. [default: `TDinsight Builtin SMS`] | -| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | The "Notification Channel" `uid`; the lowercase notifier name is used by default, with other characters replaced by "-". | -| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | Set the built-in SMS notifier as the default. | -| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | Alibaba Cloud SMS access key id. | -| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | Alibaba Cloud SMS access key secret. | -| -S | --sms-sign-name | SMS_SIGN_NAME | Signature. | -| -C | --sms-template-code | SMS_TEMPLATE_CODE | Template code. | -| -T | --sms-template-param | SMS_TEMPLATE_PARAM | JSON template for the template parameters. | -| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | A comma-separated list of phone numbers, e.g. `"189xxxxxxxx,132xxxxxxxx"`. | -| -L | --sms-listen-addr | SMS_LISTEN_ADDR | The built-in SMS webhook listener address, default is `127.0.0.1:9100`. | - -Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script. - -```bash -sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -``` - -We provide a `-E` option to configure TDinsight to use an existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel. - -```bash -curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq -``` - -Use the `uid` value obtained above as the `-E` input. - -```bash -sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier -``` - -If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, enable it with the `-s` flag and add the following parameters. - -- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`.
-- `-U`: Channel uid, default is the lowercase of `name` with any other character replaced by `-`; for the default `-N`, the uid is `tdinsight-builtin-sms`. -- `-I`: Alibaba Cloud SMS access key id. -- `-K`: Alibaba Cloud SMS access secret key. -- `-S`: Alibaba Cloud SMS signature. -- `-C`: Alibaba Cloud SMS template id. -- `-T`: Alibaba Cloud SMS template parameters, a JSON format template, for example `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name, and alarm content. -- `-B`: a list of phone numbers, separated by commas (`,`). - -If you want to monitor multiple TDengine clusters, you need to set up a separate TDinsight dashboard for each. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if you use the built-in SMS alerting feature. - -```bash -sudo ./TDinsight.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' -# If using built-in SMS notifications -sudo ./TDinsight.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \ - -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.1:10611 -``` - -Please note that the provisioned data source, notification channel, and dashboard cannot be changed on the front end. You should update the configuration again via this script or manually change the configuration files in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana; use the `-P` option to change it as needed). - -Specifically, `-O` can be used to set the organization ID when you are using Grafana Cloud or another organization. `-G` specifies the Grafana plugin installation directory. The `-e` parameter sets the dashboard to be editable. - -## Set up TDinsight manually - -### Install the TDengine data source plugin - -Install the latest version of the TDengine data source plugin from GitHub. - -```bash -get_latest_release() { - curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" | - grep '"tag_name":' | - sed -E 's/.*"v([^"]+)".*/\1/' -} -TDENGINE_PLUGIN_VERSION=$(get_latest_release) -sudo grafana-cli \ - --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \ - plugins install tdengine-datasource -``` - -:::note -Plugins of version 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins. - -```ini -[plugins] -allow_loading_unsigned_plugins = tdengine-datasource -``` -::: - -### Start the Grafana service - -```bash -sudo systemctl start grafana-server -sudo systemctl enable grafana-server -``` - -### Logging into Grafana - -Open the default Grafana URL in a web browser: `http://localhost:3000`. -The default username/password is `admin`. Grafana will require a password change after the first login. - -### Adding a TDengine Data Source - -Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button. - -![TDengine Database TDinsight Add data source button](./assets/howto-add-datasource-button.webp) - -Search for and select **TDengine**. - -![TDengine Database TDinsight Add datasource](./assets/howto-add-datasource-tdengine.webp) - -Configure the TDengine datasource.
- -![TDengine Database TDinsight Datasource Configuration](./assets/howto-add-datasource.webp) - -Save and test. It will report 'TDengine Data source is working' under normal circumstances. - -![TDengine Database TDinsight datasource test](./assets/howto-add-datasource-test.webp) - -### Importing dashboards - -Point to **+** / **Create** - **import** (or the `/dashboard/import` URL). - -![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp) - -Type the dashboard ID `15167` in the **Import via grafana.com** field and click **Load**. - -![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp) - -Once the import is complete, the full page view of TDinsight is shown below. - -![TDengine Database TDinsight show](./assets/TDinsight-full.webp) - -## TDinsight dashboard details - -The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources, i.e. [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster), and databases. - -Details of the metrics are as follows. - -### Cluster Status - -![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) - -This section contains the current information and status of the cluster; alert information is also shown here (from left to right, top to bottom). - -- **First EP**: the `firstEp` setting in the current TDengine cluster. -- **Version**: TDengine server version (master mnode). -- **Master Uptime**: the time elapsed since the current master MNode was elected. -- **Expire Time**: Enterprise Edition expiration time. -- **Used Measuring Points**: the number of measuring points used by the Enterprise Edition. -- **Databases**: the number of databases. -- **Connections**: the number of current connections. -- **DNodes/MNodes/VGroups/VNodes**: total number of each resource and the number alive. -- **DNodes/MNodes/VGroups/VNodes Alive Percent**: the ratio of alive to total for each resource; the alert rule is enabled and triggers when the resource liveness rate (the average percentage of healthy resources over 1 minute) falls below 100%. -- **Measuring Points Used**: the number of measuring points used, with alert rule enabled (no data in the Community Edition; healthy by default). -- **Grants Expire Time**: the Enterprise Edition expiration time, with alert rule enabled (no data in the Community Edition; healthy by default). -- **Error Rate**: aggregate cluster error rate (average number of errors per second), with alert rule enabled. -- **Variables**: `show variables` table display. - -### DNodes Status - -![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - -- **DNodes Status**: simple table view of `show dnodes`. -- **DNodes Lifetime**: the time elapsed since the dnode was created. -- **DNodes Number**: the change in the number of DNodes over time. -- **Offline Reason**: if any dnode is offline, the reason is shown as a pie chart. - -### MNode Overview - -![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) - -1. **MNodes Status**: a simple table view of `show mnodes`. -2. **MNodes Number**: similar to `DNodes Number`, the change in the number of MNodes over time. - -### Request - -![TDengine Database TDinsight tdinsight requests](./assets/TDinsight-4-requests.webp) - -1. **Requests Rate (Inserts per Second)**: average number of inserts per second. -2.
**Requests (Selects)**: number of query requests and their rate (count per second). -3. **Requests (HTTP)**: number of HTTP requests and the request rate (count per second). - -### Database - -![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) - -Database usage, with one set of rows for each value of the variable `$database`, i.e. multiple rows per database. - -1. **STables**: number of super tables. -2. **Total Tables**: number of all tables. -3. **Sub Tables**: number of subtables of all super tables. -4. **Tables**: graph of the number of normal tables over time. -5. **Tables Number Foreach VGroups**: the number of tables contained in each VGroup. - -### DNode Resource Usage - -![TDengine Database TDinsight dnode usage](./assets/TDinsight-6-dnode-usage.webp) - -Data node resource usage, with one set of rows for each value of the variable `$fqdn`, i.e. each data node. It includes: - -1. **Uptime**: the time elapsed since the dnode was created. -2. **Has MNodes?**: whether the current dnode is an mnode. -3. **CPU Cores**: the number of CPU cores. -4. **VNodes Number**: the number of VNodes on the current dnode. -5. **VNodes Masters**: the number of vnodes in the master role. -6. **Current CPU Usage of taosd**: CPU usage of the taosd process. -7. **Current Memory Usage of taosd**: memory usage of the taosd process. -8. **Disk Used**: the total disk usage percentage of the taosd data directory. -9. **CPU Usage**: process and system CPU usage. -10. **RAM Usage**: time series view of RAM usage metrics. -11. **Disk Used**: disk used at each level of multi-level storage (default is level0). -12. **Disk Increasing Rate per Minute**: percentage increase or decrease in disk usage per minute. -13. **Disk IO**: disk IO rate. -14. **Net IO**: network IO, the aggregate network IO rate excluding local traffic. - -### Login History - -![TDengine Database TDinsight Login History](./assets/TDinsight-7-login-history.webp) - -Currently, only the number of logins per minute is reported. - -### Monitoring taosAdapter - -![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) - -taosAdapter request statistics and status details are monitored. It includes: - -1. **http_request**: the total number of requests, the number of failed requests, and the number of requests being processed -2. **top 3 request endpoint**: data of the top 3 requests grouped by endpoint -3. **Memory Used**: taosAdapter memory usage -4. **latency_quantile(ms)**: latency quantiles for the (1, 2, 5, 9, 99) stages -5. **top 3 failed request endpoint**: data of the top 3 failed requests grouped by endpoint -6. **CPU Used**: taosAdapter CPU usage - -## Upgrade - -TDinsight installed via the `TDinsight.sh` script can be upgraded to the latest Grafana plugin and TDinsight dashboard by re-running the script. - -In the case of a manual installation, follow the steps above to install the new Grafana plugin and dashboard yourself. - -## Uninstall - -TDinsight installed via the `TDinsight.sh` script can be removed with `TDinsight.sh -R`, which cleans up the associated resources. - -To completely uninstall a manually installed TDinsight, you need to clean up the following (a command sketch follows this list). - -1. the TDinsight Dashboard in Grafana. -2. the Data Source in Grafana. -3. remove the `tdengine-datasource` plugin from the plugin installation directory.
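- -Under the default paths used in this article, a minimal cleanup sketch might look as follows; the provisioning file names are hypothetical and depend on how the resources were created: - -```bash -# Hedged sketch: remove the plugin and the (hypothetical) provisioning files, -# then restart Grafana so the dashboard and data source disappear. -sudo rm -rf /var/lib/grafana/plugins/tdengine-datasource -sudo rm -f /etc/grafana/provisioning/datasources/tdengine.yml   # hypothetical name -sudo rm -f /etc/grafana/provisioning/dashboards/tdinsight.yml   # hypothetical name -sudo systemctl restart grafana-server -```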
- -## Integrated Docker Example - -```bash -git clone --depth 1 https://github.com/taosdata/grafanaplugin.git -cd grafanaplugin -``` - -Modify the `docker-compose.yml` file as needed: - -```yaml -version: '3.7' - -services: - grafana: - image: grafana/grafana:7.5.10 - volumes: - - ./dist:/var/lib/grafana/plugins/tdengine-datasource - - ./grafana/grafana.ini:/etc/grafana/grafana.ini - - ./grafana/provisioning/:/etc/grafana/provisioning/ - - grafana-data:/var/lib/grafana - environment: - TDENGINE_API: ${TDENGINE_API} - TDENGINE_USER: ${TDENGINE_USER} - TDENGINE_PASS: ${TDENGINE_PASS} - SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID} - SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET} - SMS_SIGN_NAME: ${SMS_SIGN_NAME} - SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE} - SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}' - SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS - SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR} - ports: - - 3000:3000 -volumes: - grafana-data: -``` - -Replace the environment variables in `docker-compose.yml` or save them to a `.env` file, then start Grafana with `docker-compose up`. See the [Docker Compose Reference](https://docs.docker.com/compose/). - -```bash -docker-compose up -d -``` - -TDinsight is then deployed via provisioning. Go to http://localhost:3000/d/tdinsight/ to view the dashboard. - -[grafana]: https://grafana.com -[tdengine]: https://tdengine.com diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md deleted file mode 100644 index 304e3bcb434ee9a6ba338577a4d1ba546b548e3f..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/12-directory.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: File directory structure -description: "TDengine installation directory description" ---- - -After TDengine is installed, the following directories or files are created in the system by default. - -| directory/file | description | -| ------------------------- | -------------------------------------------------------------------- | -| /usr/local/taos/bin | The TDengine executable directory. The executable files are soft-linked to the /usr/bin directory. | -| /usr/local/taos/driver | The TDengine dynamic link library directory. It is soft-linked to the /usr/lib directory. | -| /usr/local/taos/examples | TDengine application examples for various languages. | -| /usr/local/taos/include | The header files for TDengine's external C interface. | -| /etc/taos/taos.cfg | TDengine default [configuration file] | -| /var/lib/taos | TDengine's default data file directory. The location can be changed via the [configuration file]. | -| /var/log/taos | TDengine's default log file directory. The location can be changed via the [configuration file]. | - -## Executable files - -All executable files of TDengine are in the _/usr/local/taos/bin_ directory by default. These include: - -- _taosd_: TDengine server-side executable -- _taos_: TDengine CLI executable -- _taosdump_: data import and export tool -- _taosBenchmark_: TDengine testing tool -- _remove.sh_: script to uninstall TDengine. Please execute it carefully; it is linked to the **rmtaos** command in the /usr/bin directory.
It removes the TDengine installation directory `/usr/local/taos`, but keeps `/etc/taos`, `/var/lib/taos`, and `/var/log/taos`. -- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software -- _tarbitrator_: provides arbitration for two-node cluster deployments -- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter -- _TDinsight.sh_: script to download and install TDinsight -- _set_core.sh_: script to configure the system to generate core dump files for easier debugging -- _taosd-dump-cfg.gdb_: gdb script to facilitate debugging of taosd - -:::note -taosdump after version 2.4.0.0 requires taosTools as a standalone installation. A new version of taosBenchmark is included in taosTools too. -::: - -:::tip -You can configure different data directories and log directories by modifying the system configuration file `taos.cfg`. -::: diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md deleted file mode 100644 index acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd..0000000000000000000000000000000000000000 --- a/docs-en/14-reference/13-schemaless/13-schemaless.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: Schemaless Writing -description: "The schemaless writing method eliminates the need to create super tables/subtables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface." ---- - -In IoT applications, data is collected for many purposes such as intelligent control, business analysis, and device monitoring. Due to changes in business or functional requirements, or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine, starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written. When necessary, schemaless writing automatically adds the required columns to ensure that the data written by the user is stored correctly. - -The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from super tables and subtables created directly via SQL, and you can write data to them directly via SQL statements. Note that the names of tables created by schemaless writing are derived from fixed mapping rules over the tag values, so they are not human-readable. - -## Schemaless Writing Line Protocol - -TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API which parsing standard is to be used for the input content. - -For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extended protocol, based on InfluxDB's line protocol, which allows users to control the (super table) schema in a more fine-grained way.
- -With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed to the writing API at once to enable bulk writing). - -```json -measurement,tag_set field_set timestamp -``` - -where: - -- measurement will be used as the data table name. It is separated from tag_set by a comma. -- tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags separated by commas. It is separated from field_set by a space. -- field_set will be used as normal column data in the format `<field_key>=<field_value>,<field_key>=<field_value>`, again using commas to separate multiple normal columns. It is separated from the timestamp by a space. -- The timestamp is the primary key of this data row. - -All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes ("). - -In the schemaless writing line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail: - -- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`. -- If there are double quotes on both sides and an L prefix, it indicates the NCHAR(32) type. For example, `L"error message"`. -- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character.) -- Numeric types are distinguished by their suffix. - -| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | -| -------- | -------- | ------------ | -------------- | -| 1 | none or f64 | double | 8 | -| 2 | f32 | float | 4 | -| 3 | i8 | TinyInt | 1 | -| 4 | i16 | SmallInt | 2 | -| 5 | i32 | Int | 4 | -| 6 | i64 or i | Bigint | 8 | - -- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` are handled directly as BOOL types. - -For example, the following data row writes one row to the super table named `st`: the tag t1 is "3" (NCHAR), the tag t2 is "4" (NCHAR), and the tag t3 is "t3" (NCHAR); the column c1 is 3 (BIGINT), c2 is false (BOOL), c3 is "passit" (BINARY), and c4 is 4 (DOUBLE); the primary key timestamp is 1626006833639000000. - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 -``` - -Note that if the wrong case is used for the data type suffix, or the wrong data type is specified, an error message may be produced and the write may fail. - -## Main processing logic for schemaless writing - -Schemaless writing processes row data according to the following principles. - -1. The subtable name is generated by the following rules: first, combine the measurement name with the keys and values of the tags into a string: - -```json -"measurement,tag_key1=tag_value1,tag_key2=tag_value2" -``` - -Note that tag_key1, tag_key2 are not in the original order entered by the user but are sorted in ascending order by tag name; therefore, tag_key1 is not necessarily the first tag entered in the line protocol. -After sorting, the string's MD5 hash value "md5_val" is calculated. The result is then combined into the table name "t_md5_val", where "t_" is a fixed prefix that every table generated by this mapping relationship has. - -2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
-3. If the subtable obtained by parsing the line protocol does not exist, it is created with the subtable name determined in steps 1 or 2. -4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (incrementally only). -5. If some tag columns or regular columns of the super table have no value specified in a data row, the values of these columns are set to NULL. -6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only ever increased, never decreased) to ensure complete preservation of the data. -7. If the specified data subtable already exists, and the specified tag column value differs from the saved value, the value in the latest data row overwrites the old tag column value. -8. Errors encountered during processing interrupt the writing process and return an error code. - -:::tip -All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures, such as the total length of each row of data not exceeding 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. -::: - -## Time resolution recognition - -Three specified modes are supported in the schemaless writing process, as follows: - -| **Serial** | **Value** | **Description** | -| -------- | ------------------- | ------------------------------- | -| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | -| 3 | SML_JSON_PROTOCOL | JSON protocol format | - -In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table. - -| **Serial Number** | **Time Resolution Definition** | **Meaning** | -| -------- | --------------------------------- | -------------- | -| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) | -| 2 | TSDB_SML_TIMESTAMP_HOURS | hours | -| 3 | TSDB_SML_TIMESTAMP_MINUTES | minutes | -| 4 | TSDB_SML_TIMESTAMP_SECONDS | seconds | -| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds | -| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds | -| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds | - -In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined by the length of the timestamp (in the same way as the OpenTSDB standard operation), and any user-specified time resolution is ignored. - -## Data schema mapping rules - -This section describes how line protocol data is mapped to data with a schema. The measurement in each line protocol row is mapped to the super table name; in addition: -- The tag name in tag_set is the name of the tag in the data schema. -- The name in field_set is the column's name. - -The following data is used as an example to illustrate the mapping rules. - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 -``` - -This row maps to a super table `st` with three NCHAR tags (t1, t2, t3) and five data columns: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (double). The mapping becomes the following SQL statement.
- -```json -create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 double) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) -``` - -## Data schema change handling - -This section describes the impact on the data schema in different line protocol writing cases. - -When writing a field whose type is explicitly identified in the line protocol, a later change to that field's type definition results in an explicit data schema error, i.e., it triggers a write API error. As shown below: - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000 -``` - -The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error in schemaless writing. - -If a line protocol row declares a data column as BINARY and a subsequent row requires a longer BINARY length, this triggers a super table schema change. - -```json -st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 -``` - -Parsing the first row declares column c5 as a BINARY(4) field. The second row also parses c5 as a BINARY column, but its value has width 6, so the width of the BINARY field is increased to accommodate the new string. - -```json -st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 -``` - -The second row has an additional column c6 with the value "passit" compared to the first row, so a column c6 of type BINARY(6) is automatically added. - -## Write integrity - -TDengine provides idempotency guarantees for data writing, i.e., a failed write can safely be retried by calling the API again. However, it does not provide atomicity guarantees for writing multiple rows: when writing numerous rows in one batch, some rows may be written successfully while others fail. - -## Error code - -If an error occurs in the data itself during schemaless writing, the application gets the `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, indicating that the error occurred during writing. Other error codes are consistent with the rest of TDengine; the specific cause of the error can be obtained via `taos_errstr()`. diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx deleted file mode 100644 index b51d5a8d904601802efec0db5847203b72fa2668..0000000000000000000000000000000000000000 --- a/docs-en/20-third-party/01-grafana.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -sidebar_label: Grafana -title: Grafana ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development, and you can visualize the contents of TDengine data tables on a dashboard. - -You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). - -## Prerequisites - -In order for Grafana to add the TDengine data source successfully, the following preparations are required: - -1. The TDengine cluster is deployed and functioning properly -2.
taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. - -Record these values: - -- TDengine REST API url: `http://tdengine.local:6041`. -- TDengine cluster authorization, with user + password. - -## Installing Grafana - -TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is <https://grafana.com/grafana/download>. - -## Configuring Grafana - -### Install Grafana Plugin and Configure Data Source - - - - -Set the url and authorization environment variables by `export` or a [`.env`(dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html): - -```sh -export TDENGINE_API=http://tdengine.local:6041 -# user + password -export TDENGINE_USER=user -export TDENGINE_PASSWORD=password - -# Other useful variables -# - If to install TDengine data source, default is true -export TDENGINE_DS_ENABLED=false -# - Data source name to be created, default is TDengine -export TDENGINE_DS_NAME=TDengine -# - Data source organization id, default is 1 -export GF_ORG_ID=1 -# - Data source is editable in admin ui or not, default is 0 (false) -export TDENGINE_EDITABLE=1 -``` - -Run `install.sh`: - -```sh -bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -``` - -With this script, the TDengine data source plugin is installed and the Grafana data source is created automatically with Grafana provisioning configurations. Save the script and type `./install.sh --help` for the full usage of the script. - -Then restart the Grafana service and open Grafana in a web browser, usually at <http://localhost:3000>. - - - - -Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation. - -```bash -grafana-cli plugins install tdengine-datasource -# with sudo -sudo -u grafana grafana-cli plugins install tdengine-datasource -``` - -Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/tags) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory. - -```bash -GF_VERSION=3.2.2 -# from GitHub -wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip -# from Grafana -wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download -``` - -Take CentOS 7.2 for example: extract the plugin package to the /var/lib/grafana/plugins directory and restart Grafana. - -```bash -sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ -``` - -If Grafana is running in a Docker environment, the TDengine plugin can be automatically installed and set up using the following environment variable setting: - -```bash -GF_INSTALL_PLUGINS=tdengine-datasource -``` - -Now users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure.
- -![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp) - -Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure. - -![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp) - -Enter the datasource configuration page and modify the corresponding configuration following the default prompts. - -![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp) - -- Host: IP address of the server in the TDengine cluster that provides the REST service (offered by taosd before 2.4 and by taosAdapter since 2.4), together with the TDengine REST service port (6041); by default use `http://localhost:6041`. -- User: TDengine user name. -- Password: TDengine user password. - -Click `Save & Test` to test. You should see a success message if the test worked. - -![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) - - - - -### Create Dashboard - -Go back to the main interface to create a dashboard and click Add Query to enter the panel query page: - -![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp) - -As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for the query. - -- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to` and `interval` are built-in variables of the TDengine plugin, indicating the time range and interval fetched from the Grafana panel. Custom template variables are also supported; a way to try such a query outside Grafana is sketched at the end of this page. -- ALIAS BY: this allows you to set an alias for the current query. -- GENERATE SQL: clicking this button automatically replaces the corresponding variables and generates the final statement to be executed. - -Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located, as follows. - -![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp) - -> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/). - -### Importing the Dashboard - -You can install the TDinsight dashboard from the data source configuration page (e.g. `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for the TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for details. - -For more dashboards using the TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a short list: - -- [15146](https://grafana.com/grafana/dashboards/15146): Monitor multiple TDengine clusters. -- [15155](https://grafana.com/grafana/dashboards/15155): TDengine alert demo. -- [15167](https://grafana.com/grafana/dashboards/15167): TDinsight. -- [16388](https://grafana.com/grafana/dashboards/16388): Telegraf node metrics dashboard using TDengine data source.
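- -As a quick sanity check, a panel query can be tried against the TDengine REST API before it is used in Grafana. The following is a minimal sketch; the hostname and credentials are placeholders, and a concrete time range stands in for the `$from`/`$to` built-in variables: - -```bash -# Hedged sketch: run a panel-style query through the REST endpoint; replace -# the host, credentials and time range with values from your deployment. -curl -u root:taosdata -d \ - "select avg(mem_system) from log.dn where ts >= now-1h and ts < now interval(30s)" \ - http://tdengine.local:6041/rest/sql -```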
diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md deleted file mode 100644 index 7c6b83cf99dd733f9e9a86435e079a2daee00ad9..0000000000000000000000000000000000000000 --- a/docs-en/20-third-party/09-emq-broker.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -sidebar_label: EMQX Broker -title: EMQX Broker writing ---- - -MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code; you only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service, and the Enterprise Edition also provides a native TDengine driver for direct saving. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. - -## Prerequisites - -The following preparations are required for EMQX to add TDengine data sources correctly. -- The TDengine cluster is deployed and working properly -- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. -- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended. - -## Install and start EMQX - -Depending on the current operating system, users can download the installation package from the [EMQX official website](https://www.emqx.io/downloads) and execute the installation. After installation, use `sudo emqx start` or `sudo systemctl start emqx` to start the EMQX service. - - -## Create Database and Table - -In this step we create the appropriate database and table schema in TDengine for receiving MQTT data. Open the TDengine CLI and execute the SQL below: - -```sql -CREATE DATABASE test; -USE test; -CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); -``` - -Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are also based on this blog's scenario; please modify it according to your actual application scenario. - -## Configuring EMQX Rules - -Since the configuration interface of EMQX differs from version to version, v4.4.3 is used here as an example. For other versions, please refer to the corresponding official documentation. - -### Login EMQX Dashboard - -Use your browser to open the URL `http://IP:18083` and log in to the EMQX Dashboard. The initial username is `admin` and the password is `public`. - -![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) - -### Creating Rule - -Select "Rule" in the "Rule Engine" on the left and click the "Create" button:
- -![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) - -### Edit SQL fields - -Copy the SQL below and paste it into the SQL edit area: - -```sql -SELECT - payload -FROM - "sensor/data" -``` - -![TDengine Database EMQX create rule](./emqx/create-rule.webp) - -### Add "action handler" - -![TDengine Database EMQX add action handler](./emqx/add-action-handler.webp) - -### Add "Resource" - -![TDengine Database EMQX create resource](./emqx/create-resource.webp) - -Select "Data to Web Service" and click the "New Resource" button. - -### Edit "Resource" - -Select "WebHook" and fill in the request URL as the address and port of the server running taosAdapter (the default port is 6041). Leave the other properties at their default values. - -![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) - -### Edit "action" - -Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, the value of the key Authorization is: -``` -Basic cm9vdDp0YW9zZGF0YQ== -``` - -Please refer to the [TDengine REST API documentation](/reference/rest-api/) for details on authorization. - -Enter the rule engine replacement template in the message body: - -```sql -INSERT INTO test.sensor_data VALUES( - now, - ${payload.temperature}, - ${payload.humidity}, - ${payload.volume}, - ${payload.PM10}, - ${payload.pm25}, - ${payload.SO2}, - ${payload.NO2}, - ${payload.CO}, - '${payload.id}', - ${payload.area}, - ${payload.ts} -) -``` - -![TDengine Database EMQX edit action](./emqx/edit-action.webp) - -Finally, click the "Create" button at the bottom left corner to save the rule. -## Compose a program to mock data - -```javascript -{{#include docs-examples/other/mock.js}} -``` - -Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test, in case the hardware is not capable of handling a larger number of concurrent clients. - -![TDengine Database EMQX client num](./emqx/client-num.webp) - -## Execute tests to simulate sending MQTT data - -``` -npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org -node mock.js -``` - -![TDengine Database EMQX run mock](./emqx/run-mock.webp) - -## Verify that EMQX is receiving data - -Refresh the EMQX Dashboard rule engine interface to see how many records were received correctly: - -![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) - -## Verify that data is written to TDengine - -Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly: - -![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) - -Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine. -Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX. diff --git a/docs-en/20-third-party/10-hive-mq-broker.md b/docs-en/20-third-party/10-hive-mq-broker.md deleted file mode 100644 index 333e00fa0e9b724ffbb067a83ad07d0b846b1a23..0000000000000000000000000000000000000000 --- a/docs-en/20-third-party/10-hive-mq-broker.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -sidebar_label: HiveMQ Broker -title: HiveMQ Broker writing ---- - -[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions.
HiveMQ is aimed mainly at enterprise machine-to-machine (M2M) communication and internal transport, and focuses on scalability, ease of management, and security. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via the TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it. \ No newline at end of file diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md deleted file mode 100644 index 4d8bed4d2d6b3a0404e10213aeab599767325cc2..0000000000000000000000000000000000000000 --- a/docs-en/21-tdinternal/01-arch.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -sidebar_label: Architecture -title: Architecture ---- - -## Cluster and Primary Logic Unit - -The design of TDengine is based on the assumption that no hardware or software system is 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system with a high-reliability architecture. Hardware or software failure of a single server, or even multiple servers, will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to significantly reduce hardware resource needs. - -### Primary Logic Unit - -The logical structure diagram of TDengine's distributed architecture is as follows: - -![TDengine Database architecture diagram](structure.webp) -
Figure 1: TDengine architecture diagram
- -A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. - -**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name). - -**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. - -**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. - -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. 
There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. - -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and are then replicated to slave vnodes, thus ensuring that a single copy of data is replicated on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns each group a cluster-unique ID, the VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and their data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, and can be reduced to one, that is, no data replication. The VGroup ID never changes. Even if a virtual node group is deleted, its ID will not be reused. - -**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, and Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; and, when returning the results to the application, performing the final level of aggregation, sorting, filtering and other operations. For the JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of the TDengine cluster. - -### Node Communication - -**Communication mode**: Communication among the data nodes of a TDengine system, and between the client driver and each data node, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission over UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for the transmission of packets with a data volume of more than 15K or for query operations. At the same time, TDengine will automatically compress/decompress and digitally sign/authenticate the data, according to the configuration and the data packet. For data replication among data nodes, only TCP is used for data transportation.
- -**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with an FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, using an IP address is not recommended because IP addresses may change, and once an address changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or that the hosts files on the nodes are configured properly. - -**Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of the cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. - -**Cluster external connection**: A TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the TDengine CLI application `taos`, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. - -**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: - -1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the EP information of the mnode, skip to the second step; -2. Check the system configuration file taos.cfg to obtain the node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode; in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step; -3. Set its own EP as an mnode EP and run independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts fail, it will sleep for a few seconds before trying again. - -**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node?
This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP is in the list, the data node will start the mnode module and become an mnode. If its EP is not in the mnode EP List, the mnode module will not start. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, totally transparently and without manual intervention. Modifications of configuration parameters are decided by the mnode itself according to resource usage. - -**Add new data nodes:** Once the system has one data node, it is already a working system. There are two steps to add a new node into the cluster. -- Step 1: Connect to the existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode". -- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step. - -**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from a newly started dnode or TAOSC, if it’s not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. - -### A Typical Data Writing Process - -To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. - -![typical process of TDengine Database](message.webp) -
Figure 2: Typical process of TDengine
- -1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. -2. TAOSC checks the cache to see if metadata exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get-metadata request to the mnode. -3. The mnode returns the metadata of the table to TAOSC. The metadata contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs; if the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. -4. TAOSC initiates an insert request to the master vnode. -5. After the vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from the vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in the vgroup. -6. TAOSC notifies the application that writing is successful. - -For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC re-issues the metadata request to the EP of another mnode. - -For Steps 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply with the actual master as the new target, to which TAOSC then sends the request. Once a response of successful insertion is obtained, TAOSC will cache the information of the master node. - -The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and they are transparent to applications. - -Through the TAOSC caching mechanism, the mnode needs to be accessed only when a table is accessed for the first time, so the mnode will not become a system bottleneck. However, because schema and vgroup may change (such as with load balancing), TAOSC will interact with the mnode regularly to automatically update the cache.
If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds. -- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck. - -Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages: - -- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation. -- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. - -### Data Sharding - -For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range. - -VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application. - -For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. - -When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. 
If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table on it. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes. - -The metadata of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of metadata, which is good for efficient and parallel tag filtering operations. - -### Data Partitioning - -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as a data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs. - -In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. - -### Load Balancing - -Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. - -If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by the parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by the mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there is an mnode on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. - -When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process. - -The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by the parameter “balance”, which turns automatic load balancing on or off.** - -## Data Writing and Replication Process - -If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect.
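To make the partitioning and replication parameters above concrete, here is a minimal sketch of a database definition. The database name and values are illustrative only, and TDengine 2.x `CREATE DATABASE` options are assumed:

```sql
-- Partition data files into 10-day time ranges, retain data for 3650 days,
-- and keep 3 replicas, so each vgroup of this database has 3 vnodes.
CREATE DATABASE power DAYS 10 KEEP 3650 REPLICA 3;
```

With such a definition, a data file holds at most 10 days of data, files older than 3650 days are deleted automatically, and every write is replicated to the vnodes of the table's vgroup.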
- -### Master vnode Writing Process - -The master vnode uses the following writing process: - -![TDengine Database Master Writing Process](write_master.webp) -
Figure 3: TDengine Master writing process
- -1. Master vnode receives the application data insertion request, verifies it, and moves to the next step; -2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; -3. If there are multiple replicas, the vnode will forward the data packet to slave vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data; -4. Write the data into memory and add the record to the “skip list”; -5. Master vnode returns a confirmation message to the application, indicating a successful write. -6. If any of Step 2, 3 or 4 fails, the error will be returned directly to the application. - -### Slave vnode Writing Process - -For a slave vnode, the write process is as follows: - -![TDengine Database Slave Writing Process](write_slave.webp) -
Figure 4: TDengine Slave Writing Process
- -1. Slave vnode receives a data insertion request forwarded by the master vnode; -2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; -3. Write the data into memory and add the record to the “skip list”. - -Compared with the master vnode, a slave vnode has no forwarding or reply confirmation step, which means two steps fewer. But writing into memory and the WAL is exactly the same. - -### Remote Disaster Recovery and IDC (Internet Data Center) Migration - -As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring the IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDCs and different racks, thus implementing remote disaster recovery without other tools. - -On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, the added virtual nodes can provide services. During the synchronization process, the master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove the old physical nodes after the data synchronization is completed. - -However, asynchronous replication has a very-low-probability scenario where data may be lost. The specific scenario is as follows: - -1. Master vnode has finished its 5-step operations, confirmed the success of the write to the application, and then goes down; -2. Slave vnode receives the write request, but processing fails before the log is written in Step 2; -3. Slave vnode will become the new master, thus losing one record. - -In theory, asynchronous replication cannot guarantee against data loss. However, this is an extremely low probability scenario as described above. - -Note: Remote disaster recovery and no-downtime IDC migration are only supported by the Enterprise Edition. **Hint: This function is not available yet** - -### Master/slave Selection - -Each vnode maintains a version number. When in-memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one. - -When a vnode starts, its role (master, slave) is uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including the version and its own role. Through the exchange, the system implements a master-selection process. The rules are as follows: - -1. If there’s only one replica, it’s always master -2. When all replicas are online, the one with the latest version is master -3. If over half of the virtual nodes are online and one of them is a slave, that virtual node will automatically become master -4. For rules 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as master.
- -### Synchronous Replication - -For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify an additional parameter, “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replicas, it needs to wait for “quorum-1” reply confirmations before informing the application that the data has been successfully written on the slaves. If “quorum-1” reply confirmations are not received within a certain period of time, the master vnode will return an error to the application. - -With synchronous replication, system performance will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication. - -## Caching and Persistence - -### Caching - -TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. Unlike the read-driven data caching mode (Least-Recently-Used, LRU), this strategy directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. - -TDengine provides users with millisecond-level access to newly collected data through its query functions. Putting the recently arrived data directly in the buffer allows users' analysis queries on the latest piece or batch of data to respond more quickly, and provides faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters, without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied; the previously cached data is written to disk in batches and is not reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems. - -Each vnode has its own independent memory, composed of multiple fixed-size memory blocks, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory blocks are used, the disk-writing operation will start, and subsequent writing operations are carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”. - -### Persistent Storage - -TDengine uses a data-driven method to write the data from the buffer onto hard disk for persistent storage.
When the cached data in a vnode reaches a certain volume, TDengine will start the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successful persistence, to avoid unlimited log growth. - -To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each of which only saves data for a fixed number of days, determined by the system configuration parameter `“days”`. Thus, for the given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations. - -For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. - -Given the “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small; 10 to 100 is appropriate. Based on this principle, a reasonable value for “days” can be set. In the current version, the parameter “keep” can be modified, but the parameter “days” cannot be modified once it is set. - -In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take longer. If it is too small, the data block index becomes too large and the compression efficiency will be low, with slower reading speed. - -Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, the start and end times of the data, and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when data is written to disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), the records will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in the last file and then written into the data file. - -When data is written to disk, the system decides whether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, the Simple8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression, with a general compression algorithm applied on top, which yields a higher compression ratio.
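The file-block and compression parameters just described are set per database. A minimal sketch follows; the database name and values are illustrative, and TDengine 2.x `CREATE DATABASE` options are assumed:

```sql
-- At most 4096 rows per file block, at least 100 rows before a block is
-- moved out of the .last file, and two-stage compression enabled.
CREATE DATABASE sensor MAXROWS 4096 MINROWS 100 COMP 2;
```

Tuning maxRows trades block-index size and compression efficiency against data-location cost for queries, as described above.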
- -### Tiered Storage - -By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can be configured with the system parameter “dataDir” to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the timestamps of data files. For example, the latest data is stored on SSD, data older than a week is stored on local hard disk, and data older than four weeks is stored on a network storage device. This reduces storage costs and ensures efficient data access. The movement of data between different storage media is done automatically by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. - -The dataDir format is as follows: -``` -dataDir data_path [tier_level] -``` - -Where data_path is the folder path of the mount point and tier_level is the storage tier of the media. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same storage tier are distributed over all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level specified, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed; otherwise, it will have a devastating impact on the written data. - -Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows: - -``` -dataDir /mnt/disk1/taos -dataDir /mnt/disk2/taos 0 -dataDir /mnt/disk3/taos 1 -dataDir /mnt/disk4/taos 1 -dataDir /mnt/disk5/taos 2 -dataDir /mnt/disk6/taos 2 -``` - -A mounted disk can also be a non-local network disk, as long as the system can access it. - -Note: Tiered Storage is only supported in the Enterprise Edition - -## Data Query - -TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of the client, vnode and mnode. - -### Single Table Query - -The parsing and validation of SQL statements are completed on the client side. SQL statements are parsed into an Abstract Syntax Tree (AST), which is then validated. Then the metadata (table metadata) for the table specified in the query is requested from the management node (mnode). - -According to the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode.
The query execution thread of the vnode establishes the basic query execution environment, immediately acknowledges the query request, and starts executing the query at the same time. - -When the client fetches the query result, the worker thread in the query execution queue of the dnode waits for the vnode execution thread to complete before returning the query result to the requesting client. - -### Aggregation by Time Axis, Downsampling, Interpolation - -Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases compared with common databases. It is similar to the window query of stream computing engines. - -The keyword `interval` is introduced into TDengine to split fixed-length time windows on the time axis, and the data within each time window range is aggregated as needed. For example: - -```mysql -select count(*) from d1001 interval(1h); -``` - -For the data collected by device d1001, the number of records stored per hour is returned by a 1-hour time window. - -In application scenarios where query results need to be obtained continuously, if there is data missing in a given time interval, the results for that interval will also be missing. TDengine provides a strategy to interpolate the results of timeline aggregation calculations. The results of time-axis aggregation can be interpolated by using the keyword `fill`. For example: - -```mysql -select count(*) from d1001 interval(1h) fill(prev); -``` - -For the data collected by device d1001, the number of records per hour is counted. If there is no data in a certain hour, the statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value filling (null), and specific value filling (value). - -### Multi-table Aggregation Query - -TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). An STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under an STable by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure: - -![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp) -
Figure 5: Diagram of multi-table aggregation query
- -1. Application sends a query condition to the system; -2. TAOSC sends the STable name to the Meta Node (management node); -3. The management node sends the vnode list owned by the STable back to TAOSC; -4. TAOSC sends the computing request together with tag filters to the multiple data nodes corresponding to these vnodes; -5. Each vnode first finds, in memory, the set of tables within its own node that meet the tag filters, then scans the stored time-series data, completes the corresponding aggregation calculations, and returns the result to TAOSC; -6. TAOSC finally aggregates the results returned by the multiple data nodes and sends them back to the application. - -Since TDengine stores tag data and time-series data separately in the vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is found first, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions and most operations for ordinary tables are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. - -### Precomputation - -In order to effectively improve the performance of query processing, and based on the immutable nature of IoT data, statistical information about the data stored in a data block is recorded in the head of the data block, including the max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are used directly, and there is no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of the data blocks stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the read-IO pressure and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL. - diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md deleted file mode 100644 index 4d47aec1d76014ba63f6be91004abcc3934769f7..0000000000000000000000000000000000000000 --- a/docs-en/25-application/03-immigrate.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -sidebar_label: OpenTSDB Migration to TDengine -title: Best Practices for Migrating OpenTSDB Applications to TDengine ---- - -As a scalable, distributed time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse and their architecture is becoming more complex. - -As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up. - -To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**.
- -Having learned from the advantages of many traditional relational databases, NoSQL databases, stream computing engines, and message queues, TDengine has unique benefits in time-series big data processing. TDengine can effectively solve the problems currently encountered by OpenTSDB. - -Compared with OpenTSDB, TDengine has the following distinctive features. - -- Data writing and querying performance far exceeds that of OpenTSDB. -- An efficient compression mechanism for time-series data, which compresses data to less than 1/5 of its original storage space on disk. -- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds. -- The built-in functions cover all of OpenTSDB's query functions, and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost. -- Supports up to 128 tags, with a total tag length of 16 KB. -- In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. It supports a variety of enterprise-class standard connector protocols, such as JDBC. - -Migrating applications originally running on OpenTSDB to TDengine effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. Both the stand-alone version and the cluster version are open-sourced, and there is no need to be concerned about vendor lock-in. - -We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems. - -## DevOps Application Quick Migration - -### 1. Typical Application Scenarios - -The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario. - -**Figure 1. Typical architecture in a DevOps scenario** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") - -In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate information collected by the agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana). - -The agents deployed in the application nodes are responsible for providing operational metrics from different sources to collectd/StatsD, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster, where the data is then visualized using the kanban board software Grafana. - -### 2. 
Migration Services - -- **TDengine installation and deployment** - -First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using the various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). - -Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. - -- **Adjusting the data collector configuration** - -TDengine version 2.4 and later includes `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports InfluxDB's Line Protocol and OpenTSDB's telnet/JSON writing protocol specifications, providing rich data access capabilities that effectively reduce the cost and difficulty of user migration. - -Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. - -Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). - -If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where taosAdapter is deployed. For example, assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows. - -```html -LoadPlugin write_tsdb -<Plugin write_tsdb> - <Node> - Host "192.168.1.130" - Port "6046" - HostTags "status=production" - StoreRates false - AlwaysAppendDS false - </Node> -</Plugin> -``` - -You can use collectd and push the data to taosAdapter utilizing the write_tsdb plugin. taosAdapter will call the API to write the data to TDengine. If you are using StatsD, adjust its configuration file accordingly. - -- **Tuning the Dashboard system** - -After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). - -TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to start using them. - -**Figure 2. Importing Grafana Templates** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") - -With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward; there is no need to write any code, and only some configuration files need to be changed. - -### 3. Post-migration architecture - -The figure below (Figure 3) shows the system's overall architecture after the migration is complete. The acquisition side, the data-writing path, and the monitoring and presentation side all remain stable, with only a few configuration adjustments, which do not involve any critical changes or alterations. Migrating from OpenTSDB to TDengine brings more powerful processing and query performance.
-
-In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) that provides storage and a data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine. TDengine will save compute and storage resources: with the same compute resource allocation, a single TDengine node can provide the service capacity of 3 to 5 OpenTSDB nodes. A TDengine cluster may be required depending on the scale of the application.
-
-**Figure 3. System architecture after migration**
-![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion")
-
-The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application.
-
-## Migration evaluation and strategy for other scenarios
-
-### 1. Differences between TDengine and OpenTSDB
-
-This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading it, you can fully evaluate whether your more complex OpenTSDB-based applications can be migrated to TDengine, and what you should pay attention to after migration.
-
-TDengine currently only supports Grafana for dashboard rendering, so if your application uses front-end dashboards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.), those dashboards cannot be migrated directly to TDengine and will need to be ported to Grafana to work correctly.
-
-TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software, but future versions will provide support for more options. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly.
-In addition to the two data aggregator protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's writing protocols (telnet lines and JSON format). You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine.
-
-In addition, if your application uses the following features of OpenTSDB, you need to take the following considerations into account before migrating your application to TDengine.
-
-1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant processing logic around it in your application, then this status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for cluster state monitoring to meet the monitoring and maintenance needs of your application.
-2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table at the same system level. However, a logical multi-level structure can be simulated through appropriate construction of distinct tag values (see the sketch after this list).
-3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the raw results instead. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. While TDengine does not currently support automatic downsampling of multiple timelines or pre-aggregation over a range of periods, thanks to its high-performance query processing it can provide very fast query responses without relying on Rollup and pre-aggregation, which keeps your application's query logic straightforward and simple.
-4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (whose results are consistent with the Derivative behavior of InfluxDB) and `IRate` (whose results are compatible with the IRate function in Prometheus). The results of these two functions differ slightly from those of Rate, but the TDengine functions are more powerful (see the sketch below). In addition, TDengine supports all the calculation functions provided by OpenTSDB, and its query functions go well beyond those supported by OpenTSDB, which can significantly simplify the processing logic of your application.
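-
-The following is a minimal, hedged sketch of points 2 and 4 above. The super table `cpu_load` with `region` and `host` tags, and the plain table `table_name` with a `val` column, are hypothetical names introduced only for illustration; they are not part of any OpenTSDB schema.
-
-```sql
--- Emulating one level of an /api/tree hierarchy (region -> host) with tags:
-create stable cpu_load(ts timestamp, val double) tags(region binary(20), host binary(20));
-insert into cpu_load_sh_vm1 using cpu_load tags('shanghai', 'vm1') values(now, 0.29);
--- drill down through the "tree" by filtering and grouping on tag values
-select avg(val) from cpu_load group by region;
-select avg(val) from cpu_load where region='shanghai' group by host;
-
--- Rate-of-change counterparts to OpenTSDB's Rate:
-select derivative(val, 1s, 0) from table_name;  -- per-second rate; 0 keeps negative results
-select irate(val) from table_name;              -- instantaneous rate from the last two samples
-```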
-
-With the above introduction, we believe you can understand the changes brought about by migrating from OpenTSDB to TDengine. This information will also help you correctly determine whether to migrate your application to TDengine to experience the powerful and convenient time-series data processing capability it provides.
-
-### 2. Migration strategy suggestion
-
-OpenTSDB-based system migration involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. If your application has functions that strongly depend on the OpenTSDB features listed above and you do not want to stop using them, the two systems should run in parallel for a while, and the historical data should be migrated to TDengine afterwards.
-You can also consider keeping the original OpenTSDB system running while using TDengine to provide the primary services.
-
-## Data model design
-
-On the one hand, TDengine requires a strict schema definition for its incoming data. On the other hand, the data model of TDengine is richer than that of OpenTSDB, and its multi-value model is compatible with all single-value model building requirements.
-
-Let us now assume a DevOps scenario where we use collectd to collect the underlying metrics of the device, including memory, swap, disk, etc. The schema in OpenTSDB is as follows.
-
-| No. | metric | value name | type | tag1 | tag2 | tag3 | tag4 | tag5 |
-| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ |
-| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a |
-| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a |
-| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
-
-TDengine requires a schema for the data it stores, i.e., you need to create a super table and specify its schema before writing the data. For data schema creation, you have two options:
-1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write data (as text lines or in JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format.
-
-At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in earlier versions this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory for reference.
-
-2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB's and TDengine's data models. Considering that OpenTSDB uses a single-value mapping model, we recommend using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models.
-
-- **Single-value model**.
-
-The steps are as follows:
-- Use the name of the metric as the name of the TDengine super table.
-- Build with two basic data columns: timestamp and value. The tags of the super table correspond to the tag information of the metric, and the number of tags equals the number of tags of the metric.
-- Name the sub-tables with a fixed rule: `metric + '_' + tag1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name.
-
-Create 3 super tables in TDengine.
-
-```sql
-create stable memory(ts timestamp, val double) tags(host binary(12), memory_type binary(20), memory_type_instance binary(20), source binary(20));
-create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20));
-create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20));
-```
-
-For sub-tables, use dynamic table creation as shown below.
-
-```sql
-insert into memory_vm130_memory_buffered_collectd using memory tags('vm130', 'memory', 'buffered', 'collectd') values(1632979445000, 3.0656);
-```
-
-The final system will have about 340 sub-tables and three super tables. Note that if the concatenated tag values cause a sub-table name to exceed the system limit (191 bytes), some encoding (e.g., MD5) needs to be used to convert it to an acceptable length.
-
-- **Multi-value model**
-
-Ideally you would take advantage of TDengine's multi-value modeling capabilities. To do so, you first need to meet the requirement that different collected quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring that multiple metrics are written at once using a single SQL statement. A metric's name is used as the name of the super table, creating a multi-column data model for quantities with the same collection frequency that arrive simultaneously. The sub-tables are named using a fixed rule. Since each of the metrics above contains only one measurement value, they cannot be converted into a multi-value model; a hypothetical sketch of what a multi-value model would look like follows below.
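-
-As a purely hypothetical illustration (these metrics and table names are not part of the collectd schema above), a multi-value super table for quantities sampled together might look like this:
-
-```sql
--- temperature and humidity are assumed to be collected by the same agent at the
--- same instant, so they can share one row per timestamp in a multi-value super table
-create stable sensors(ts timestamp, temperature float, humidity float) tags(host binary(12), source binary(20));
-insert into sensors_vm130_collectd using sensors tags('vm130', 'collectd') values(now, 21.5, 0.44);
-```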
-
-## Data triage and application adaptation
-
-Subscribe to the message queue and start writing data to TDengine.
-
-After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data.
-
-```sql
-select count(*) from memory
-```
-
-After completing the query, if the written data does not differ from what is expected and there are no abnormal error messages from the writing program itself, you can confirm that the written data is complete and valid.
-
-TDengine does not support querying or data fetching using the OpenTSDB query syntax, but it does provide a counterpart for each OpenTSDB query. Appendix 1 shows how each OpenTSDB query can be adapted to its TDengine counterpart. To fully understand the types of queries supported by TDengine, refer to the TDengine user manual.
-
-TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. Please read the user manual for specific operations and usage.
-
-## Historical Data Migration
-
-### 1. Use the tool to migrate data automatically
-
-To facilitate historical data migration, we provide a plug-in for the data synchronization tool DataX, which can automatically write data into TDengine. Note that DataX's automatic data migration only supports the single-value model.
-
-For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html).
-
-After migrating via DataX, we found that the efficiency of migrating historical data can be significantly improved by starting multiple processes and migrating numerous metrics simultaneously. The following are some records from the migration process, provided as a reference for application migration.
-
-| Number of DataX instances (concurrent processes) | Migration speed (records/second) |
-| ------------------------------------------------ | -------------------------------- |
-| 1 | About 139,000 |
-| 2 | About 218,000 |
-| 3 | About 249,000 |
-| 5 | About 295,000 |
-| 10 | About 330,000 |
-
-(Note: The test data comes from a single-node Intel(R) Core(TM) i7-10700 CPU @ 2.90GHz 16-core 64 GB device; channel and batchSize are 8 and 1000 respectively, and each record contains 10 tags.)
-
-### 2. Manual data migration
-
-Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then write the merged timelines into the database through SQL insert statements.
-
-Manual migration of data requires attention to the following two issues:
-
-1) When storing the exported data on disk, the disk needs enough storage space to fully accommodate the exported data files. To avoid running out of disk space, you can adopt a partial import mode: preferentially export the timelines belonging to the same super table, import only those files into TDengine, and then continue with the next super table.
-
-2) If the system has enough spare computing and IO resources while under its regular load, establish a multi-threaded import to maximize the efficiency of data migration. Considering the heavy load that data parsing places on the CPU, it is necessary to control the maximum number of parallel tasks to avoid overloading the system when importing historical data.
-
-Due to the operational simplicity of TDengine itself, there is no need to perform index maintenance or data format conversion anywhere in this process. The whole process only needs to be executed sequentially.
-
-While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch the query requests to TDengine to achieve seamless application switching.
-
-## Appendix 1: OpenTSDB query function correspondence table
-
-### Avg
-
-Equivalent function: avg
-
-Example:
-
-```sql
-SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s)
-```
-
-Remarks:
-
-1. The value in Interval needs to be the same as the interval value in the outer query.
-2. Interpolation processing in TDengine uses subqueries to assist in completion. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation requirements.
-3. The parameter 20s in Interval indicates that the inner query generates results according to a 20-second time window. In an actual query, it needs to be adjusted to match the time interval between records, so that the interpolation results are equivalent to the original data.
-4. Due to the particular interpolation strategy and mechanism of OpenTSDB, i.e. interpolation followed by aggregate calculation, it is impossible for its results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling in such queries).
-
-### Count
-
-Equivalent function: count
-
-Example:
-
-```sql
-select count(*) from super_table_name;
-```
-
-### Dev
-
-Equivalent function: stddev
-
-Example:
-
-```sql
-Select stddev(val) from table_name
-```
-
-### Estimated percentiles
-
-Equivalent function: apercentile
-
-Example:
-
-```sql
-Select apercentile(col1, 50, "t-digest") from table_name
-```
-
-Remark:
-
-1. When calculating estimated percentiles, OpenTSDB uses the t-digest algorithm by default, so the algorithm needs to be specified in the `apercentile()` function in order to obtain the same results in TDengine. TDengine supports two different percentile calculation algorithms, named "default" and "t-digest" respectively.
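-
-A hedged sketch, reusing the hypothetical `col1` and `table_name` from the example above, compares the two algorithms side by side:
-
-```sql
--- "default" and "t-digest" may return slightly different estimates for the same data
-Select apercentile(col1, 50, "default"), apercentile(col1, 50, "t-digest") from table_name
-```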
-
-### First
-
-Equivalent function: first
-
-Example:
-
-```sql
-Select first(col1) from table_name
-```
-
-### Last
-
-Equivalent function: last
-
-Example:
-
-```sql
-Select last(col1) from table_name
-```
-
-### Max
-
-Equivalent function: max
-
-Example:
-
-```sql
-Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
-```
-
-Note: The Max function requires interpolation for the reasons described above.
-
-### Min
-
-Equivalent function: min
-
-Example:
-
-```sql
-Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s);
-```
-
-### MimMax
-
-Equivalent function: max
-
-```sql
-Select max(val) from table_name
-```
-
-Note: This function has no interpolation requirements, so it can be calculated directly.
-
-### MimMin
-
-Equivalent function: min
-
-```sql
-Select min(val) from table_name
-```
-
-Note: This function has no interpolation requirements, so it can be calculated directly.
-
-### Percentile
-
-Equivalent function: percentile
-
-Remark: TDengine's `percentile` function computes exact percentiles and follows the same usage pattern as the functions above, e.g. `Select percentile(val, 50) from table_name`.
-
-### Sum
-
-Equivalent function: sum
-
-```sql
-Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
-```
-
-Note: The Sum function requires interpolation for the reasons described above.
-
-### Zimsum
-
-Equivalent function: sum
-
-```sql
-Select sum(val) from table_name
-```
-
-Note: This function has no interpolation requirements, so it can be calculated directly.
-
-Complete example:
-
-```json
-// OpenTSDB query JSON
-query = {
-"start": 1510560000,
-"end": 1515000009,
-"queries": [{
-"aggregator": "count",
-"metric": "cpu.usage_user"
-}]
-}
-
-// Equivalent query SQL:
-SELECT count(*)
-FROM `cpu.usage_user`
-WHERE ts>=1510560000 AND ts<=1515000009
-```
-
-## Appendix 2: Resource Estimation Methodology
-
-### Data generation environment
-
-We still use the hypothetical environment from Chapter 4, with three metrics. Temperature and humidity are each written at a rate of one record every 5 seconds, with 100,000 timelines. Air pollution is written at a rate of one record every 10 seconds, with 10,000 timelines. The query request frequency is 500 QPS.
-
-### Storage resource estimation
-
-Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` records per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L * 86400` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L * 86400)/C` bytes. The storage resources are estimated to accommodate the data for 1.5 years. In a production environment, the compression ratio C of TDengine is generally between 5 and 7.
-With an additional 20% redundancy, the required storage resources can be calculated as:
-
-```matlab
-(n * t * L * 86400) * (365 * 1.5) * (1+20%) / C
-```
-
-Substituting into the above formula, the raw data generated every year is 11.8 TB, not counting tag information. Note that tag information is associated with each timeline in TDengine, not with every record, so the amount of data to be stored is somewhat lower than the raw data generated, and tag data as a whole can be ignored. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
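-
-Stated in standard notation, the storage estimate above (under the same assumptions about `n`, `t`, `L` and `C`, with the same 1.5-year horizon and 20% redundancy) is:
-
-```latex
-S = \frac{n \cdot t \cdot L \cdot 86400 \cdot 365 \cdot 1.5 \cdot 1.2}{C}
-```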
-
-### Storage Device Selection Considerations
-
-A disk with better random read performance, such as an SSD, improves the query response performance of the whole system. To obtain good query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1,000, and it is better to reach 5,000 IOPS or more. We recommend using the `fio` utility to evaluate the random read performance of the current device, to confirm whether it can meet the requirements for random reads of large files.
-
-Hard disk write performance has little effect on TDengine. The TDengine writing process adopts an append-only write mode, so as long as the device has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance.
-
-### Computational resource estimation
-
-Due to the characteristics of IoT data, when the frequency of data generation is constant, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/), the system consumes less than 1 CPU core at 22,000 writes per second.
-
-In estimating the CPU resources consumed by queries, assume that the application requires the database to provide 10,000 QPS and that the CPU time consumed by each query is about 1 ms. Each core can then serve about 1,000 queries per second, so at least 10 cores are required to satisfy 10,000 QPS. For the system as a whole to keep CPU load below 50%, the entire cluster needs twice as many cores, i.e. 20 cores.
-
-### Memory resource estimation
-
-The database allocates 16 MB * 3 of buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine creates 22 Vnodes (virtual nodes) by default. Each Vnode contains 1,000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. It then takes about 1.5 hours to fill a write block, which triggers persistence to disk without requiring any adjustment. A total of 22 Vnodes require about 1 GB of memory cache. Considering the memory needed for queries, and assuming a memory overhead of about 50 MB per query, 500 concurrent queries require about 25 GB of memory.
-
-In summary, a single 16-core 32 GB machine, or a cluster of two 8-core 16 GB machines, is enough.
-
-## Appendix 3: Cluster Deployment and Startup
-
-TDengine provides a wealth of help documents covering many aspects of cluster installation and deployment. Here is a list of relevant documents for your reference.
-
-### Cluster Deployment
-
-The first step is TDengine installation. Download the latest stable version of TDengine from the official website and install it. Please refer to the blog post ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for help with the various installation package formats.
-
-Note that once the installation is complete, do not immediately start the `taosd` service; start it only after the parameters are correctly configured.
-
-### Set running parameters and start the service
-
-To ensure that the system can obtain the information necessary for regular operation, please set the following vital parameters correctly on the server:
-
-FQDN, firstEp, secondEp, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)".
-
-Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster.
-
-Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)".
-
-## Appendix 4: Super Table Names
-
-OpenTSDB metric names contain a dot ("."), for example a metric named "cpu.usage_user", and the dot has a special meaning in TDengine as the separator between database and table names. For this reason TDengine provides an "escape" character so that users can use keywords or special separators (e.g., dots) in (super) table names. To use special characters, enclose the table name in backquotes; for example, `` `cpu.usage_user` `` is a valid (super) table name.
-
-## Appendix 5: Reference Articles
-
-1. [Using TDengine + collectd/StatsD + Grafana to quickly build an IT operation and maintenance monitoring system](/application/collectd/)
-2. [Write collected data directly to TDengine through collectd](/third-party/collectd/)
diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md
deleted file mode 100644
index afee13c1377b0b4331d6f7ec20251d1aa2db81a1..0000000000000000000000000000000000000000
--- a/docs-en/27-train-faq/03-docker.md
+++ /dev/null
@@ -1,285 +0,0 @@
----
-sidebar_label: TDengine in Docker
-title: Deploy TDengine in Docker
----
-
-We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32.
-
-In this chapter we introduce a simple step-by-step guide to using TDengine in Docker.
-
-## Install Docker
-
-To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
-
-After Docker is installed, you can check whether it is installed properly by displaying the Docker version.
-
-```bash
-$ docker -v
-Docker version 20.10.3, build 48d30b5
-```
-
-## Launch TDengine in Docker
-
-### Launch TDengine Server
-
-```bash
-$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
-526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
-```
-
-In the above command, a docker container is started to run the TDengine server, and port range 6030-6049 of the container is mapped to host port range 6030-6049. If ports 6030-6049 are already occupied on the host, please change the mapping to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport).
-
-- **docker run**: launch a docker container
-- **-d**: the container will run in background mode
-- **-p**: port mapping
-- **tdengine/tdengine**: the image from which to launch the container
-- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**: the container ID, printed if the container is successfully launched
-
-Furthermore, `--name` can be used with `docker run` to specify a name for the container, `--hostname` can be used to specify a hostname for the container, and `-v` can be used to mount local volumes into the container so that the data generated inside the container is persisted to disk on the host.
-
-```bash
-docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
-```
-
-- **--name tdengine**: specify the name of the container; the name can be used to refer to the container later
-- **--hostname=tdengine-server**: specify the hostname inside the container; the hostname can be used inside the container without worrying that the container IP may change
-- **-v**: volume mapping between host and container
-
-### Check the container
-
-```bash
-docker ps
-```
-
-The output is like below:
-
-```
-CONTAINER ID   IMAGE               COMMAND   CREATED          STATUS          ···
-c452519b0f9b   tdengine/tdengine   "taosd"   14 minutes ago   Up 14 minutes   ···
-```
-
-- **docker ps**: list all the running containers
-- **CONTAINER ID**: container ID
-- **IMAGE**: the image used for the container
-- **COMMAND**: the command used when launching the container
-- **CREATED**: when the container was created
-- **STATUS**: status of the container
-
-### Access TDengine inside container
-
-```bash
-$ docker exec -it tdengine /bin/bash
-root@tdengine-server:~/TDengine-server-2.4.0.4#
-```
-
-- **docker exec**: attach to the container
-- **-i**: interactive mode
-- **-t**: use a terminal
-- **tdengine**: the container name, as shown in the output of `docker ps`
-- **/bin/bash**: the command to execute once the container is attached
-
-Inside the container, start the TDengine CLI `taos`
-
-```bash
-root@tdengine-server:~/TDengine-server-2.4.0.4# taos
-
-Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
-Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-
-taos>
-```
-
-The above example shows a successful connection. If `taos` fails to connect to the server side, error information will be shown instead.
-
-In the TDengine CLI, SQL commands can be executed to create/drop databases, tables and STables, and to insert or query data. For details please refer to [TAOS SQL](/taos-sql/).
-
-### Access TDengine from host
-
-If the `-p` option was used to map ports properly between host and container, you can also access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host.
-
-```
-$ taos
-
-Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
-Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-
-taos>
-```
-
-The REST interface provided by TDengine in the container can also be accessed from the host.
-
-```
-curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
-```
-
-Output is like below:
-
-```
-{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
-```
-
-For details of the REST API please refer to [REST API](/reference/rest-api/).
-
-### Run TDengine server and taosAdapter inside container
-
-From version 2.4.0.0, `taosAdapter` is enabled by default in the TDengine Docker image, but it can be disabled using the environment variable `TAOS_DISABLE_ADAPTER=true`. `taosAdapter` can also be run alone, without `taosd`, when launching a container.
-
-For the port mapping of `taosAdapter`, please refer to [taosAdapter](/reference/taosadapter/).
-
-- Run both `taosd` and `taosAdapter` (the default) in a docker container:
-
-```bash
-docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4
-```
-
-- Run `taosAdapter` only in a docker container; the `TAOS_FIRST_EP` environment variable needs to be used to specify the container name in which `taosd` is running:
-
-```bash
-docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter
-```
-
-- Run `taosd` only in a docker container:
-
-```bash
-docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4
-```
-
-- Verify the REST interface:
-
-```bash
-curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
-```
-
-Below is an example output:
-
-```
-{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
-```
-
-### Use taosBenchmark on host to access TDengine server in container
-
-1. Run `taosBenchmark` (formerly named `taosdemo`) on the host:
-
-   ```bash
-   $ taosBenchmark
-
-   taosBenchmark is simulating data generated by power equipments monitoring...
-
-   host: 127.0.0.1:6030
-   user: root
-   password: taosdata
-   configDir:
-   resultFile: ./output.txt
-   thread num of insert data: 10
-   thread num of create table: 10
-   top insert interval: 0
-   number of records per req: 30000
-   max sql length: 1048576
-   database count: 1
-   database[0]:
-     database[0] name: test
-     drop: yes
-     replica: 1
-     precision: ms
-     super table count: 1
-     super table[0]:
-       stbName: meters
-       autoCreateTable: no
-       childTblExists: no
-       childTblCount: 10000
-       childTblPrefix: d
-       dataSource: rand
-       iface: taosc
-       insertRows: 10000
-       interlaceRows: 0
-       disorderRange: 1000
-       disorderRatio: 0
-       maxSqlLen: 1048576
-       timeStampStep: 1
-       startTimestamp: 2017-07-14 10:40:00.000
-       sampleFormat:
-       sampleFile:
-       tagsFile:
-       columnCount: 3
-       column[0]:FLOAT column[1]:INT column[2]:FLOAT
-       tagCount: 2
-       tag[0]:INT tag[1]:BINARY(16)
-
-       Press enter key to continue or Ctrl-C to stop
-   ```
-
-   Once the execution is finished, a database `test` is created, a STable `meters` is created in database `test`, and 10,000 sub-tables named "d0" to "d9999" are created using `meters` as the template. 10,000 rows are inserted into each table, for a total of 100,000,000 inserted rows.
-
-2. Check the data
-
-   - **Check database**
-
-   ```bash
-   taos> show databases;
-   name | created_time | ntables | vgroups | ···
-   test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
-   log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
-
-   ```
-
-   - **Check STable**
-
-   ```bash
-   taos> use test;
-   Database changed.
-
-   taos> show stables;
-   name | created_time | columns | tags | tables |
-   ============================================================================================
-   meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
-   Query OK, 1 row(s) in set (0.003259s)
-
-   ```
-
-   - **Check Tables**
-
-   ```bash
-   taos> select * from test.t0 limit 10;
-
-   DB error: Table does not exist (0.002857s)
-   taos> select * from test.d0 limit 10;
-   ts | current | voltage | phase |
-   ======================================================================================
-   2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
-   2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
-   2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
-   2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
-   2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
-   2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
-   2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
-   2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
-   2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
-   2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
-   Query OK, 10 row(s) in set (0.016791s)
-
-   ```
-
-   - **Check tag values of table d0**
-
-   ```bash
-   taos> select groupid, location from test.d0;
-   groupid | location |
-   =================================
-   0 | California.SanDiego |
-   Query OK, 1 row(s) in set (0.003490s)
-   ```
-
-### Access TDengine from 3rd party tools
-
-A lot of third-party tools can be used to write data into TDengine through `taosAdapter`. For details please refer to [3rd party tools](/third-party/).
-
-There is nothing different about accessing the TDengine server inside a container from the third-party side, as long as the endpoint is specified correctly: the endpoint should be the FQDN and the mapped port of the host.
-
-## Stop TDengine inside container
-
-```bash
-docker stop tdengine
-```
-
-- **docker stop**: stop a container
-- **tdengine**: container name
diff --git a/docs-en/30-release/01-2.6.md b/docs-en/30-release/01-2.6.md
deleted file mode 100644
index 85b76d9999e211336b5859beab3fdfc7988f4fda..0000000000000000000000000000000000000000
--- a/docs-en/30-release/01-2.6.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: 2.6
----
-
-[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4)
-
-[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1)
-
-[2.6.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.0)
diff --git a/docs-en/30-release/02-2.4.md b/docs-en/30-release/02-2.4.md
deleted file mode 100644
index 62580b327a3bd5098e1b7f1162a1c398ac2a5eff..0000000000000000000000000000000000000000
--- a/docs-en/30-release/02-2.4.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: 2.4
----
-
-[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26)
-
-[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25)
-
-[2.4.0.24](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.24)
-
-[2.4.0.20](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.20)
-
-[2.4.0.18](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.18)
-
-[2.4.0.16](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.16)
-
-[2.4.0.14](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.14)
-
-[2.4.0.12](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.12)
-
-[2.4.0.10](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.10)
-
-[2.4.0.7](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.7)
-
-[2.4.0.5](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.5)
-
-[2.4.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.4)
-
-[2.4.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.0)
diff --git a/docs-examples/.gitignore b/docs-examples/.gitignore
deleted file mode 100644
index 7ed6d403bf5f64c0cb230265b4dffee609dea93b..0000000000000000000000000000000000000000
--- a/docs-examples/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.vscode
-*.lock
-.idea
\ No newline at end of file
diff --git a/docs-examples/.gitignre b/docs-examples/.gitignre
deleted file mode 100644
index 0853156c65c2c6c1b693290e74c3ee630bcaac19..0000000000000000000000000000000000000000
--- a/docs-examples/.gitignre
+++ /dev/null
@@ -1,2 +0,0 @@
-.vscode
-*.lock
\ No newline at end of file
diff --git a/docs-examples/go/go.mod b/docs-examples/go/go.mod
deleted file mode 100644
index 5945e395e93b373d47fe71f3584c37fed9526638..0000000000000000000000000000000000000000
--- a/docs-examples/go/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module goexample
-
-go 1.17
-
-require github.com/taosdata/driver-go/v2 develop
-
diff --git a/docs-examples/java/pom.xml b/docs-examples/java/pom.xml
deleted file mode 100644
index a48ba398da92f401235819d067aa2ba6f8b173ea..0000000000000000000000000000000000000000
--- a/docs-examples/java/pom.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>com.taos</groupId>
-    <artifactId>javaexample</artifactId>
-    <version>1.0</version>
-
-    <name>JavaExample</name>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-        <maven.compiler.source>1.8</maven.compiler.source>
-        <maven.compiler.target>1.8</maven.compiler.target>
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>com.taosdata.jdbc</groupId>
-            <artifactId>taos-jdbcdriver</artifactId>
-            <version>2.0.38</version>
-        </dependency>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.13.1</version>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java
deleted file mode 100644
index 990922b7a516bd32a7e299f5743bd1b5e321868a..0000000000000000000000000000000000000000
--- a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package com.taos.example;
-
-import com.taosdata.jdbc.SchemalessWriter;
-import com.taosdata.jdbc.enums.SchemalessProtocolType;
-import com.taosdata.jdbc.enums.SchemalessTimestampType;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-public class LineProtocolExample {
-    // format: measurement,tag_set field_set timestamp
-    private static String[] lines = {
-            "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // microseconds
-            "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
-            "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
-            "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
-    };
-
-    private static Connection getConnection() throws SQLException {
-        String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
-        return DriverManager.getConnection(jdbcUrl);
-    }
-
-    private static void createDatabase(Connection conn) throws SQLException {
-        try (Statement stmt = conn.createStatement()) {
-            // the default precision is ms (millisecond), but we use us (microsecond) here
-            stmt.execute("CREATE DATABASE IF NOT EXISTS test PRECISION 'us'");
-            stmt.execute("USE test");
-        }
-    }
-
-    public static void main(String[] args) throws SQLException {
-        try (Connection conn = getConnection()) {
-            createDatabase(conn);
-            SchemalessWriter writer = new SchemalessWriter(conn);
-            writer.write(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.MICRO_SECONDS);
-        }
-    }
-}
diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py
deleted file mode 100644
index 56942ef57085766cd128b03cabb7a357587eab16..0000000000000000000000000000000000000000
--- a/docs-examples/python/conn_native_pandas.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import pandas
-from sqlalchemy import create_engine
-
-engine = create_engine("taos://root:taosdata@localhost:6030/power")
-df = pandas.read_sql("SELECT * FROM meters", engine)
-
-# print index
-print(df.index)
-# print data type of element in ts column
-print(type(df.ts[0]))
-print(df.head(3))
-
-# output:
-# RangeIndex(start=0, stop=8, step=1)
-# <class 'pandas._libs.tslibs.timestamps.Timestamp'>
-# ts current ... location groupid
-# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2
-# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2
-# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1f2f88d47d8d20e55c6a495f571bd0d11a600d74
--- /dev/null
+++ b/docs/en/01-index.md
@@ -0,0 +1,27 @@
+---
+title: TDengine Documentation
+sidebar_label: Documentation Home
+slug: /
+---
+
+TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic as well as novel concepts in TDengine, and talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It's written mainly for architects, developers and system administrators.
+
+To get a global view of TDengine, like its feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section. If you want to learn some basics about time-series databases, please check [here](https://tdengine.com/tsdb).
+
+TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of its features and capabilities, please read ["Concepts"](./concept) thoroughly.
+
+If you are a developer, please read the ["Developer Guide"](./develop) carefully. This section introduces database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+
+We live in the era of big data, and scale-up is unable to meet the growing business needs. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, they also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster, please refer to ["Cluster"](./cluster).
+
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning and migration costs. In addition to standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation and time-weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
+
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please thoroughly read the ["Administration"](./operation) section.
+
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+
+If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine"](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study the TDengine code or even contribute to it, please read this chapter carefully.
+
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+
+Together, we make a difference.
diff --git a/docs-en/02-intro/_category_.yml b/docs/en/02-intro/_category_.yml
similarity index 100%
rename from docs-en/02-intro/_category_.yml
rename to docs/en/02-intro/_category_.yml
diff --git a/docs-cn/eco_system.webp b/docs/en/02-intro/eco_system.webp
similarity index 100%
rename from docs-cn/eco_system.webp
rename to docs/en/02-intro/eco_system.webp
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1dc27ae0a06dc94a6dbadec914041353811a9c5f
--- /dev/null
+++ b/docs/en/02-intro/index.md
@@ -0,0 +1,116 @@
+---
+title: Introduction
+toc_max_heading_level: 2
+---
+
+TDengine is a high-performance, scalable [time-series database](https://tdengine.com/tsdb) with SQL support. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/continuous-query), [data subscription](../develop/subscribe) and other functionality to reduce the complexity and cost of development and operation.
+
+This section introduces the major features, competitive advantages, typical use cases and benchmarks to help you get a high-level overview of TDengine.
+
+## Major Features
+
+The major features are listed below:
+
+1. While TDengine supports [using SQL to insert](../develop/insert-data/sql-writing), it also supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](../develop/insert-data/opentsdb-json), among others.
+2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](../third-party/telegraf), [Prometheus](../third-party/prometheus), [StatsD](../third-party/statsd), [collectd](../third-party/collectd), [icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
+3. Support for [all kinds of queries](../develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
+4. Support for [user-defined functions](../develop/udf).
+5. Support for [caching](../develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
+6. Support for [continuous query](../develop/continuous-query).
+7. Support for [data subscription](../develop/subscribe) with the capability to specify filter conditions.
+8. Support for [clustering](../cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
+9. Provides an interactive [command-line interface](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+10. Provides many ways to [import](/operation/import) and [export](../operation/export) data.
+11. Provides [monitoring](../operation/monitor) on running instances of TDengine.
+12. Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+13. Provides a [REST API](../reference/rest-api/).
+14. Supports seamless integration with [Grafana](../third-party/grafana) for visualization.
+15. Supports seamless integration with Google Data Studio.
+
+For more details on features, please read through the entire documentation.
+
+## Competitive Advantages
+
+Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages.
+
+- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage and compute costs.
+
+- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-the-box scalability and high availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
+
+- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series data. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible schemaless data ingestion.
+
+- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. This makes the system architecture much simpler, more cost-effective and easier to maintain.
+
+- **Seamless Integration**: Without a single line of code, TDengine provides seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated.
+
+- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine's running status can be monitored via Grafana or other DevOps tools.
+
+- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs.
+
+- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming.
+
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced: (1) with its superior performance, computing and storage resources are reduced significantly; (2) with SQL support, it can be seamlessly integrated with many third-party tools, and learning and migration costs are reduced significantly; (3) with its simple architecture and zero management, operation and maintenance costs are reduced.
+
+## Technical Ecosystem
+
+This is how TDengine would be situated in a typical time-series data processing platform:
+
+![TDengine Database Technical Ecosystem](eco_system.webp)
+
+<center>Figure 1. TDengine Technical Ecosystem</center>
+
+On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
+
+## Typical Use Cases
+
+As a high-performance, scalable, SQL-supporting time-series database, TDengine's typical use cases include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data. As such, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM and so on. More generally, TDengine is not a suitable storage engine for non-time-series data. This section makes a more detailed analysis of the applicable scenarios.
+
+### Characteristics and Requirements of Data Sources
+
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with a matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+
+### System Architecture Requirements
+
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+
+### System Function Requirements
+
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry-specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general-purpose relational database systems, but TDengine can work in concert with relational database systems to provide more complete solutions. |
+
+### System Performance Requirements
+
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high-resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+
+### System Maintenance Requirements
+
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture that simplifies routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the taos shell for ad hoc queries make maintenance simpler, allow reuse and reduce learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
+
+## Comparison with other databases
+
+- [Writing Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/23/4975.html)
+- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
+- [TDengine vs. InfluxDB, OpenTSDB, Cassandra, MySQL, ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf)
+- [TDengine vs. OpenTSDB](https://tdengine.com/2019/09/12/710.html)
+- [TDengine vs. Cassandra](https://tdengine.com/2019/09/12/708.html)
+- [TDengine vs. InfluxDB](https://tdengine.com/2019/09/12/706.html)
+
+If you want to learn some basics about time-series databases, please check [here](https://tdengine.com/tsdb).
diff --git a/docs-en/04-concept/_category_.yml b/docs/en/04-concept/_category_.yml
similarity index 100%
rename from docs-en/04-concept/_category_.yml
rename to docs/en/04-concept/_category_.yml
diff --git a/docs-en/04-concept/index.md b/docs/en/04-concept/index.md
similarity index 100%
rename from docs-en/04-concept/index.md
rename to docs/en/04-concept/index.md
diff --git a/docs-en/05-get-started/_apt_get_install.mdx b/docs/en/05-get-started/_apt_get_install.mdx
similarity index 100%
rename from docs-en/05-get-started/_apt_get_install.mdx
rename to docs/en/05-get-started/_apt_get_install.mdx
diff --git a/docs-en/05-get-started/_category_.yml b/docs/en/05-get-started/_category_.yml
similarity index 100%
rename from docs-en/05-get-started/_category_.yml
rename to docs/en/05-get-started/_category_.yml
diff --git a/docs/en/05-get-started/_pkg_install.mdx b/docs/en/05-get-started/_pkg_install.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d514d6cd22b94cbe3da8e833d9f5f9f24da733f
--- /dev/null
+++ b/docs/en/05-get-started/_pkg_install.mdx
@@ -0,0 +1,15 @@
+import PkgList from "/components/PkgList";
+
+TDengine is easy to install: it takes only a few minutes from download to finished installation.
+
+For users' convenience, from version 2.4.0.10 the standard server-side installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark` and sample code. If only the `taosd` server and the C/C++ connector are required, you can also choose to download the lite package.
+
+Three kinds of packages are provided: tar.gz, rpm and deb. The tar.gz package in particular is provided for the convenience of enterprise customers on different operating systems; it includes `taosdump` and the TDinsight installation script, which are normally only provided in the taos-tools rpm and deb packages.
+
+Between two major release versions, some beta versions may be delivered for users to try new features.
+
+
+
+For details, please refer to [Install and Uninstall](../13-operation/01-pkg-install.md).
+
+To see the details of versions, please refer to the [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..0450d132ddb56e16f2f521887637b3e2096da7dd
--- /dev/null
+++ b/docs/en/05-get-started/index.md
@@ -0,0 +1,171 @@
+---
+title: Get Started
+description: 'Install TDengine from Docker image, apt-get or package, and run TDengine CLI and taosBenchmark to experience the features'
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import PkgInstall from "./\_pkg_install.mdx";
+import AptGetInstall from "./\_apt_get_install.mdx";
+
+## Quick Install
+
+The full package of TDengine includes the server (taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, the client driver (taosc), the command-line program (CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future, taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and the TDengine CLI can be installed and run on Windows or Linux.
+In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../14-reference/02-rest-api/02-rest-api.mdx) through [taosAdapter](../14-reference/04-taosadapter.md). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd.
+
+TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future.
+
+
+
+If Docker is already installed on your computer, execute the following command:
+
+```shell
+docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
+```
+
+Make sure the container is running:
+
+```shell
+docker ps
+```
+
+Enter the container and launch `bash` (replace `<container name>` with the name or ID shown by `docker ps`):
+
+```shell
+docker exec -it <container name> bash
+```
+
+Then you can execute Linux commands and access TDengine.
+
+For detailed steps, please visit [Experience TDengine via Docker](../27-train-faq/03-docker.md).
+
+:::info
+Starting from 2.4.0.10, besides taosd, the TDengine Docker image includes: taos, taosAdapter, taosdump, taosBenchmark, TDinsight, scripts and sample code. Once the TDengine container is started, it will start both taosAdapter and taosd automatically to support the RESTful interface.
+
+:::
+
+
+
+
+
+If you would like to check the source code, build the package yourself or contribute to the project, please check the [TDengine GitHub Repository](https://github.com/taosdata/TDengine).
+
+
+
+## Quick Launch
+
+After installation, you can launch the TDengine service with the `systemctl` command to start `taosd`:
+
+```bash
+systemctl start taosd
+```
+
+Check if taosd is running:
+
+```bash
+systemctl status taosd
+```
+
+If everything is fine, you can run the TDengine command-line interface `taos` to access TDengine and test it out yourself.
+
+:::info
+
+- `systemctl` requires _root_ privileges. If you are not _root_, please add `sudo` before the command.
+- To get feedback and keep improving the product, TDengine collects some basic usage information, but you can turn this off by setting telemetryReporting to 0 in the configuration file taos.cfg.
+- TDengine uses the FQDN (usually the hostname) as the ID of a node. To make the system work, you need to configure the FQDN for the server running taosd, and configure the DNS service or hosts file on the machine where the application or TDengine CLI runs, to ensure that the FQDN can be resolved.
+- `systemctl stop taosd` won't stop the server right away; it will wait until all the data in memory is flushed to disk. This may take some time depending on the cache size.
+
+TDengine supports installation on systems that run [`systemd`](https://en.wikipedia.org/wiki/Systemd) for process management. Use `which systemctl` to check whether the system has `systemd` installed:
+
+```bash
+which systemctl
+```
+
+If the system does not have `systemd`, you can start TDengine manually by executing `/usr/local/taos/bin/taosd`.
+
+:::
+
+## Command Line Interface
+
+To manage the running TDengine instance, or to execute ad-hoc queries, TDengine provides a Command Line Interface (hereinafter referred to as the TDengine CLI), taos. To enter the interactive CLI, execute `taos` on a Linux terminal where TDengine is installed:
+
+```bash
+taos
+```
+
+If it connects to the TDengine server successfully, it will print out the version and a welcome message. If it fails, it will print out an error message; please check the [FAQ](../27-train-faq/01-faq.md) for troubleshooting connection issues.
+The TDengine CLI prompt is:
+
+```cmd
+taos>
+```
+
+Inside the TDengine CLI, you can execute SQL commands to create and drop databases and tables, and to run queries. Each SQL command must end with a semicolon. For example:
+
+```sql
+create database demo;
+use demo;
+create table t (ts timestamp, speed int);
+insert into t values ('2019-07-15 00:00:00', 10);
+insert into t values ('2019-07-15 01:00:00', 20);
+select * from t;
+           ts            | speed |
+========================================
+ 2019-07-15 00:00:00.000 |    10 |
+ 2019-07-15 01:00:00.000 |    20 |
+Query OK, 2 row(s) in set (0.003128s)
+```
+
+Besides executing SQL commands, system administrators can check the running status, add and drop user accounts, and manage the running instances. The TDengine CLI together with the client driver can be installed and run on either Linux or Windows machines. For more details on the CLI, please [check here](../14-reference/08-taos-shell.md).
+
+## Experience the blazing fast speed
+
+After the TDengine server is running, execute `taosBenchmark` (previously named taosdemo) from a Linux terminal:
+
+```bash
+taosBenchmark
+```
+
+This command creates a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Timestamps range from "2017-07-14 10:40:00.000" to "2017-07-14 10:40:09.999". Each table has the tags "location" and "groupId": groupId is set randomly from 1 to 10, and location is set to "California.SanFrancisco" or "California.SanDiego".
+
+This command inserts 100 million rows into the database quickly. The insert time depends on the hardware configuration; it takes only a dozen seconds on a regular PC server.
+
+taosBenchmark provides command-line options and a configuration file to customize the scenario, such as the number of tables, number of rows per table, number of columns and more. Please execute `taosBenchmark --help` to list them. For details on running taosBenchmark, please check the [reference for taosBenchmark](../14-reference/05-taosbenchmark.md).
+
+## Experience query speed
+
+After using taosBenchmark to insert a number of rows of data, you can execute queries from the TDengine CLI to experience the lightning-fast query speed.
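+If you prefer to run these checks from a program instead of the CLI, below is a minimal sketch using the Python connector `taospy`. The connection parameters are assumptions for a local default install, and it presumes `taosBenchmark` has already populated the `test` database; the equivalent CLI queries follow.
+
+```python
+# Minimal sketch (assumes `pip install taospy` and a local default TDengine install).
+import time
+
+import taos
+
+conn = taos.connect(host="localhost", user="root", password="taosdata")
+
+start = time.time()
+result = conn.query("SELECT COUNT(*) FROM test.meters")
+rows = result.fetch_all()
+print("count(*) = {} in {:.3f}s".format(rows[0][0], time.time() - start))
+
+conn.close()
+```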
+
+Query the total number of rows under super table "meters":
+
+```sql
+taos> select count(*) from test.meters;
+```
+
+Query the average, maximum and minimum of the 100 million rows:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.meters;
+```
+
+Query the total number of rows with location="California.SanFrancisco":
+
+```sql
+taos> select count(*) from test.meters where location="California.SanFrancisco";
+```
+
+Query the average, maximum and minimum of all rows with groupId=10:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+```
+
+Query the average, maximum and minimum for table d10 in 10-second time intervals:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+```
diff --git a/docs-en/07-develop/01-connect/_category_.yml b/docs/en/07-develop/01-connect/_category_.yml
similarity index 100%
rename from docs-en/07-develop/01-connect/_category_.yml
rename to docs/en/07-develop/01-connect/_category_.yml
diff --git a/docs/en/07-develop/01-connect/_connect_c.mdx b/docs/en/07-develop/01-connect/_connect_c.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..4d13d80e085956a7ceccdc404b7106620b22c25e
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_c.mdx
@@ -0,0 +1,3 @@
+```c title="Native Connection"
+{{#include docs/examples/c/connect_example.c}}
+```
diff --git a/docs/en/07-develop/01-connect/_connect_cs.mdx b/docs/en/07-develop/01-connect/_connect_cs.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..f8d8e519fde7fc6d0954bbfe865155221c0b0595
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_cs.mdx
@@ -0,0 +1,8 @@
+```csharp title="Native Connection"
+{{#include docs/examples/csharp/ConnectExample.cs}}
+```
+
+:::info
+The C# connector supports only native connections for now.
+
+:::
diff --git a/docs/en/07-develop/01-connect/_connect_go.mdx b/docs/en/07-develop/01-connect/_connect_go.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6f742ea0bcf027de6c97132167d4de65e2cbee8a
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_go.mdx
@@ -0,0 +1,17 @@
+#### Unified Database Access Interface
+
+```go title="Native Connection"
+{{#include docs/examples/go/connect/cgoexample/main.go}}
+```
+
+```go title="REST Connection"
+{{#include docs/examples/go/connect/restexample/main.go}}
+```
+
+#### Advanced Features
+
+The af package of driver-go can also be used to establish a connection; it enables some advanced features of TDengine, such as parameter binding and subscription.
+
+```go title="Establish native connection using af package"
+{{#include docs/examples/go/connect/afconn/main.go}}
+```
diff --git a/docs/en/07-develop/01-connect/_connect_java.mdx b/docs/en/07-develop/01-connect/_connect_java.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..880d2aa3e489566203fa0f4b8379feb653a98f73
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_java.mdx
@@ -0,0 +1,15 @@
+```java title="Native Connection"
+{{#include docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java}}
+```
+
+```java title="REST Connection"
+{{#include docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java:main}}
+```
+
+When using a REST connection, bulk pulling can be enabled if the result set is large.
+
+```java title="Enable Bulk Pulling" {4}
+{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
+```
+
+For more connection configuration, please refer to the [Java Connector](/reference/connector/java).
diff --git a/docs/en/07-develop/01-connect/_connect_node.mdx b/docs/en/07-develop/01-connect/_connect_node.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..943677b36be22f73c970d5b1f4228ff757b0a62e
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_node.mdx
@@ -0,0 +1,7 @@
+```js title="Native Connection"
+{{#include docs/examples/node/nativeexample/connect.js}}
+```
+
+```js title="REST Connection"
+{{#include docs/examples/node/restexample/connect.js}}
+```
diff --git a/docs/en/07-develop/01-connect/_connect_python.mdx b/docs/en/07-develop/01-connect/_connect_python.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..60b454d52f3977d1feac9e745da984db83a38668
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_python.mdx
@@ -0,0 +1,3 @@
+```python title="Native Connection"
+{{#include docs/examples/python/connect_example.py}}
+```
diff --git a/docs/en/07-develop/01-connect/_connect_r.mdx b/docs/en/07-develop/01-connect/_connect_r.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e2d7f631d2c467937589bd00271a7decd036506d
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_r.mdx
@@ -0,0 +1,3 @@
+```r title="Native Connection"
+{{#include docs/examples/R/connect_native.r:demo}}
+```
diff --git a/docs/en/07-develop/01-connect/_connect_rust.mdx b/docs/en/07-develop/01-connect/_connect_rust.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..80ac1f4ff4a8174acc4c2f6af11b31f027ece602
--- /dev/null
+++ b/docs/en/07-develop/01-connect/_connect_rust.mdx
@@ -0,0 +1,8 @@
+```rust title="Native Connection/REST Connection"
+{{#include docs/examples/rust/nativeexample/examples/connect.rs}}
+```
+
+:::note
+For the Rust connector, the connection type depends on the enabled feature. If the "rest" feature is enabled, only the REST implementation is compiled and packaged.
+
+:::
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..df793f6d3f35cb8d3a9e25f909464c724a2a05c0
--- /dev/null
+++ b/docs/en/07-develop/01-connect/index.md
@@ -0,0 +1,276 @@
+---
+sidebar_label: Connect
+title: Connect
+description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import ConnJava from "./\_connect_java.mdx";
+import ConnGo from "./\_connect_go.mdx";
+import ConnRust from "./\_connect_rust.mdx";
+import ConnNode from "./\_connect_node.mdx";
+import ConnPythonNative from "./\_connect_python.mdx";
+import ConnCSNative from "./\_connect_cs.mdx";
+import ConnC from "./\_connect_c.mdx";
+import ConnR from "./\_connect_r.mdx";
+import InstallOnWindows from "../../14-reference/03-connector/\_linux_install.mdx";
+import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.mdx";
+import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx";
+import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx";
+
+Application programs running on any platform can access TDengine through the REST API it provides.
+For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use connectors for multiple programming languages, including C/C++, Java, Python, Go, Node.js, C# and Rust, to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. The TDengine community also provides connectors for Lua and PHP. For details about the connectors, please refer to [Connectors](/reference/connector/).
+
+## Establish Connection
+
+There are two ways for a connector to establish a connection to TDengine:
+
+1. Connection through the REST API provided by the taosAdapter component; this is referred to as a "REST connection" hereinafter.
+2. Connection through the TDengine client driver (taosc); this is referred to as a "native connection" hereinafter.
+
+Key differences:
+
+1. The TDengine client driver (taosc) has the highest performance and supports all the features of TDengine, like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
+2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions.
+3. The REST connection is more accessible, with cross-platform support; however, it results in about a 30% performance penalty.
+
+## Install Client Driver taosc
+
+If you choose to use the native connection and the application is not on the same host as the TDengine server, the TDengine client driver taosc needs to be installed on the application host. If you choose the REST connection, or the application is on the same host as the TDengine server, this step can be skipped. It's best to use the same version of taosc as the TDengine server.
+
+### Install
+
+
+
+
+### Verify
+
+After the above installation and configuration are done, and once you have made sure that the TDengine service is started and running, the TDengine command-line interface `taos` can be launched to access TDengine.
+
+
+
+
+## Install Connectors
+
+
+
+If `maven` is used to manage the project, simply add the following dependency to `pom.xml`:
+
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.38</version>
+</dependency>
+```
+
+
+
+Install from PyPI using `pip`:
+
+```
+pip install taospy
+```
+
+Install from a Git URL:
+
+```
+pip install git+https://github.com/taosdata/taos-connector-python.git
+```
+
+
+
+Simply add the `driver-go` dependency to `go.mod`:
+
+```go-mod title=go.mod
+{{#include docs/examples/go/go.mod}}
+```
+
+:::note
+`driver-go` uses `cgo` to wrap the APIs provided by taosc, and `cgo` needs `gcc` to compile C source code, so please make sure `gcc` is properly installed on your system.
+
+:::
+
+
+
+Simply add the `libtaos` dependency to `Cargo.toml`:
+
+```toml title=Cargo.toml
+[dependencies]
+libtaos = { version = "0.4.2"}
+```
+
+:::info
+The Rust connector uses Cargo features to select how the connection is established. To establish a REST connection, enable the `rest` feature:
+
+```toml
+libtaos = { version = "*", features = ["rest"] }
+```
+
+:::
+
+
+
+The Node.js connector provides different packages for the different ways of establishing a connection.
+
+1. Install the Node.js native connector:
+
+```
+npm i td2.0-connector
+```
+
+:::note
+It's recommended to use a Node.js version between `node-v12.8.0` and `node-v13.0.0`.
+:::
+
+2. Install the Node.js REST connector:
+
+```
+npm i td2.0-rest-connector
+```
+
+
+
+You only need to add a reference to [TDengine.Connector](https://www.nuget.org/packages/TDengine.Connector/) in the project configuration file (the `PackageReference` line below is reconstructed; `Version="*"` floats to the latest release, so pin a version as needed):
+
+```xml title=csharp.csproj {12}
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+    <StartupObject>TDengineExample.AsyncQueryExample</StartupObject>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <PackageReference Include="TDengine.Connector" Version="*" />
+  </ItemGroup>
+
+</Project>
+```
+
+Or add it via the `dotnet` command:
+
+```
+dotnet add package TDengine.Connector
+```
+
+:::note
+The sample code below is based on dotnet 6.0; it may need to be adjusted if your dotnet version differs.
+
+:::
+
+
+
+1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/).
+2. Install the dependency package `RJDBC`:
+
+```R
+install.packages("RJDBC")
+```
+
+
+
+If the client driver (taosc) is already installed, then the C connector is already available.
+ +
+
+**Download Source Code Package and Unzip:**
+
+```shell
+curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
+&& mkdir php-tdengine \
+&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
+```
+
+> Version `v1.0.2` is only an example; it can be replaced with any newer version. Please check the available versions at [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
+
+**Non-Swoole Environment:**
+
+```shell
+phpize && ./configure && make -j && make install
+```
+
+**Specify TDengine Location:**
+
+```shell
+phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
+```
+
+> `--with-tdengine-dir=` is followed by the TDengine installation location.
+> This option is useful when the TDengine location can't be found automatically, or on macOS.
+
+**Swoole Environment:**
+
+```shell
+phpize && ./configure --enable-swoole && make -j && make install
+```
+
+**Enable The Extension:**
+
+Option One: Add `extension=tdengine` in `php.ini`.
+
+Option Two: Specify the extension on the CLI: `php -d extension=tdengine test.php`.
+
+
+
+## Establish Connection
+
+Prior to establishing a connection, please make sure TDengine is already running and accessible. The following sample code assumes TDengine is running on the same host as the client program, with the FQDN configured to "localhost" and serverPort configured to "6030".
+
+
+
+
+
+
+
+
+
+:::tip
+If the connection fails, in most cases it's caused by an improper FQDN configuration or firewall settings. Please refer to the section "Unable to establish connection" in the [FAQ](https://docs.taosdata.com/train-faq/faq).
+
+:::
diff --git a/docs-en/07-develop/02-model/_category_.yml b/docs/en/07-develop/02-model/_category_.yml
similarity index 100%
rename from docs-en/07-develop/02-model/_category_.yml
rename to docs/en/07-develop/02-model/_category_.yml
diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e0378cc77ca28a1a82ef6a52fa1f74d6cd580a01
--- /dev/null
+++ b/docs/en/07-develop/02-model/index.mdx
@@ -0,0 +1,93 @@
+---
+title: Data Model
+---
+
+The data model employed by TDengine is similar to that of a relational database: you create databases and tables, and you design the data model based on your own business and application requirements. The schema of each STable (an abbreviation for super table) should be designed to fit your data. This chapter explains the big picture without getting into syntactical details.
+
+## Create Database
+
+The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may differ. These characteristics include collection frequency, retention policy and others, and they determine how you create and configure the database. For example, the days to keep data, the number of replicas, the data block size, and whether data updates are allowed are all configurable parameters determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, many parameters can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, the time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file and so on. Below is an example of the SQL statement to create a database.
+
+```sql
+CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
+```
+
+In the above SQL statement:
+- a database named "power" will be created
+- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically
+- a new data file will be created every 10 days
+- the number of memory blocks is 6
+- data is allowed to be updated
+
+For more details please refer to [Database](/taos-sql/database).
+
+After creating a database, the current database in use can be switched using the SQL command `USE`. For example, the SQL statement below switches the current database to `power`. Without the current database specified, a table name must be preceded by the corresponding database name.
+
+```sql
+USE power;
+```
+
+:::note
+
+- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready.
+- JOIN operations can't be performed on tables from two different databases.
+- A timestamp needs to be specified when inserting rows or querying historical rows.
+
+:::
+
+## Create STable
+
+In a time-series application, there may be multiple kinds of data collection points. For example, in an electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/concept/#model_table1), the SQL statement below can be used to create the super table.
+
+```sql
+CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
+```
+
+:::note
+If you are using versions prior to 2.0.15, the `STable` keyword needs to be replaced with `TABLE`.
+
+:::
+
+Similar to creating a regular table, when creating a STable the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the metrics collected; they can [contain data of type](/taos-sql/data-type/) integer, float, double, string and so on. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID and manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
+
+For each kind of data collection point, a corresponding STable must be created, so there may be many STables in an application. For an electrical power system, we need to create separate STables for meters, transformers, busbars and switches. There may also be multiple kinds of data collection points on a single device; for example, there may be one data collection point for electrical data like current and voltage, and another for environmental data like temperature, humidity and wind direction. Multiple STables are required for such devices.
+
+At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, and one or more STables can exist in a database.
+
+## Create Table
+
+A specific table needs to be created for each data collection point. Similar to an RDBMS, a table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below.
+
+```sql
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
+```
+ +In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. + +:::warning +It's not recommended to create a table in a database while using a STable from another database as template. + +:::tip +It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value. + +## Create Table Automatically + +In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. + +```sql +INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); +``` + +In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`. + +For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting). + +## Single Column vs Multiple Column + +A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. + +However, there is another kind of design, i.e. single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. + +It's recommended to use a multiple column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use single column data model. diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d8c4453f409dfaf1db1ec154e9ba35f8db74862e --- /dev/null +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -0,0 +1,130 @@ +--- +sidebar_label: Insert Using SQL +title: Insert Using SQL +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import JavaSQL from "./_java_sql.mdx"; +import JavaStmt from "./_java_stmt.mdx"; +import PySQL from "./_py_sql.mdx"; +import PyStmt from "./_py_stmt.mdx"; +import GoSQL from "./_go_sql.mdx"; +import GoStmt from "./_go_stmt.mdx"; +import RustSQL from "./_rust_sql.mdx"; +import RustStmt from "./_rust_stmt.mdx"; +import NodeSQL from "./_js_sql.mdx"; +import NodeStmt from "./_js_stmt.mdx"; +import CsSQL from "./_cs_sql.mdx"; +import CsStmt from "./_cs_stmt.mdx"; +import CSQL from "./_c_sql.mdx"; +import CStmt from "./_c_stmt.mdx"; + +## Introduction + +Application programs can execute `INSERT` statement through connectors to insert rows. The TDengine CLI can also be used to manually insert data. 
+
+### Insert Single Row
+
+The SQL statement below inserts one row into table "d1001":
+
+```sql
+INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
+```
+
+### Insert Multiple Rows
+
+Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001":
+
+```sql
+INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
+```
+
+### Insert into Multiple Tables
+
+Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002":
+
+```sql
+INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
+```
+
+For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
+
+:::info
+
+- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1 MB.
+- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, the performance may drop instead of improving once the number of inserting threads grows beyond a certain point. The proper number of threads needs to be tested in the specific environment to find the best value.
+
+:::
+
+:::warning
+
+- If the timestamp of a row to be inserted already exists in the table, the behavior depends on the value of the parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row.
+- The timestamp to be inserted must be no older than the current time minus the parameter `KEEP`. If `KEEP` is set to 3650 days, data older than 3650 days can't be inserted. The timestamp also can't be later than the current time plus the parameter `DAYS`. If `DAYS` is set to 2, data with a timestamp more than 2 days in the future can't be inserted.
+
+:::
+
+## Examples
+
+### Insert Using SQL
+
+
+
+
+
+
+:::note
+
+1. The above samples work with either a native connection or a REST connection.
+2. Please note that `use db` can't be used with a REST connection because REST connections are stateless, so in the samples `dbName.tbName` is used to specify the table name.
+
+:::
+
+### Insert with Parameter Binding
+
+TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From versions 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has improved significantly, boosting insert performance by avoiding the cost of parsing SQL statements.
+
+Parameter binding is available only with a native connection.
+
+
+
diff --git a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
similarity index 100%
rename from docs-en/07-develop/03-insert-data/02-influxdb-line.mdx
rename to docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
diff --git a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
similarity index 100%
rename from docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
rename to docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
diff --git a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
similarity index 100%
rename from docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx
rename to docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..1a4813f74e680905206b5bdd8fe37cd4eca2b0be
--- /dev/null
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,444 @@
+---
+sidebar_label: High Performance Writing
+title: High Performance Writing
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This chapter introduces how to write data into TDengine with high throughput.
+
+## How to achieve high performance data writing
+
+To achieve high-performance writing, there are a few aspects to consider. The following sections describe the factors that matter most.
+
+### Application Program
+
+From the perspective of the application program, you need to consider:
+
+1. The data size of each single write, also known as the batch size. Generally speaking, a higher batch size yields better writing performance. However, once the batch size exceeds a certain value, there is no additional benefit. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by the parameter `maxSQLLength` on the client side, and the default value is 65,480.
+
+2. The number of concurrent connections. Normally, more connections yield better results. However, once the number of connections exceeds the processing ability of the server side, performance may degrade.
+
+3. The distribution of data to be written across tables or subtables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.
+
+4. The data writing protocol.
+   - Parameter binding mode is more efficient than SQL because it avoids the cost of parsing SQL.
+   - Writing to known, existing tables is more efficient than writing with automatic table creation, because the latter needs to check whether the table exists before actually writing data into it.
+   - Writing in SQL is more efficient than writing in schemaless mode, because schemaless writing creates tables automatically and may alter table schemas.
+
+Application programs need to take the above factors into account and try to take advantage of them. The application program should write to a single table in each write batch. The batch size and the number of concurrent connections both need to be tuned on the specific system to achieve the best writing throughput. A short sketch of this per-table batching pattern follows.
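+To make the batching advice concrete, below is a minimal sketch that accumulates rows per table and flushes them as one multi-row `INSERT`. It is an illustration under stated assumptions, not the sample code of this chapter: the table names, the 3,000-row threshold and the connection parameters are all ours.
+
+```python
+# Minimal sketch of per-table batching: buffer rows, then flush one multi-row INSERT.
+# The 3000-row threshold and connection parameters are illustrative assumptions.
+import taos
+
+BATCH_SIZE = 3000  # tune on your system; see the guidance above
+buffers = {}       # table name -> list of "(ts, current, voltage, phase)" strings
+
+conn = taos.connect(host="localhost", user="root", password="taosdata")
+
+
+def add_row(table, ts, current, voltage, phase):
+    rows = buffers.setdefault(table, [])
+    rows.append("({}, {}, {}, {})".format(ts, current, voltage, phase))
+    if len(rows) >= BATCH_SIZE:
+        flush(table)
+
+
+def flush(table):
+    rows = buffers.get(table)
+    if rows:
+        # One multi-row INSERT per table per flush keeps SQL parsing cost low.
+        conn.execute("INSERT INTO test.{} VALUES {}".format(table, " ".join(rows)))
+        del buffers[table]
+```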
+
+### Data Source
+
+Application programs need to read data from a data source and then write it into TDengine. If you encounter one or more of the situations below, you need to set up message queues between the threads reading from the data source and the threads writing into TDengine.
+
+1. There are multiple data sources, and the data generation speed of each data source is much slower than the writing speed of a single thread. In this case, the purpose of the message queues is to consolidate the data from multiple data sources to increase the batch size of a single write.
+2. The speed of data generation from a single data source is much higher than the writing speed of a single thread. The purpose of the message queue in this case is to provide a buffer so that data is not lost and multiple writing threads can fetch data from the buffer.
+3. The data for a single table comes from multiple data sources. In this case the purpose of the message queues is to combine the data for a single table to improve write efficiency.
+
+If the data source is Kafka, the application program is a Kafka consumer and can benefit from some Kafka features to achieve high-performance writing:
+
+1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches.
+2. Subscribe to multiple topics to accumulate data.
+3. Add more consumers to gain more concurrency and throughput.
+4. Increase the size of a single fetch to increase the size of a write batch.
+
+### Tune TDengine
+
+TDengine is a distributed, high-performance time-series database; there are also some ways to tune TDengine for better writing performance.
+
+1. Set a proper number of `vgroups` according to the available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to utilize CPU resources, you can use a higher value.
+2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed evenly across vgroups. The purpose is to balance the workload among all vnodes so that system resources are utilized better and performance is higher.
+
+For more performance tuning tips, please refer to [Performance Optimization](../../../operation/optimize) and [Configuration Parameters](../../../reference/config).
+
+## Sample Programs
+
+This section introduces sample programs that demonstrate how to write into TDengine with high performance.
+
+### Scenario
+
+Below is the scenario for the high-performance writing sample programs.
+
+- The application program reads data from a data source; the sample programs simulate a data source by generating data.
+- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads. Each thread establishes a connection to TDengine and has a fixed-size message queue.
+- The application program maps the received data to different writing threads based on the table name, to make sure all the data for each table is always processed by a specific writing thread.
+- Each writing thread writes the received data into TDengine once its message queue becomes empty or the amount of buffered data reaches a threshold.
+
+![Thread Model of High Performance Writing into TDengine](highvolume.webp)
+
+### Sample Programs
+
+The sample programs listed in this section are based on the scenario described previously.
+If your scenario is different, please adjust the code based on the principles described in this chapter.
+
+The sample programs assume the source data belongs to different subtables of the same super table (meters). The super table must be created before the sample programs start writing data. Subtables are created automatically according to the received data. If there are multiple super tables in your case, please adjust the automatic table creation logic accordingly.
+
+
+
+
+**Program Inventory**
+
+| Class            | Description                                                                                           |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main program                                                                                           |
+| ReadTask         | Reads data from the simulated data source and puts it into a queue according to the hash of the table name |
+| WriteTask        | Reads data from a queue, composes a write batch and writes it into TDengine                            |
+| MockDataSource   | Generates data for some subtables of the super table meters                                            |
+| SQLWriter        | WriteTask uses this class to compose SQL, create tables automatically, check SQL length and write data |
+| StmtWriter       | Writes in parameter binding mode (not finished yet)                                                    |
+| DataBaseMonitor  | Calculates the writing speed and outputs it to the console every 10 seconds                            |
+
+Below are the complete code listings for the classes in the above table, along with more detailed descriptions.
+
+<details>
+FastWriteExample
+The main program is responsible for:
+
+1. Creating the message queues
+2. Starting the writing threads
+3. Starting the reading threads
+4. Outputting the writing speed every 10 seconds
+
+The main program provides 4 parameters for tuning:
+
+1. The number of reading threads, default value 1
+2. The number of writing threads, default value 2
+3. The total number of tables in the generated data, default value 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, creating them initially will take a long time.
+4. The batch size of a single write, default value 3,000
+
+The capacity of the message queues also impacts performance and can be tuned by modifying the program. Normally a larger message queue is better: it means a lower possibility of being blocked when enqueueing and higher throughput, but it also consumes more memory. The default value used in the sample programs is already big enough.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+</details>
+ +
+ReadTask
+
+ReadTask reads data from the data source. Each ReadTask is associated with a simulated data source; each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.
+
+ReadTask puts data into the message queue in blocking mode; that is, the put operation blocks if the message queue is full.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+</details>
+ +
+WriteTask + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}} +``` + +
+ +
+ +MockDataSource + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}} +``` + +
+ +
+
+SQLWriter
+
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that the tables are not created in advance; they are created automatically when the "table does not exist" exception is caught. For other exceptions, the SQL that caused the exception is logged for you to debug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+</details>
+ +
+ +DataBaseMonitor + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}} +``` + +
+ +**Steps to Launch** + +
+Launch Java Sample Program
+
+You need to set the environment variable `TDENGINE_JDBC_URL` before launching the program. If the TDengine server is set up on localhost, the default values for user name, password and port can be used, as below:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Launch in IDE**
+
+1. Clone the TDengine repository
+   ```
+   git clone git@github.com:taosdata/TDengine.git --depth 1
+   ```
+2. Use the IDE to open the `docs/examples/java` directory
+3. Configure the environment variable `TDENGINE_JDBC_URL`. If you already configured it before launching the IDE, you can skip this step.
+4. Run the class `com.taos.example.highvolume.FastWriteExample`
+
+**Launch on server**
+
+If you want to launch the sample program on a remote server, please follow the steps below:
+
+1. Package the sample programs. Execute the command below under the directory `TDengine/docs/examples/java`:
+   ```
+   mvn package
+   ```
+2. Create an `examples/java` directory on the server
+   ```
+   mkdir -p examples/java
+   ```
+3. Copy dependencies (the commands below assume you are working on a local Windows host and launching on a remote Linux host)
+   - Copy the dependent packages
+     ```
+     scp -r .\target\lib <user>@<host>:~/examples/java
+     ```
+   - Copy the jar of the sample programs
+     ```
+     scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+     ```
+4. Configure the environment variable
+   Edit `~/.bash_profile` or `~/.bashrc` and add the line below:
+
+   ```
+   export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+   ```
+
+   If your TDengine server is not deployed on localhost or doesn't use the default port, change the above URL to the correct value for your environment.
+
+5. Launch the sample program
+
+   ```
+   java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+   ```
+
+6. The sample program doesn't exit unless you press CTRL + C to terminate it.
+   Below is the output from running on a server with 16 cores, 64 GB memory and an SSD.
+ + ``` + root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12 + 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000 + 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444 + 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521 + 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394 + 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933 + 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696 + 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729 + 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521 + 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788 + 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950 + ``` + +
+ +
+
+
+**Program Inventory**
+
+The Python sample programs use multiple processes and cross-process message queues.
+
+| Function/Class               | Description                                                                     |
+| ---------------------------- | ------------------------------------------------------------------------------- |
+| main function                | Program entry point; creates child processes and message queues                 |
+| run_monitor_process function | Creates the database and super table, calculates the writing speed and outputs it to the console |
+| run_read_task function       | Reads data and distributes it to the message queues                             |
+| MockDataSource class         | Simulates a data source, returning the next 1,000 rows of each table            |
+| run_write_task function      | Reads as much data as possible from its message queue and writes it in a batch  |
+| SQLWriter class              | Writes in SQL and creates tables automatically                                  |
+| StmtWriter class             | Writes in parameter binding mode (not finished yet)                             |
+
+A simplified sketch of the write-task loop follows; the complete listings come after it.
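+The sketch below is not the sample code itself, only an illustration of the pattern `run_write_task` follows: drain as much as possible from the queue without blocking, then write one batch. The `msg_queue`, `writer` and `process_lines` names are hypothetical stand-ins for the real objects in the listings.
+
+```python
+# Simplified sketch of the write-task loop: drain the queue, then write in one batch.
+from queue import Empty
+
+
+def drain_and_write(msg_queue, writer, max_batch_size=3000):
+    """msg_queue and writer are stand-ins for the real objects in the sample code."""
+    lines = []
+    while len(lines) < max_batch_size:
+        try:
+            lines.append(msg_queue.get_nowait())
+        except Empty:
+            break  # queue drained for now; write what we have
+    if lines:
+        writer.process_lines(lines)  # hypothetical batch-write API
+```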
+<details>
+main function
+
+The `main` function is responsible for creating the message queues and forking the child processes. There are 3 kinds of child processes:
+
+1. The monitoring process, which initializes the database and calculates the writing speed
+2. Reading processes (n), which read data from the data source
+3. Writing processes (m), which write data into TDengine
+
+The `main` function takes 5 parameters:
+
+1. The number of reading tasks, default value 1
+2. The number of writing tasks, default value 1
+3. The total number of tables, default value 1,000
+4. The capacity of each message queue, default value 1,000,000 bytes
+5. The batch size of a single write, default value 3,000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+</details>
+ +
+run_monitor_process
+
+The monitoring process initializes the database and monitors the writing speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+</details>
+ +
+
+run_read_task function
+
+The reading process reads data from another data system and distributes it to the message queues allocated to it.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+</details>
+ +
+
+MockDataSource
+
+Below is the simulated data source; we assume that each generated row of data carries its table name.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+</details>
+ +
+run_write_task function
+
+The writing process tries to read as much data as possible from the message queue and writes it in a batch.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+</details>
+ +
+
+SQLWriter
+
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that the tables are not created in advance; they are created automatically when the "table does not exist" exception is caught. For other exceptions, the SQL that caused the exception is logged for you to debug. This class also checks the SQL length: if the SQL length is close to `maxSQLLength`, the SQL is executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` appropriately.
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+</details>
+ +**Steps to Launch** + +
+
+Launch Sample Program in Python
+
+1. Prerequisites
+
+   - The TDengine client driver has been installed
+   - Python3 has been installed, version 3.8 or above
+   - The TDengine Python connector `taospy` has been installed
+
+2. Install faster-fifo to replace Python's built-in multiprocessing.Queue
+
+   ```
+   pip3 install faster-fifo
+   ```
+
+3. Click "Copy" in the sample programs above to copy `fast_write_example.py`, `sql_writer.py` and `mockdatasource.py`.
+
+4. Execute the program
+
+   ```
+   python3 fast_write_example.py
+   ```
+
+   Below is the output from running on a server with 16 cores, 64 GB memory and an SSD.
+
+   ```
+   root@vm85$ python3 fast_write_example.py 8 8
+   2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+   2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+   2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+   2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+   2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+   2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+   2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+   2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+   2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+   2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+   2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+   2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+   2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+   2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+   2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+   2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+   2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+   2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+   2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+   2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+   2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+   2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+   2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+   2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+   2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+   2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+   2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+   2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+   2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+   2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+   2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+   2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+   ```
+
+</details>
+
+:::note
+When using the Python connector in multiple processes, don't establish the connection to TDengine in the parent process; otherwise, all the connections in the child processes will be blocked forever. This is a known issue.
+
+:::
+
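+
+A minimal sketch of the safe pattern follows: each child process opens its own connection after it has started. The host and database names are placeholders.
+
+```python
+import taos
+from multiprocessing import Process
+
+def write_task():
+    # Open the connection inside the child process, never in the parent.
+    conn = taos.connect(host="localhost", database="test")  # placeholder parameters
+    conn.execute("INSERT INTO d1001 VALUES (NOW, 12.3, 221, 0.31)")
+    conn.close()
+
+if __name__ == "__main__":
+    workers = [Process(target=write_task) for _ in range(4)]
+    for w in workers:
+        w.start()
+    for w in workers:
+        w.join()
+```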
+
diff --git a/docs/en/07-develop/03-insert-data/_c_line.mdx b/docs/en/07-develop/03-insert-data/_c_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7f2f0d5dd8198d52dda1da34256e54a1bbb4c967 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_c_line.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/line_example.c:main}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_c_opts_json.mdx b/docs/en/07-develop/03-insert-data/_c_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..34b1d8ab3c1e299c2ab2a1ad6d47f81dfaa364cc --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_c_opts_json.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/json_protocol_example.c:main}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_c_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_c_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6bda068d12fd0b379a5af96438029c9ae476a753 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_c_opts_telnet.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/telnet_line_example.c:main}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_c_sql.mdx b/docs/en/07-develop/03-insert-data/_c_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4e55c3387ee1c6fe860f312afdbdad65142bf7fb --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_c_sql.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/insert_example.c}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_c_stmt.mdx b/docs/en/07-develop/03-insert-data/_c_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4b609efe5e942c7ecb8296e8fdbd0607f1421229 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_c_stmt.mdx @@ -0,0 +1,6 @@ +```c title=Single Row Binding +{{#include docs/examples/c/stmt_example.c}} +``` +```c title=Multiple Row Binding 72:117 +{{#include docs/examples/c/multi_bind_example.c}} +``` \ No newline at end of file diff --git a/docs-en/07-develop/03-insert-data/_category_.yml b/docs/en/07-develop/03-insert-data/_category_.yml similarity index 100% rename from docs-en/07-develop/03-insert-data/_category_.yml rename to docs/en/07-develop/03-insert-data/_category_.yml diff --git a/docs/en/07-develop/03-insert-data/_cs_line.mdx b/docs/en/07-develop/03-insert-data/_cs_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..71f46c62be3dfe7d771a35b2298e476bed353aba --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_cs_line.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/InfluxDBLineExample.cs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx b/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8d80d042c984c513df5ca91813c0cd0a17b58eb5 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/OptsJsonExample.cs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cff32abf1feaf703971111542749fbe40152bc33 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/OptsTelnetExample.cs}} +``` diff --git 
a/docs/en/07-develop/03-insert-data/_cs_sql.mdx b/docs/en/07-develop/03-insert-data/_cs_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1dc7bb3d1366aa3000212786756506eb5eb280e6 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_cs_sql.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/SQLInsertExample.cs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_cs_stmt.mdx b/docs/en/07-develop/03-insert-data/_cs_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..229c874ab9f515e7eae66890a3dfe2e59c129e86 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_cs_stmt.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/StmtInsertExample.cs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_go_line.mdx b/docs/en/07-develop/03-insert-data/_go_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..df2afc0e8720ca14e42e0e4bd7e50276cecace43 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_go_line.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/line/main.go}} +``` diff --git a/docs/en/07-develop/03-insert-data/_go_opts_json.mdx b/docs/en/07-develop/03-insert-data/_go_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..362ce430515c70a3ac502e646630025d7f950612 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_go_opts_json.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/json/main.go}} +``` diff --git a/docs/en/07-develop/03-insert-data/_go_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_go_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..518ea4c8164ab148afff9e21b03d892cbc1bfaf8 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_go_opts_telnet.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/telnet/main.go}} +``` diff --git a/docs/en/07-develop/03-insert-data/_go_sql.mdx b/docs/en/07-develop/03-insert-data/_go_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..02f4d4e2ba21bc14dd67cb0443a1631b06750923 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_go_sql.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/sql/main.go}} +``` diff --git a/docs/en/07-develop/03-insert-data/_go_stmt.mdx b/docs/en/07-develop/03-insert-data/_go_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ab519c9a806345c2f14337f62c74728da955d2e0 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_go_stmt.mdx @@ -0,0 +1,8 @@ +```go +{{#include docs/examples/go/insert/stmt/main.go}} +``` + +:::tip +`github.com/taosdata/driver-go/v2/wrapper` module in driver-go is the wrapper for C API, it can be used to insert data with parameter binding. 
+ +::: diff --git a/docs/en/07-develop/03-insert-data/_java_line.mdx b/docs/en/07-develop/03-insert-data/_java_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..17f759d30fdb76744dc032be60ee91b6dd9f1540 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_java_line.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/LineProtocolExample.java}} +``` diff --git a/docs/en/07-develop/03-insert-data/_java_opts_json.mdx b/docs/en/07-develop/03-insert-data/_java_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1fc0adc202f26c73e64da09456e7e42bdc6367f6 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_java_opts_json.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/JSONProtocolExample.java}} +``` diff --git a/docs/en/07-develop/03-insert-data/_java_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_java_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b68f54b4e872a57f34ae6d5c3651a70812b71154 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_java_opts_telnet.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java}} +``` diff --git a/docs/en/07-develop/03-insert-data/_java_sql.mdx b/docs/en/07-develop/03-insert-data/_java_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..636c7e00eb8846704678ef3cdd8394a99a4528f8 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_java_sql.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java:insert}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_java_stmt.mdx b/docs/en/07-develop/03-insert-data/_java_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2f6a33769044ef5052e633e28a9b60fdab130e88 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_java_stmt.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/StmtInsertExample.java}} +``` diff --git a/docs/en/07-develop/03-insert-data/_js_line.mdx b/docs/en/07-develop/03-insert-data/_js_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cc138a76bde76e779eaa1fe554ecc82c1f564e24 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_js_line.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/influxdb_line_example.js}} +``` diff --git a/docs/en/07-develop/03-insert-data/_js_opts_json.mdx b/docs/en/07-develop/03-insert-data/_js_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cb3c275ce8140ed58d668bf03972a1f960bb6564 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_js_opts_json.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/opentsdb_json_example.js}} +``` diff --git a/docs/en/07-develop/03-insert-data/_js_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_js_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..db96742f31440342516134636db998af987af9fb --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_js_opts_telnet.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/opentsdb_telnet_example.js}} +``` diff --git a/docs/en/07-develop/03-insert-data/_js_sql.mdx b/docs/en/07-develop/03-insert-data/_js_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a9a12f5d2cfb31bcaefba25a82846b455dbc8671 --- 
/dev/null +++ b/docs/en/07-develop/03-insert-data/_js_sql.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/insert_example.js}} +``` diff --git a/docs/en/07-develop/03-insert-data/_js_stmt.mdx b/docs/en/07-develop/03-insert-data/_js_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8df1065c4a42537c2e4c61087ad77cdde9e24a77 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_js_stmt.mdx @@ -0,0 +1,12 @@ +```js title=Single Row Binding +{{#include docs/examples/node/nativeexample/param_bind_example.js}} +``` + +```js title=Multiple Row Binding +{{#include docs/examples/node/nativeexample/multi_bind_example.js:insertData}} +``` + +:::info +Multiple row binding is better in performance than single row binding, but it can only be used with `INSERT` statement while single row binding can be used for other SQL statements besides `INSERT`. + +::: diff --git a/docs/en/07-develop/03-insert-data/_py_line.mdx b/docs/en/07-develop/03-insert-data/_py_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..85f7e32e6681c6d428a2332220194c169c421f2f --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_py_line.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/line_protocol_example.py}} +``` diff --git a/docs/en/07-develop/03-insert-data/_py_opts_json.mdx b/docs/en/07-develop/03-insert-data/_py_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..195c7090c02e03131c4261c57f1414a5ab1ba6b6 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_py_opts_json.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/json_protocol_example.py}} +``` diff --git a/docs/en/07-develop/03-insert-data/_py_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_py_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3bae1ea57bcffe50be5b4e96a7ae8f83faed2087 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_py_opts_telnet.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/telnet_line_protocol_example.py}} +``` diff --git a/docs/en/07-develop/03-insert-data/_py_sql.mdx b/docs/en/07-develop/03-insert-data/_py_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1557e3994b04e64c596918ee67c63e7765ebaa07 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_py_sql.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/native_insert_example.py}} +``` diff --git a/docs/en/07-develop/03-insert-data/_py_stmt.mdx b/docs/en/07-develop/03-insert-data/_py_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4f7636bfb8ea920e1e879b8e59083543cf798d01 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_py_stmt.mdx @@ -0,0 +1,12 @@ +```py title=Single Row Binding +{{#include docs/examples/python/bind_param_example.py}} +``` + +```py title=Multiple Row Binding +{{#include docs/examples/python/multi_bind_example.py:bind_batch}} +``` + +:::info +Multiple row binding is better in performance than single row binding, but it can only be used with `INSERT` statement while single row binding can be used for other SQL statements besides `INSERT`. 
+ +::: \ No newline at end of file diff --git a/docs/en/07-develop/03-insert-data/_rust_line.mdx b/docs/en/07-develop/03-insert-data/_rust_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dbb35d76bc3517463902b642ce4a3861ae42b2f8 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_line.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx b/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cc2055510bce006491ed277a8e884b9958a5a993 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..109c0c5d019e250b87e12c535e4f55c69924b4af --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_rust_sql.mdx b/docs/en/07-develop/03-insert-data/_rust_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fb59a4826510e666457ac592328cc5ba17412c79 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_sql.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/restexample/examples/insert_example.rs}} +``` diff --git a/docs/en/07-develop/03-insert-data/_rust_stmt.mdx b/docs/en/07-develop/03-insert-data/_rust_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a889b56745601158489037a590b6cf5bd80da543 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_stmt.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/nativeexample/examples/stmt_example.rs}} +``` diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp new file mode 100644 index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs/en/07-develop/03-insert-data/index.md similarity index 100% rename from docs-en/07-develop/03-insert-data/index.md rename to docs/en/07-develop/03-insert-data/index.md diff --git a/docs/en/07-develop/04-query-data/_c.mdx b/docs/en/07-develop/04-query-data/_c.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c51557ef2918dd9152e329c6e1937109d286b11c --- /dev/null +++ b/docs/en/07-develop/04-query-data/_c.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/query_example.c}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/04-query-data/_c_async.mdx b/docs/en/07-develop/04-query-data/_c_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..641a53e82ddb252e1b3255799bd922158a08f229 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_c_async.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/async_query_example.c:demo}} +``` \ No newline at end of file diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs/en/07-develop/04-query-data/_category_.yml similarity index 100% rename from 
docs-en/07-develop/04-query-data/_category_.yml rename to docs/en/07-develop/04-query-data/_category_.yml diff --git a/docs/en/07-develop/04-query-data/_cs.mdx b/docs/en/07-develop/04-query-data/_cs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4bb582ecbfaeceac679af975e7752d1caeacb018 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_cs.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/QueryExample.cs}} +``` diff --git a/docs/en/07-develop/04-query-data/_cs_async.mdx b/docs/en/07-develop/04-query-data/_cs_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3ecf635fd39db402d1db68de6d7336b7b2d9d8e8 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_cs_async.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/AsyncQueryExample.cs}} +``` diff --git a/docs/en/07-develop/04-query-data/_go.mdx b/docs/en/07-develop/04-query-data/_go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b43894a1ebe8aa0a261cce5f2469f2b3f8449fc4 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_go.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/query/sync/main.go}} +``` diff --git a/docs/en/07-develop/04-query-data/_go_async.mdx b/docs/en/07-develop/04-query-data/_go_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3fbc6f5b6dac9d3987678e64d7268eed200ce513 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_go_async.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/query/async/main.go}} +``` diff --git a/docs/en/07-develop/04-query-data/_java.mdx b/docs/en/07-develop/04-query-data/_java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..74de32658c658fb81c29349a1997e32ed512db1b --- /dev/null +++ b/docs/en/07-develop/04-query-data/_java.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/RestQueryExample.java}} +``` diff --git a/docs/en/07-develop/04-query-data/_js.mdx b/docs/en/07-develop/04-query-data/_js.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5883d378e7c7acab033bffb2018f00f1ab5a48d5 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_js.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/query_example.js}} +``` diff --git a/docs/en/07-develop/04-query-data/_js_async.mdx b/docs/en/07-develop/04-query-data/_js_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4b0f54a0342e62da1e5050d49546ca605ae1d729 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_js_async.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/async_query_example.js}} +``` diff --git a/docs/en/07-develop/04-query-data/_py.mdx b/docs/en/07-develop/04-query-data/_py.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8ebeca450bd611913874b606b73e65f1e484d239 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_py.mdx @@ -0,0 +1,11 @@ +Result set is iterated row by row. + +```py +{{#include docs/examples/python/query_example.py:iter}} +``` + +Result set is retrieved as a whole, each row is converted to a dict and returned. 
+
+```py
+{{#include docs/examples/python/query_example.py:fetch_all}}
+```
\ No newline at end of file
diff --git a/docs/en/07-develop/04-query-data/_py_async.mdx b/docs/en/07-develop/04-query-data/_py_async.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..393a5b173351bafcbdb469ac7d00db0a6b22dbc1
--- /dev/null
+++ b/docs/en/07-develop/04-query-data/_py_async.mdx
@@ -0,0 +1,8 @@
+```py
+{{#include docs/examples/python/async_query_example.py}}
+```
+
+:::note
+This sample code can't be run on Windows systems for now.
+
+:::
diff --git a/docs/en/07-develop/04-query-data/_rust.mdx b/docs/en/07-develop/04-query-data/_rust.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..cab1b403fbba0cb432ecb9cb280a0fa7582c5be1
--- /dev/null
+++ b/docs/en/07-develop/04-query-data/_rust.mdx
@@ -0,0 +1,3 @@
+```rust
+{{#include docs/examples/rust/restexample/examples/query_example.rs}}
+```
diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e8e4b5c0ad555c0807af5f50a75afdffc1aaa50c
--- /dev/null
+++ b/docs/en/07-develop/04-query-data/index.mdx
@@ -0,0 +1,186 @@
+---
+sidebar_label: Query data
+title: Query data
+description: "This chapter introduces major query functionalities and how to perform sync and async queries using connectors."
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import JavaQuery from "./_java.mdx";
+import PyQuery from "./_py.mdx";
+import GoQuery from "./_go.mdx";
+import RustQuery from "./_rust.mdx";
+import NodeQuery from "./_js.mdx";
+import CsQuery from "./_cs.mdx";
+import CQuery from "./_c.mdx";
+import PyAsync from "./_py_async.mdx";
+import NodeAsync from "./_js_async.mdx";
+import CsAsync from "./_cs_async.mdx";
+import CAsync from "./_c_async.mdx";
+
+## Introduction
+
+SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through the REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
+
+- Query on single or multiple columns
+- Filter on tags or data columns: >, <, =, <\>, like
+- Grouping of results: `Group By`
+- Sorting of results: `Order By`
+- Limit the number of results: `Limit/Offset`
+- Arithmetic on columns of numeric types or aggregate results
+- Join query with timestamp alignment
+- Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff
+
+For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows.
+
+```sql
+select * from d1001 where voltage > 215 order by ts desc limit 2;
+```
+
+```title=Output
+taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
+ ts | current | voltage | phase |
+======================================================================================
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
+Query OK, 2 row(s) in set (0.001100s)
+```
+
+To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (the difference between the maximum and the minimum), and `last_row` (the last row).
Furthermore, continuous query is also supported in TDengine. + +For detailed query syntax please refer to [Select](../../12-taos-sql/06-select.md). + +## Aggregation among Tables + +In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. + +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. + +### Example 1 + +In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. + +``` +taos> SELECT AVG(voltage) FROM meters GROUP BY location; + avg(voltage) | location | +============================================================= + 222.000000000 | California.LosAngeles | + 219.200000000 | California.SanFrancisco | +Query OK, 2 row(s) in set (0.002136s) +``` + +### Example 2 + +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. + +``` +taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; + count(*) | max(current) | +================================== + 5 | 13.4 | +Query OK, 1 row(s) in set (0.002136s) +``` + +Join queries are only allowed between subtables of the same STable. In [Select](../../12-taos-sql/06-select.md), all query operations are marked as to whether they support STables or not. + +## Down Sampling and Interpolation + +In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. + +``` +taos> SELECT sum(current) FROM d1001 INTERVAL(10s); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:00.000 | 10.300000191 | + 2018-10-03 14:38:10.000 | 24.900000572 | +Query OK, 2 row(s) in set (0.000883s) +``` + +Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. + +``` +taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:04.000 | 10.199999809 | + 2018-10-03 14:38:05.000 | 32.900000572 | + 2018-10-03 14:38:06.000 | 11.500000000 | + 2018-10-03 14:38:15.000 | 12.600000381 | + 2018-10-03 14:38:16.000 | 36.000000000 | +Query OK, 5 row(s) in set (0.001538s) +``` + +Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. 
+ +``` +taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:04.500 | 11.189999809 | + 2018-10-03 14:38:05.500 | 31.900000572 | + 2018-10-03 14:38:06.500 | 11.600000000 | + 2018-10-03 14:38:15.500 | 12.300000381 | + 2018-10-03 14:38:16.500 | 35.000000000 | +Query OK, 5 row(s) in set (0.001521s) +``` + +In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. + +Interpolation can be performed in TDengine if there is no data in a time range. + +For more details please refer to [Aggregate by Window](../../12-taos-sql/12-interval.md). + +## Examples + +### Query + +In the section describing [Insert](../03-insert-data/01-sql-writing.mdx), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable. + + + + + + + + + + + + + + + + + + + + + + + + + +:::note + +1. With either REST connection or native connection, the above sample code works well. +2. Please note that `use db` can't be used in case of REST connection because it's stateless. + +::: + +### Asynchronous Query + +Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other work to improve the performance of the whole application system. Async APIs perform especially better in the case of poor networks. + +Please note that async query can only be used with a native connection. + + + + + + + + + + + + diff --git a/docs-en/07-develop/06-continuous-query.mdx b/docs/en/07-develop/06-continuous-query.mdx similarity index 100% rename from docs-en/07-develop/06-continuous-query.mdx rename to docs/en/07-develop/06-continuous-query.mdx diff --git a/docs/en/07-develop/07-subscribe.mdx b/docs/en/07-develop/07-subscribe.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e309a33fc8f2c30c7fe2ab2a21a517029b089ab1 --- /dev/null +++ b/docs/en/07-develop/07-subscribe.mdx @@ -0,0 +1,256 @@ +--- +sidebar_label: Data Subscription +description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients." +title: Data Subscription +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import Java from "./_sub_java.mdx"; +import Python from "./_sub_python.mdx"; +import Go from "./_sub_go.mdx"; +import Rust from "./_sub_rust.mdx"; +import Node from "./_sub_node.mdx"; +import CSharp from "./_sub_cs.mdx"; +import CDemo from "./_sub_c.mdx"; + +## Introduction + +Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue. + +A lightweight service for data subscription and publishing is built into TDengine. 
With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side. + +There are 3 major APIs related to subscription provided in the TDengine client driver. + +```c +taos_subscribe +taos_consume +taos_unsubscribe +``` + +For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](../continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). + +If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: + +The first way is to query each sub table and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below. + +```sql +select * from D1001 where ts > {last_timestamp1} and current > 10; +select * from D1002 where ts > {last_timestamp2} and current > 10; +... +``` + +The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number. + +A better way is to query on the STable, only one `select` is enough regardless of the number of meters, like below: + +```sql +select * from meters where ts > {last_timestamp} and current > 10; +``` + +However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed. + +All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine. + +The first step is to create subscription using `taos_subscribe`. + +```c +TAOS_SUB* tsub = NULL; +if (async) { +  // create an asynchronous subscription, the callback function will be called every 1s +  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); +} else { +  // create an synchronous subscription, need to call 'taos_consume' manually +  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); +} +``` + +The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. 
Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time consuming operations in the callback function. + +The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. For asynchronous subscription, the taos_subscribe function should be called exclusively by the current thread, to avoid unpredictable errors. + +The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement: + +```sql +select * from meters where current > 10; +``` + +Please note that, all the data will be processed because no start time is specified. If we only want to process data for the past day, a time related condition can be added: + +```sql +select * from meters where ts > now - 1d and current > 10; +``` + +The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side. + +If the subscription named as `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named as `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken. + +If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again. + +The last parameter of `taos_subscribe` is the polling interval in units of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function. + +The second to last parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode. + +After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`. + +```c +if (async) { +  getchar(); +} else while(1) { +  TAOS_RES* res = taos_consume(tsub); +  if (res == NULL) { +    printf("failed to consume data."); +    break; +  } else { +    print_result(res, blockFetch); +    getchar(); +  } +} +``` + +In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`. 
+
+```c
+void print_result(TAOS_RES* res, int blockFetch) {
+  TAOS_ROW row = NULL;
+  int num_fields = taos_num_fields(res);
+  TAOS_FIELD* fields = taos_fetch_fields(res);
+  int nRows = 0;
+  if (blockFetch) {
+    nRows = taos_fetch_block(res, &row);
+    for (int i = 0; i < nRows; i++) {
+      char temp[256];
+      taos_print_row(temp, row + i, fields, num_fields);
+      puts(temp);
+    }
+  } else {
+    while ((row = taos_fetch_row(res))) {
+      char temp[256];
+      taos_print_row(temp, row, fields, num_fields);
+      puts(temp);
+      nRows++;
+    }
+  }
+  printf("%d rows consumed.\n", nRows);
+}
+```
+
+In the above code, `taos_print_row` is used to process the data consumed. All matching rows are printed.
+
+In async mode, consuming data is simpler, as shown below.
+
+```c
+void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+  print_result(res, *(int*)param);
+}
+```
+
+`taos_unsubscribe` can be invoked to terminate a subscription.
+
+```c
+taos_unsubscribe(tsub, keep);
+```
+
+The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_, under which there is a file with the same name as `topic` for each subscription. (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on Windows servers, so you need to change the `DataDir` value to an existing directory.) The subscription will also be restarted from the beginning if the corresponding progress file is removed.
+
+Now let's see the effect of the above sample code, assuming the prerequisites below have been met.
+
+- The sample code has been downloaded to the local system
+- TDengine has been installed and launched properly on the same system
+- The database, STable, and subtables required in the sample code are ready
+
+Launch the command below in the directory where the sample code resides to compile and start the program.
+
+```bash
+make
+./subscribe -sql='select * from meters where current > 10;'
+```
+
+After the program is started, open another terminal and launch TDengine CLI `taos`, then use the SQL commands below to insert a row whose current is 12A into table **D1001**.
+
+```sql
+use test;
+insert into D1001 values(now, 12, 220, 1);
+```
+
+Then, this row of data will be shown by the example program on the first terminal because its current exceeds 10A. More data can be inserted for you to observe the output of the example program.
+
+## Examples
+
+The example program below demonstrates how to subscribe, using connectors, to data rows in which the current exceeds 10A.
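+
+Before the per-connector examples, here is a short Python sketch of the same workflow, assuming taospy's subscription API (`connect`, `subscribe`, `consume`), which wraps the C functions described above. The connection parameters are placeholders.
+
+```python
+import taos
+
+conn = taos.connect(host="localhost", database="power")  # placeholder parameters
+
+# The topic must be unique per client; True restarts consumption from the
+# beginning; the last argument is the polling interval in milliseconds.
+sub = conn.subscribe(True, "current-over-10a",
+                     "select * from meters where current > 10;", 1000)
+try:
+    for _ in range(3):        # consume a few batches, then stop
+        for row in sub.consume():
+            print(row)
+finally:
+    sub.close(True)           # True keeps the subscription progress
+    conn.close()
+```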
+
+### Prepare Data
+
+```bash
+# create database "power"
+taos> create database power;
+# use "power" as the database in the following operations
+taos> use power;
+# create super table "meters"
+taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
+# create tables using the schema defined by super table "meters"
+taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
+taos> create table d1002 using meters tags ("California.LosAngeles", 2);
+# insert some rows
+taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
+taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
+# select the rows in which current is bigger than 10A
+taos> select * from meters where current > 10;
+ ts | current | voltage | phase | location | groupid |
+===========================================================================================================
+ 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 |
+ 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 |
+ 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 |
+ 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 |
+ 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 |
+Query OK, 5 row(s) in set (0.004896s)
+```
+
+### Example Programs
+
+
+
+
+
+
+
+
+
+ {/*
+
+ */}
+ {/*
+
+
+
+
+ */}
+
+
+
+
+
+### Run the Examples
+
+The example programs first consume all historical data matching the criteria.
+
+```bash
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+```
+
+Next, use TDengine CLI to insert a new row.
+
+```
+# taos
+taos> use power;
+taos> insert into d1001 values(now, 12.4, 220, 1);
+```
+
+Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
+ +``` +ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 +``` diff --git a/docs-en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md similarity index 100% rename from docs-en/07-develop/08-cache.md rename to docs/en/07-develop/08-cache.md diff --git a/docs-en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md similarity index 100% rename from docs-en/07-develop/09-udf.md rename to docs/en/07-develop/09-udf.md diff --git a/docs-en/07-develop/_category_.yml b/docs/en/07-develop/_category_.yml similarity index 100% rename from docs-en/07-develop/_category_.yml rename to docs/en/07-develop/_category_.yml diff --git a/docs/en/07-develop/_sub_c.mdx b/docs/en/07-develop/_sub_c.mdx new file mode 100644 index 0000000000000000000000000000000000000000..da492a0269f064d8cdf9dfb80969894131d94015 --- /dev/null +++ b/docs/en/07-develop/_sub_c.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/subscribe_demo.c}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_cs.mdx b/docs/en/07-develop/_sub_cs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a435ea0273c94cbe75eaf7431e1a9c39d49d92e3 --- /dev/null +++ b/docs/en/07-develop/_sub_cs.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/SubscribeDemo.cs}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_go.mdx b/docs/en/07-develop/_sub_go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..34b2aefd92c5eef75b59fbbba96b83da091722a7 --- /dev/null +++ b/docs/en/07-develop/_sub_go.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/sub/main.go}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_java.mdx b/docs/en/07-develop/_sub_java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ab77f61348c115d3fe3336df47d467c5525f41b8 --- /dev/null +++ b/docs/en/07-develop/_sub_java.mdx @@ -0,0 +1,7 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} +``` +:::note +For now Java connector doesn't provide asynchronous subscription, but `TimerTask` can be used to achieve similar purpose. 
+ +::: \ No newline at end of file diff --git a/docs/en/07-develop/_sub_node.mdx b/docs/en/07-develop/_sub_node.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3eeff0922a31a478dd34a77c6cb6471f51a57a8c --- /dev/null +++ b/docs/en/07-develop/_sub_node.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/subscribe_demo.js}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_python.mdx b/docs/en/07-develop/_sub_python.mdx new file mode 100644 index 0000000000000000000000000000000000000000..490b76fca6deb61e61dc59c2096b30742a7d25f7 --- /dev/null +++ b/docs/en/07-develop/_sub_python.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/subscribe_demo.py}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_rust.mdx b/docs/en/07-develop/_sub_rust.mdx new file mode 100644 index 0000000000000000000000000000000000000000..afb8d79daa3bbd72d72795cb4425f12277d710fc --- /dev/null +++ b/docs/en/07-develop/_sub_rust.mdx @@ -0,0 +1,3 @@ +```rs +{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}} +``` \ No newline at end of file diff --git a/docs-en/07-develop/index.md b/docs/en/07-develop/index.md similarity index 100% rename from docs-en/07-develop/index.md rename to docs/en/07-develop/index.md diff --git a/docs-en/10-cluster/01-deploy.md b/docs/en/10-cluster/01-deploy.md similarity index 100% rename from docs-en/10-cluster/01-deploy.md rename to docs/en/10-cluster/01-deploy.md diff --git a/docs/en/10-cluster/02-cluster-mgmt.md b/docs/en/10-cluster/02-cluster-mgmt.md new file mode 100644 index 0000000000000000000000000000000000000000..bd3386c41161fc55b4bedcecd6ad3ab5c35be8b6 --- /dev/null +++ b/docs/en/10-cluster/02-cluster-mgmt.md @@ -0,0 +1,213 @@ +--- +sidebar_label: Operation +title: Manage DNODEs +--- + +The previous section, [Deployment],(/cluster/deploy) showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary. + +:::note +All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege. + +::: + +## Show DNODEs + +The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode. + +```sql +SHOW DNODES; +``` + +Below is the example output of this command. + +``` +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | +Query OK, 1 row(s) in set (0.008298s) +``` + +## Show VGROUPs + +To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. 
The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes. + +Launch TDengine CLI `taos` and execute below command: + +```sql +USE SOME_DATABASE; +SHOW VGROUPS; +``` + +The example output is below: + +``` +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | +Query OK, 1 row(s) in set (0.008298s) + +taos> use db; +Database changed. + +taos> show vgroups; + vgId | tables | status | onlines | v1_dnode | v1_status | compacting | +========================================================================================== + 14 | 38000 | ready | 1 | 1 | leader | 0 | + 15 | 38000 | ready | 1 | 1 | leader | 0 | + 16 | 38000 | ready | 1 | 1 | leader | 0 | + 17 | 38000 | ready | 1 | 1 | leader | 0 | + 18 | 37001 | ready | 1 | 1 | leader | 0 | + 19 | 37000 | ready | 1 | 1 | leader | 0 | + 20 | 37000 | ready | 1 | 1 | leader | 0 | + 21 | 37000 | ready | 1 | 1 | leader | 0 | +Query OK, 8 row(s) in set (0.001154s) +``` + +## Add DNODE + +Launch TDengine CLI `taos` and execute the command below to add the end point of a new dnode into the EPI (end point) list of the cluster. "fqdn:port" must be quoted using double quotes. + +```sql +CREATE DNODE "fqdn:port"; +``` + +The example output is as below: + +``` +taos> create dnode "localhost:7030"; +Query OK, 0 of 0 row(s) in database (0.008203s) + +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | + 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received | +Query OK, 2 row(s) in set (0.001017s) +``` + +It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status. + +``` +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | localhost:6030 | 3 | 8 | ready | any | 2022-04-15 08:27:09.359 | | + 2 | localhost:7030 | 6 | 8 | ready | any | 2022-04-19 08:14:59.165 | | +Query OK, 2 row(s) in set (0.001316s) +``` + +## Drop DNODE + +Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. 
+
+```sql
+DROP DNODE "fqdn:port";
+```
+
+or
+
+```sql
+DROP DNODE dnodeId;
+```
+
+The example output is below:
+
+```
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
+ 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received |
+Query OK, 2 row(s) in set (0.001017s)
+
+taos> drop dnode 2;
+Query OK, 0 of 0 row(s) in database (0.000518s)
+
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
+Query OK, 1 row(s) in set (0.001137s)
+```
+
+In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and see that only the dnode with ID 1 is still in the cluster.
+
+:::note
+
+- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs.
+- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode from the TDengine cluster. Only after a dnode is dropped can the corresponding `taosd` process be stopped.
+- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept requests from the dropped dnode.
+- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
+
+:::
+
+## Move VNODE
+
+A vnode can be manually moved from one dnode to another.
+
+Launch TDengine CLI `taos` and execute the command below:
+
+```sql
+ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
+```
+
+In the above command, `source-dnodeId` is the dnodeId of the dnode where the vnode currently resides, and `dest-dnodeId` specifies the target dnode. The vgId (vgroup ID) can be shown by `SHOW VGROUPS`.
+
+First `show vgroups` is executed to show the vgroup distribution.
+
+```
+taos> show vgroups;
+ vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
+==========================================================================================
+ 14 | 38000 | ready | 1 | 3 | leader | 0 |
+ 15 | 38000 | ready | 1 | 3 | leader | 0 |
+ 16 | 38000 | ready | 1 | 3 | leader | 0 |
+ 17 | 38000 | ready | 1 | 3 | leader | 0 |
+ 18 | 37001 | ready | 1 | 3 | leader | 0 |
+ 19 | 37000 | ready | 1 | 1 | leader | 0 |
+ 20 | 37000 | ready | 1 | 1 | leader | 0 |
+ 21 | 37000 | ready | 1 | 1 | leader | 0 |
+Query OK, 8 row(s) in set (0.001314s)
+```
+
+It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in dnode 1. Now we want to move vgId 18 from dnode 3 to dnode 1. Execute the command below in `taos`.
+
+```
+taos> alter dnode 3 balance "vnode:18-dnode:1";
+
+DB error: Balance already enabled (0.00755
+```
+
+However, the operation fails with the error message shown above, which means automatic load balancing has been enabled in the current database, so manual load balancing can't be performed.
Shut down the cluster, set the `balance` parameter to 0 on all the dnodes, then restart the cluster, and execute `alter dnode` and `show vgroups` as below.

```
taos> alter dnode 3 balance "vnode:18-dnode:1";
Query OK, 0 row(s) in set (0.000575s)

taos> show vgroups;
 vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
=================================================================================================================
 14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
 15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
 16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
 17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
 18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
 19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
 20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
 21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
Query OK, 8 row(s) in set (0.001242s)
```

It can be seen from the above output that vgId 18 has been moved from dnode 3 to dnode 1.

:::note

- Manual load balancing can only be performed when automatic load balancing is disabled, i.e. `balance` is set to 0.
- Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it is in offline, unsynced or syncing status.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.

:::
diff --git a/docs/en/10-cluster/03-ha-and-lb.md b/docs/en/10-cluster/03-ha-and-lb.md
new file mode 100644
index 0000000000000000000000000000000000000000..9780e8f6c68904e444d07c6a8c87b095c6b70ead
--- /dev/null
+++ b/docs/en/10-cluster/03-ha-and-lb.md
@@ -0,0 +1,81 @@
+---
+sidebar_label: HA & LB
+title: High Availability and Load Balancing
+---
+
+## High Availability of Vnode
+
+High availability of vnode and mnode can be achieved through replicas in TDengine.
+
+A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
+
+```sql
+CREATE DATABASE demo replica 3;
+```
+
+The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.
+
+There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected.
While in theory, the cluster will provide data access for reading or inserting data if over half the vnodes in vgroups are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly if over half of the dnodes are online. + +## High Availability of Mnode + +Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously. + +There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster. + +```sql +SHOW MNODES; +``` + +The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. + +For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher. + +:::note +If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. + +::: + +## Load Balancing + +Load balancing will be triggered in 3 cases without manual intervention. + +- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically. +- When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically. +- When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes. + +:::tip +Automatic load balancing is controlled by the parameter `balance`, 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance). + +::: + +## Dnode Offline + +When a dnode is offline, it can be detected by the TDengine cluster. There are two cases: + +- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished. + +- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. 
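+
+For reference, a minimal `taos.cfg` sketch covering the parameters discussed in this chapter (the values shown are illustrative, not recommendations):
+
+```
+# taos.cfg (excerpt)
+numOfMnodes      3       # number of mnodes, valid range [1,3]
+offlineThreshold 864000  # seconds a dnode may stay offline before it is removed
+balance          1       # 1: automatic load balancing enabled, 0: disabled
+```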
+
+:::note
+If all the vnodes in a vgroup (or mnodes in the mnode group) are in offline or unsynced status, a leader can only be elected after all the vnodes or mnodes in the group come back online and can exchange their status. Following this, the vgroup (or mnode group) is able to provide service again.
+
+:::
+
+## Arbitrator
+
+The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2 or 4. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2 or 4.
+
+To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but is only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes in a group, including the arbitrator, are available, the vgroup or mnode group can provide data insertion or query services normally.
+
+Normally, it's prudent to configure the replica number for each DB, or the system parameter `numOfMnodes`, to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus the arbitrator component can be used to achieve both lower storage cost and high availability.
+
+The arbitrator component is installed with the server package. For details about how to install it, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service.
+
+In the configuration file `taos.cfg` of each dnode, the parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. The arbitrator will be used automatically if the replica number is configured to an even number, and ignored if it is configured to an odd number.
+
+The arbitrator can be shown by executing the command below in TDengine CLI `taos`; its role is displayed as "arb".
+
+```sql
+SHOW DNODES;
+```
diff --git a/docs-en/10-cluster/_category_.yml b/docs/en/10-cluster/_category_.yml
similarity index 100%
rename from docs-en/10-cluster/_category_.yml
rename to docs/en/10-cluster/_category_.yml
diff --git a/docs-en/10-cluster/index.md b/docs/en/10-cluster/index.md
similarity index 100%
rename from docs-en/10-cluster/index.md
rename to docs/en/10-cluster/index.md
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
similarity index 100%
rename from docs-en/12-taos-sql/01-data-type.md
rename to docs/en/12-taos-sql/01-data-type.md
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
new file mode 100644
index 0000000000000000000000000000000000000000..c2961d62415cd7d23b031777082801426b221190
--- /dev/null
+++ b/docs/en/12-taos-sql/02-database.md
@@ -0,0 +1,126 @@
+---
+sidebar_label: Database
+title: Database
+description: "create and drop database, show or change database parameters"
+---
+
+## Create Database
+
+```
+CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
+```
+
+:::info
+
+1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
+2. UPDATE specifies whether the data can be updated and how the data can be updated.
+   1. UPDATE set to 0 means update operation is not allowed. An update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is.
+   2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL.
+   3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged.
+3. The maximum length of a database name is 33 bytes.
+4. The maximum length of a SQL statement is 65,480 bytes.
+5. Below are the parameters that can be used when creating a database
+   - cache: [Description](/reference/config/#cache)
+   - blocks: [Description](/reference/config/#blocks)
+   - days: [Description](/reference/config/#days)
+   - keep: [Description](/reference/config/#keep)
+   - minRows: [Description](/reference/config/#minrows)
+   - maxRows: [Description](/reference/config/#maxrows)
+   - wal: [Description](/reference/config/#wallevel)
+   - fsync: [Description](/reference/config/#fsync)
+   - update: [Description](/reference/config/#update)
+   - cacheLast: [Description](/reference/config/#cachelast)
+   - replica: [Description](/reference/config/#replica)
+   - quorum: [Description](/reference/config/#quorum)
+   - comp: [Description](/reference/config/#comp)
+   - precision: [Description](/reference/config/#precision)
+6. Please note that all of the parameters mentioned in this section are configured in the configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override the default parameters, they must be specified in the `create database` statement.
+
+:::
+
+## Show Current Configuration
+
+```
+SHOW VARIABLES;
+```
+
+## Specify The Database In Use
+
+```
+USE db_name;
+```
+
+:::note
+This statement does not apply when using a REST connection. With a REST connection, the database name must prefix a table or STable name. For example, to query the STable "meters" in database "test", the query would be `SELECT count(*) FROM test.meters`.
+
+:::
+
+## Drop Database
+
+```
+DROP DATABASE [IF EXISTS] db_name;
+```
+
+:::note
+All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command.
+
+:::
+
+## Change Database Configuration
+
+Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of a database please refer to [Configuration Parameters](/reference/config/).
+
+```
+ALTER DATABASE db_name COMP 2;
+```
+
+The COMP parameter specifies whether the data is compressed and how it is compressed.
+
+```
+ALTER DATABASE db_name REPLICA 2;
+```
+
+The REPLICA parameter specifies the number of replicas of the database.
+
+```
+ALTER DATABASE db_name KEEP 365;
+```
+
+The KEEP parameter specifies the number of days for which the data will be kept.
+
+```
+ALTER DATABASE db_name QUORUM 2;
+```
+
+The QUORUM parameter specifies the necessary number of confirmations to determine whether the data is written successfully.
+
+```
+ALTER DATABASE db_name BLOCKS 100;
+```
+
+The BLOCKS parameter specifies the number of memory blocks used by each VNODE.
+
+```
+ALTER DATABASE db_name CACHELAST 0;
+```
+
+The CACHELAST parameter specifies whether and how the latest data of a subtable is cached.
+
+:::tip
+The above parameters can be changed using the `ALTER DATABASE` command without restarting. For more details of all configuration parameters please refer to [Configuration Parameters](/reference/config/).
+
+:::
+
+## Show All Databases
+
+```
+SHOW DATABASES;
+```
+
+## Show The Create Statement of A Database
+
+```
+SHOW CREATE DATABASE db_name;
+```
+
+This command is useful when migrating data from one TDengine cluster to another. It can be used to get the CREATE statement, which in turn can be used in another TDengine instance to create the exact same database.
diff --git a/docs-en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
similarity index 100%
rename from docs-en/12-taos-sql/03-table.md
rename to docs/en/12-taos-sql/03-table.md
diff --git a/docs-en/12-taos-sql/04-stable.md b/docs/en/12-taos-sql/04-stable.md
similarity index 100%
rename from docs-en/12-taos-sql/04-stable.md
rename to docs/en/12-taos-sql/04-stable.md
diff --git a/docs-en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md
similarity index 100%
rename from docs-en/12-taos-sql/05-insert.md
rename to docs/en/12-taos-sql/05-insert.md
diff --git a/docs-en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
similarity index 100%
rename from docs-en/12-taos-sql/06-select.md
rename to docs/en/12-taos-sql/06-select.md
diff --git a/docs-en/07-develop/05-delete-data.mdx b/docs/en/12-taos-sql/08-delete-data.mdx
similarity index 100%
rename from docs-en/07-develop/05-delete-data.mdx
rename to docs/en/12-taos-sql/08-delete-data.mdx
diff --git a/docs-en/12-taos-sql/07-function.md b/docs/en/12-taos-sql/10-function.md
similarity index 100%
rename from docs-en/12-taos-sql/07-function.md
rename to docs/en/12-taos-sql/10-function.md
diff --git a/docs/en/12-taos-sql/12-interval.md b/docs/en/12-taos-sql/12-interval.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d5502781081e12b008314a84101fbf4c37effd7
--- /dev/null
+++ b/docs/en/12-taos-sql/12-interval.md
@@ -0,0 +1,113 @@
+---
+sidebar_label: Interval
+title: Aggregate by Time Window
+---
+
+Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
+Window related clauses are used to divide the data set to be queried into subsets, and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.
+
+## Time Window
+
+The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step by which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step by which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
+
+![TDengine Database Time Window](./timewindow-1.webp)
+
+`INTERVAL` and `SLIDING` should be used with aggregate functions and selection functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.
+
+```
+SELECT * FROM temp_tb_1 INTERVAL(1m);
+```
+
+The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
+
+```
+SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
+```
+
+When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range specified by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to the same value in the `taos.cfg` configuration file on the client side and server side.
+
+## Status Window
+
+When an integer, bool, or string is used to represent the status of a device at any given moment, continuous rows with the same status belong to the same status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+
+![TDengine Database Status Window](./timewindow-3.webp)
+
+`STATE_WINDOW` is used to specify the column on which the status window will be based. For example:
+
+```
+SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
+```
+
+## Session Window
+
+```sql
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
+```
+
+The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
+
+![TDengine Database Session Window](./timewindow-2.webp)
+
+If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now.
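+
+For example, using the 12-second tolerance discussed above, a session window query against the sample table could be sketched as:
+
+```sql
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
+```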
+
+## More On Window Aggregate
+
+### Syntax
+
+The full syntax of aggregate by window is as follows:
+
+```sql
+SELECT function_list FROM tb_name
+  [WHERE where_condition]
+  [SESSION(ts_col, tol_val)]
+  [STATE_WINDOW(col)]
+  [INTERVAL(interval [, offset]) [SLIDING sliding]]
+  [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+
+SELECT function_list FROM stb_name
+  [WHERE where_condition]
+  [INTERVAL(interval [, offset]) [SLIDING sliding]]
+  [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+  [GROUP BY tags]
+```
+
+### Restrictions
+
+- Aggregate functions and selection functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions with multiple outputs, such as DIFF, and arithmetic operations can't be used.
+- `LAST_ROW` can't be used together with window aggregate.
+- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
+- The `WHERE` clause can be used to specify the starting and ending time and other filter conditions.
+- The `FILL` clause is used to specify how to fill when data is missing in any window, including:
+  1. NONE: No fill (the default fill mode)
+  2. VALUE: Fill with a fixed value, which must be specified along with it, for example `FILL(VALUE, 1.23)`
+  3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
+  4. NULL: Fill with NULL, `FILL(NULL)`
+  5. LINEAR: Fill by linear interpolation based on the nearest non-NULL values, `FILL(LINEAR)`
+  6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
+
+:::info
+
+1. A huge volume of interpolation output may be returned when using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
+2. The result set is in ascending order of timestamp when you aggregate by time window.
+3. If aggregate by window is used on an STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
+
+:::
+
+Aggregate by time window is also used in continuous queries; please refer to [Continuous Query](../../develop/continuous-query).
+
+## Examples
+
+A table of intelligent meters can be created by the SQL statement below:
+
+```sql
+CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+```
+
+The average current, maximum current and median current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL value.
+
+```
+SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
+  WHERE ts>=NOW-1d and ts<=now
+  INTERVAL(10m)
+  FILL(PREV);
+```
diff --git a/docs-en/12-taos-sql/09-limit.md b/docs/en/12-taos-sql/14-limit.md
similarity index 100%
rename from docs-en/12-taos-sql/09-limit.md
rename to docs/en/12-taos-sql/14-limit.md
diff --git a/docs/en/12-taos-sql/16-json.md b/docs/en/12-taos-sql/16-json.md
new file mode 100644
index 0000000000000000000000000000000000000000..61c473d120fefba1ec92902f3e68aa7037875a72
--- /dev/null
+++ b/docs/en/12-taos-sql/16-json.md
@@ -0,0 +1,93 @@
+---
+title: JSON Type
+---
+
+## Syntax
+
+1. Tag of type JSON
+
+   ```sql
+   create STable s1 (ts timestamp, v1 int) tags (info json);
+
+   create table s1_1 using s1 tags ('{"k1": "v1"}');
+   ```
+
+2. "->" Operator of JSON
+
+   ```sql
+   select * from s1 where info->'k1' = 'v1';
+
+   select info->'k1' from s1;
+   ```
+
+3. "contains" Operator of JSON
+
+   ```sql
+   select * from s1 where info contains 'k2';
+
+   select * from s1 where info contains 'k1';
+   ```
+
+## Applicable Operations
+
+1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used, but `in` can't be used.
+
+   ```sql
+   select * from s1 where info->'k1' match 'v*';
+
+   select * from s1 where info->'k1' like 'v%' and info contains 'k2';
+
+   select * from s1 where info is null;
+
+   select * from s1 where info->'k1' is not null;
+   ```
+
+2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and subqueries; for example `group by json->'key'`
+
+3. `Distinct` can be used with a tag of type JSON
+
+   ```sql
+   select distinct info->'k1' from s1;
+   ```
+
+4. Tag Operations
+
+   The value of a JSON tag can be altered. Please note that the full JSON value will be overwritten when doing this:
+
+   ```sql
+   alter table s1_1 set tag info = '{"k1": "v2"}';
+   ```
+
+   The name of a JSON tag can be altered:
+
+   ```sql
+   alter stable s1 change tag info info2;
+   ```
+
+   A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
+
+## Other Restrictions
+
+- JSON type can only be used for a tag. There can be only one tag of JSON type, and it can't coexist with tags of any other type.
+
+- The maximum length of keys in JSON is 256 bytes, and keys must be printable ASCII characters. The maximum total length of a JSON value is 4,096 bytes.
+
+- JSON format:
+
+  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-empty plain string, a bool or an array.
+  - An object can be {}, in which case the entire JSON value is treated as empty. A key can be "", in which case it is ignored.
+  - A value can be an int, double, string, bool or NULL, but it can't be an array. Nesting is not allowed, which means the value of a key can't be a JSON object.
+  - If one key occurs twice in the JSON, only the first one is valid.
+  - Escape characters are not allowed in JSON.
+
+- NULL is returned when querying a key that doesn't exist in the JSON.
+
+- If a JSON tag is the result of an inner query, it can't be parsed and queried in the outer query.
+
+For example, the SQL statements below are not supported.
+
+```sql
+select jtag->'key' from (select jtag from STable);
+select jtag->'key' from (select jtag from STable) where jtag->'key'>0;
+```
diff --git a/docs-en/12-taos-sql/11-escape.md b/docs/en/12-taos-sql/18-escape.md
similarity index 100%
rename from docs-en/12-taos-sql/11-escape.md
rename to docs/en/12-taos-sql/18-escape.md
diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e79a07362476501f9cfc3f3d03ff59abd25abc9
--- /dev/null
+++ b/docs/en/12-taos-sql/20-keywords.md
@@ -0,0 +1,315 @@
+---
+title: Keywords
+---
+
+There are about 200 keywords reserved by TDengine. They can't be used as the name of a database, STable or table, whether in upper case, lower case or mixed case.
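+
+For illustration, a sketch of a keyword collision (hypothetical table and column names; `DESC` appears in the list below):
+
+```sql
+CREATE TABLE tb1 (ts TIMESTAMP, desc BINARY(10));    -- fails: DESC is a reserved keyword
+CREATE TABLE tb1 (ts TIMESTAMP, remark BINARY(10));  -- works: non-reserved column name
+```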
+
+## Keyword List
+
+### A
+
+- ABORT
+- ACCOUNT
+- ACCOUNTS
+- ADD
+- AFTER
+- ALL
+- ALTER
+- AND
+- AS
+- ASC
+- ATTACH
+
+### B
+
+- BEFORE
+- BEGIN
+- BETWEEN
+- BIGINT
+- BINARY
+- BITAND
+- BITNOT
+- BITOR
+- BLOCKS
+- BOOL
+- BY
+
+### C
+
+- CACHE
+- CACHELAST
+- CASCADE
+- CHANGE
+- CLUSTER
+- COLON
+- COLUMN
+- COMMA
+- COMP
+- COMPACT
+- CONCAT
+- CONFLICT
+- CONNECTION
+- CONNECTIONS
+- CONNS
+- COPY
+- CREATE
+- CTIME
+
+### D
+
+- DATABASE
+- DATABASES
+- DAYS
+- DBS
+- DEFERRED
+- DELIMITERS
+- DELETE
+- DESC
+- DESCRIBE
+- DETACH
+- DISTINCT
+- DIVIDE
+- DNODE
+- DNODES
+- DOT
+- DOUBLE
+- DROP
+
+### E
+
+- END
+- EQ
+- EXISTS
+- EXPLAIN
+
+### F
+
+- FAIL
+- FILE
+- FILL
+- FLOAT
+- FOR
+- FROM
+- FSYNC
+
+### G
+
+- GE
+- GLOB
+- GRANTS
+- GROUP
+- GT
+
+### H
+
+- HAVING
+
+### I
+
+- ID
+- IF
+- IGNORE
+- IMMEDIATE
+- IMPORT
+- IN
+- INITIALLY
+- INSERT
+- INSTEAD
+- INT
+- INTEGER
+- INTERVAL
+- INTO
+- IS
+- ISNULL
+
+### J
+
+- JOIN
+
+### K
+
+- KEEP
+- KEY
+- KILL
+
+### L
+
+- LE
+- LIKE
+- LIMIT
+- LINEAR
+- LOCAL
+- LP
+- LSHIFT
+- LT
+
+### M
+
+- MATCH
+- MAXROWS
+- MINROWS
+- MINUS
+- MNODES
+- MODIFY
+- MODULES
+
+### N
+
+- NE
+- NONE
+- NOT
+- NOTNULL
+- NOW
+- NULL
+
+### O
+
+- OF
+- OFFSET
+- OR
+- ORDER
+
+### P
+
+- PARTITION
+- PASS
+- PLUS
+- PPS
+- PRECISION
+- PREV
+- PRIVILEGE
+
+### Q
+
+- QTIME
+- QUERIES
+- QUERY
+- QUORUM
+
+### R
+
+- RAISE
+- REM
+- REPLACE
+- REPLICA
+- RESET
+- RESTRICT
+- ROW
+- RP
+- RSHIFT
+
+### S
+
+- SCORES
+- SELECT
+- SEMI
+- SESSION
+- SET
+- SHOW
+- SLASH
+- SLIDING
+- SLIMIT
+- SMALLINT
+- SOFFSET
+- STABLE
+- STABLES
+- STAR
+- STATE
+- STATEMENT
+- STATE_WINDOW
+- STORAGE
+- STREAM
+- STREAMS
+- STRING
+- SYNCDB
+
+### T
+
+- TABLE
+- TABLES
+- TAG
+- TAGS
+- TBNAME
+- TIMES
+- TIMESTAMP
+- TINYINT
+- TOPIC
+- TOPICS
+- TRIGGER
+- TSERIES
+
+### U
+
+- UMINUS
+- UNION
+- UNSIGNED
+- UPDATE
+- UPLUS
+- USE
+- USER
+- USERS
+- USING
+
+### V
+
+- VALUES
+- VARIABLE
+- VARIABLES
+- VGROUPS
+- VIEW
+- VNODES
+
+### W
+
+- WAL
+- WHERE
+
+### _
+
+- _C0
+- _QSTART
+- _QSTOP
+- _QDURATION
+- _WSTART
+- _WSTOP
+- _WDURATION
+
+## Explanations
+
+### TBNAME
+
+`TBNAME` can be considered as a special tag that represents the name of a subtable within an STable.
+
+Get the table name and tag values of all subtables in an STable.
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+Count the number of subtables in an STable.
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+In the above two query statements, only filters on tags can be used in the WHERE clause.
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location |
+==================================================================
+ d1004 | California.SanFrancisco |
+ d1003 | California.SanFrancisco |
+ d1002 | California.LosAngeles |
+ d1001 | California.LosAngeles |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+
+### _QSTART/_QSTOP/_QDURATION
+
+The start, stop and duration of a query time window (since version 2.6.0.0).
+
+### _WSTART/_WSTOP/_WDURATION
+
+The start, stop and duration of an aggregate query by time window, like interval, session window or state window (since version 2.6.0.0).
+
+### _c0
+
+The first column of a table or STable.
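+
+For example, `_c0` can be used wherever the first (timestamp) column is referenced; a sketch against the `meters` STable used above:
+```mysql
+SELECT COUNT(_c0) FROM meters;
+```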
diff --git a/docs-en/12-taos-sql/_category_.yml b/docs/en/12-taos-sql/_category_.yml
similarity index 100%
rename from docs-en/12-taos-sql/_category_.yml
rename to docs/en/12-taos-sql/_category_.yml
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d1cb04ad4005372bb9d3a41c1c98533071ac4b2
--- /dev/null
+++ b/docs/en/12-taos-sql/index.md
@@ -0,0 +1,31 @@
+---
+title: TDengine SQL
+description: "The syntax supported by TDengine SQL"
+---
+
+This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL.
+
+TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. However, TDengine Enterprise Edition provides the DELETE function since version 2.6.
+
+Syntax Specifications used in this chapter:
+
+- The content inside <\> needs to be input by the user, excluding <\> itself.
+- \[ \] means optional input, excluding [] itself.
+- | means one of a few options, excluding | itself.
+- … means the item prior to it can be repeated multiple times.
+
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+
+```sql
+taos> DESCRIBE meters;
+ Field | Type | Length | Note |
+=================================================================================
+ ts | TIMESTAMP | 8 | |
+ current | FLOAT | 4 | |
+ voltage | INT | 4 | |
+ phase | FLOAT | 4 | |
+ location | BINARY | 64 | TAG |
+ groupid | INT | 4 | TAG |
+```
+
+The data set includes the data collected by 4 meters; based on the data model of TDengine, the corresponding table names are d1001, d1002, d1003 and d1004.
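+
+For example, a subtable for one of the meters can be created from this data model with a sketch like the following (tag values are illustrative):
+
+```sql
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
+```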
diff --git a/docs-cn/12-taos-sql/timewindow-1.webp b/docs/en/12-taos-sql/timewindow-1.webp
similarity index 100%
rename from docs-cn/12-taos-sql/timewindow-1.webp
rename to docs/en/12-taos-sql/timewindow-1.webp
diff --git a/docs-cn/12-taos-sql/timewindow-2.webp b/docs/en/12-taos-sql/timewindow-2.webp
similarity index 100%
rename from docs-cn/12-taos-sql/timewindow-2.webp
rename to docs/en/12-taos-sql/timewindow-2.webp
diff --git a/docs-cn/12-taos-sql/timewindow-3.webp b/docs/en/12-taos-sql/timewindow-3.webp
similarity index 100%
rename from docs-cn/12-taos-sql/timewindow-3.webp
rename to docs/en/12-taos-sql/timewindow-3.webp
diff --git a/docs-en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
similarity index 100%
rename from docs-en/13-operation/01-pkg-install.md
rename to docs/en/13-operation/01-pkg-install.md
diff --git a/docs-en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx
similarity index 100%
rename from docs-en/13-operation/02-planning.mdx
rename to docs/en/13-operation/02-planning.mdx
diff --git a/docs-en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md
similarity index 100%
rename from docs-en/13-operation/03-tolerance.md
rename to docs/en/13-operation/03-tolerance.md
diff --git a/docs-en/13-operation/06-admin.md b/docs/en/13-operation/06-admin.md
similarity index 100%
rename from docs-en/13-operation/06-admin.md
rename to docs/en/13-operation/06-admin.md
diff --git a/docs-en/13-operation/07-import.md b/docs/en/13-operation/07-import.md
similarity index 100%
rename from docs-en/13-operation/07-import.md
rename to docs/en/13-operation/07-import.md
diff --git a/docs-en/13-operation/08-export.md b/docs/en/13-operation/08-export.md
similarity index 100%
rename from docs-en/13-operation/08-export.md
rename to docs/en/13-operation/08-export.md
diff --git a/docs-en/13-operation/09-status.md b/docs/en/13-operation/09-status.md
similarity index 100%
rename from docs-en/13-operation/09-status.md
rename to docs/en/13-operation/09-status.md
diff --git a/docs-en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md
similarity index 100%
rename from docs-en/13-operation/10-monitor.md
rename to docs/en/13-operation/10-monitor.md
diff --git a/docs-en/13-operation/11-optimize.md b/docs/en/13-operation/11-optimize.md
similarity index 100%
rename from docs-en/13-operation/11-optimize.md
rename to docs/en/13-operation/11-optimize.md
diff --git a/docs-en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md
similarity index 100%
rename from docs-en/13-operation/17-diagnose.md
rename to docs/en/13-operation/17-diagnose.md
diff --git a/docs-en/13-operation/_category_.yml b/docs/en/13-operation/_category_.yml
similarity index 100%
rename from docs-en/13-operation/_category_.yml
rename to docs/en/13-operation/_category_.yml
diff --git a/docs-en/13-operation/index.md b/docs/en/13-operation/index.md
similarity index 100%
rename from docs-en/13-operation/index.md
rename to docs/en/13-operation/index.md
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fe18349a6dae3ad44772b4a30a2c3d4ad75b0f47
--- /dev/null
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -0,0 +1,307 @@
+---
+title: REST API
+---
+
+To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles, namely the REST API. To minimize the learning cost, unlike the REST APIs of other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request to operate the database.
+
+:::note
+One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name as a prefix. (Since version 2.2.0.0, TDengine supports specifying the db_name in the RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, the REST service is provided by taosAdapter by default, and it requires that the `db_name` must be specified in the URL.)
+:::
+
+## Installation
+
+The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol.
+
+## Verification
+
+If the TDengine server is already installed, it can be verified as follows:
+
+The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
+
+The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
+```
+
+The following return value indicates that the verification passed.
+
+```json
+{
+  "status": "succ",
+  "head": [
+    "name",
+    "created_time",
+    "ntables",
+    "vgroups",
+    "replica",
+    "quorum",
+    "days",
+    "keep1,keep2,keep(D)",
+    "cache(MB)",
+    "blocks",
+    "minrows",
+    "maxrows",
+    "wallevel",
+    "fsync",
+    "comp",
+    "precision",
+    "status"
+  ],
+  "data": [
+    [
+      "log",
+      "2020-09-02 17:23:00.039",
+      4,
+      1,
+      1,
+      1,
+      10,
+      "30,30,30",
+      1,
+      3,
+      100,
+      4096,
+      1,
+      3000,
+      2,
+      "us",
+      "ready"
+    ]
+  ],
+  "rows": 1
+}
+```
+
+## HTTP request URL format
+
+```
+http://<fqdn>:<port>/rest/sql/[db_name]
+```
+
+Parameter Description:
+
+- fqdn: FQDN or IP address of any host in the cluster
+- port: httpPort configuration item in the configuration file, default is 6041
+- db_name: Optional parameter that specifies the default database name for the executed SQL command. (supported since version 2.2.0.0)
+
+For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
+
+TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
+
+- The custom authentication information is as follows. More details about the "token" are given later.
+
+  ```
+  Authorization: Taosd <TOKEN>
+  ```
+
+- Basic authentication information is shown below
+
+  ```
+  Authorization: Basic <TOKEN>
+  ```
+
+The HTTP request's BODY is a complete SQL command, and the data table in the SQL statement should be provided with a database prefix, e.g., `db_name.tb_name`. If the table name does not have a database prefix and the database name is not specified in the URL, the system will respond with an error, because the HTTP module is a simple forwarder and has no awareness of the current DB.
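+
+For example, querying a table with the database name as prefix (reusing the host and token from the verification example above; `test.meters` is a placeholder table):
+
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from test.meters" h1.taosdata.com:6041/rest/sql
+```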
+
+Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
+
+```bash
+curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <fqdn>:<port>/rest/sql/[db_name]
+```
+
+Or
+
+```bash
+curl -L -u username:password -d "<SQL>" <fqdn>:<port>/rest/sql/[db_name]
+```
+
+where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
+
+## HTTP Return Format
+
+The return result is in JSON format, as follows:
+
+```json
+{
+  "status": "succ",
+  "head": ["ts", "current", ...],
+  "column_meta": [["ts",9,8],["current",6,4], ...],
+  "data": [
+    ["2018-10-03 14:38:05.000", 10.3, ...],
+    ["2018-10-03 14:38:15.000", 12.6, ...]
+  ],
+  "rows": 2
+}
+```
+
+Description:
+
+- status: tells you whether the operation result is success or failure.
+- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.)
+- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, i.e. a float takes 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes.
+- data: the exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta.
+- rows: indicates how many rows of data there are.
+
+The column types in column_meta are described as follows:
+
+- 1: BOOL
+- 2: TINYINT
+- 3: SMALLINT
+- 4: INT
+- 5: BIGINT
+- 6: FLOAT
+- 7: DOUBLE
+- 8: BINARY
+- 9: TIMESTAMP
+- 10: NCHAR
+
+## Custom Authorization Code
+
+HTTP requests require an authorization code `<TOKEN>` for identification purposes. The administrator usually provides the authorization code, and it can be obtained simply by sending an `HTTP GET` request as follows:
+
+```bash
+curl http://<fqdn>:<port>/rest/login/<username>/<password>
+```
+
+Where `fqdn` is the FQDN or IP address of the TDengine database, `port` is the port number of the TDengine service, `username` is the database username and `password` is the database password. The return value is in `JSON` format, and the meaning of each field is as follows.
+
+- status: flag bit of the request result
+
+- code: return value code
+
+- desc: authorization code
+
+An example of getting an authorization code:
+
+```bash
+curl http://192.168.0.1:6041/rest/login/root/taosdata
+```
+
+Response body:
+
+```json
+{
+  "status": "succ",
+  "code": 0,
+  "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
+}
+```
+
+## Examples
+
+- Query all records from table d1001 of database demo:
+
+  ```bash
+  curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
+  ```
+
+  Response body:
+
+  ```json
+  {
+    "status": "succ",
+    "head": ["ts", "current", "voltage", "phase"],
+    "column_meta": [
+      ["ts", 9, 8],
+      ["current", 6, 4],
+      ["voltage", 4, 4],
+      ["phase", 6, 4]
+    ],
+    "data": [
+      ["2018-10-03 14:38:05.000", 10.3, 219, 0.31],
+      ["2018-10-03 14:38:15.000", 12.6, 218, 0.33]
+    ],
+    "rows": 2
+  }
+  ```
+
+- Create database demo:
+
+  ```bash
+  curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
+  ```
+
+  Response body:
+
+  ```json
+  {
+    "status": "succ",
+    "head": ["affected_rows"],
+    "column_meta": [["affected_rows", 4, 4]],
+    "data": [[1]],
+    "rows": 1
+  }
+  ```
+
+## Other Uses
+
+### Unix timestamps for result sets
+
+When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp values will be in Unix timestamp format, for example:
+
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
+```
+
+Response body:
+
+```json
+{
+  "status": "succ",
+  "head": ["ts", "current", "voltage", "phase"],
+  "column_meta": [
+    ["ts", 9, 8],
+    ["current", 6, 4],
+    ["voltage", 4, 4],
+    ["phase", 6, 4]
+  ],
+  "data": [
+    [1538548685000, 10.3, 219, 0.31],
+    [1538548695000, 12.6, 218, 0.33]
+  ],
+  "rows": 2
+}
+```
+
+### UTC format for the result set
+
+When the HTTP request URL uses `/rest/sqlutc`, the timestamps of the returned result set will be expressed in UTC format, for example:
+
+```bash
+ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
+```
+
+Response body:
+
+```json
+{
+  "status": "succ",
+  "head": ["ts", "current", "voltage", "phase"],
+  "column_meta": [
+    ["ts", 9, 8],
+    ["current", 6, 4],
+    ["voltage", 4, 4],
+    ["phase", 6, 4]
+  ],
+  "data": [
+    ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31],
+    ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33]
+  ],
+  "rows": 2
+}
+```
+
+## Important configuration items
+
+Only some configuration parameters related to the RESTful interface are listed below. Please see the description in the configuration file for other system parameters.
+
+- The port number of the external RESTful service is bound to 6041 by default (the actual value is serverPort + 11, so it can be changed by modifying the setting of the serverPort parameter).
+- httpMaxThreads: the number of threads to start, default is 2 (the default value is rounded down to half of the CPU cores with version 2.0.17.0 and later versions).
+- restfulRowLimit: the maximum number of result rows (in JSON format) to return. The default value is 10240.
+- httpEnableCompress: whether to support compression; not supported by default. Currently, TDengine only supports the gzip compression format.
+- httpDebugFlag: logging switch, default is 131. 131: error and alarm messages only, 135: debug messages, 143: very detailed debug messages.
+- httpDbNameMandatory: users must specify the default database name in the RESTful URL. The default is 0, which turns off this check. If set to 1, users must put a default database name in every RESTful URL; otherwise, the request is rejected with an execution error, regardless of whether the SQL statement actually requires a database to be specified.
+
+:::note
+If you are using the REST API provided by taosd, you should write the above configuration in taosd's configuration file taos.cfg. If you use the REST API of taosAdapter, you need to refer to taosAdapter's [corresponding configuration method](/reference/taosadapter/).
+:::
diff --git a/docs-cn/14-reference/02-rest-api/_category_.yml b/docs/en/14-reference/02-rest-api/_category_.yml
similarity index 100%
rename from docs-cn/14-reference/02-rest-api/_category_.yml
rename to docs/en/14-reference/02-rest-api/_category_.yml
diff --git a/docs-en/14-reference/03-connector/03-connector.mdx b/docs/en/14-reference/03-connector/03-connector.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/03-connector.mdx
rename to docs/en/14-reference/03-connector/03-connector.mdx
diff --git a/docs-en/14-reference/03-connector/_category_.yml b/docs/en/14-reference/03-connector/_category_.yml
similarity index 100%
rename from docs-en/14-reference/03-connector/_category_.yml
rename to docs/en/14-reference/03-connector/_category_.yml
diff --git a/docs-en/14-reference/03-connector/_linux_install.mdx b/docs/en/14-reference/03-connector/_linux_install.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/_linux_install.mdx
rename to docs/en/14-reference/03-connector/_linux_install.mdx
diff --git a/docs-en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/_preparation.mdx
rename to docs/en/14-reference/03-connector/_preparation.mdx
diff --git a/docs-en/14-reference/03-connector/_verify_linux.mdx b/docs/en/14-reference/03-connector/_verify_linux.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/_verify_linux.mdx
rename to docs/en/14-reference/03-connector/_verify_linux.mdx
diff --git a/docs/en/14-reference/03-connector/_verify_windows.mdx b/docs/en/14-reference/03-connector/_verify_windows.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..daeb151bb1252436c0ef16eab1d50a64d664e437
--- /dev/null
+++ b/docs/en/14-reference/03-connector/_verify_windows.mdx
@@ -0,0 +1,14 @@
+Go to the `C:\TDengine` directory from `cmd` and execute the TDengine CLI program `taos.exe` directly to connect to the TDengine service and enter the TDengine CLI interface, for example, as follows:
+
+```text
+ C:\TDengine>taos
+ Welcome to the TDengine shell from Windows, Client Version:2.0.5.0
+ Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
+
+ taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
+ ===================================================================================================================================================================================================================================================================
+ test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
+ log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
+ Query OK, 2 row(s) in set (0.045000s)
+ taos>
+```
diff --git a/docs-en/14-reference/03-connector/_windows_install.mdx b/docs/en/14-reference/03-connector/_windows_install.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/_windows_install.mdx
rename to docs/en/14-reference/03-connector/_windows_install.mdx
diff --git a/docs-cn/14-reference/03-connector/connector.webp b/docs/en/14-reference/03-connector/connector.webp
similarity index 100%
rename from docs-cn/14-reference/03-connector/connector.webp
rename to docs/en/14-reference/03-connector/connector.webp
diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs/en/14-reference/03-connector/cpp.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/cpp.mdx
rename to docs/en/14-reference/03-connector/cpp.mdx
diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs/en/14-reference/03-connector/csharp.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/csharp.mdx
rename to docs/en/14-reference/03-connector/csharp.mdx
diff --git a/docs/en/14-reference/03-connector/go.mdx b/docs/en/14-reference/03-connector/go.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..69e1b56f38c3fe6feb1766abd1eea532e130ed49
--- /dev/null
+++ b/docs/en/14-reference/03-connector/go.mdx
@@ -0,0 +1,412 @@
+---
+toc_max_heading_level: 4
+sidebar_position: 4
+sidebar_label: Go
+title: TDengine Go Connector
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import Preparation from "./_preparation.mdx";
+import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx";
+import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx";
+import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx";
+import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx";
+import GoQuery from "../../07-develop/04-query-data/_go.mdx";
+
+`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
+
+`driver-go` provides two ways to establish connections. One is the **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and the bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection.
+
+This article describes how to install `driver-go`, connect to TDengine clusters, and perform basic operations such as data query and data writing through `driver-go`.
+
+The source code of `driver-go` is hosted on [GitHub](https://github.com/taosdata/driver-go).
+
+## Supported Platforms
+
+Native connections are supported on the same platforms as the TDengine client driver.
+REST connections are supported on all platforms that can run Go.
+
+## Version support
+
+Please refer to [version support list](/reference/connector#version-support)
+
+## Supported features
+
+### Native connections
+
+A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are:
+
+- Normal queries
+- Continuous queries
+- Subscriptions
+- schemaless interface
+- parameter binding interface
+
+### REST connection
+
+A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported:
+
+- General queries
+- Continuous queries
+
+## Installation steps
+
+### Pre-installation
+
+- Install the Go development environment (Go 1.14 and above, GCC 4.8.5 and above)
+- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps
+
+Configure the environment variables and check with the commands below.
+
+- `go env`
+- `gcc -v`
+
+### Use go get to install
+
+```
+go get -u github.com/taosdata/driver-go/v2@latest
+```
+
+### Manage with go mod
+
+1. Initialize the project with the `go mod` command.
+
+```text
+go mod init taos-demo
+```
+
+2. Import taosSql
+
+```go
+import (
+    "database/sql"
+    _ "github.com/taosdata/driver-go/v2/taosSql"
+)
+```
+
+3. Update the dependency packages with `go mod tidy`.
+
+```text
+go mod tidy
+```
+
+4. Run the program with `go run taos-demo` or compile the binary with the `go build` command.
+
+```text
+go run taos-demo
+go build
+```
+
+## Create a connection
+
+### Data source name (DSN)
+
+Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (square brackets indicate optional parts):
+
+```text
+[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN]
+```
+
+The full form of a DSN:
+
+```text
+username:password@protocol(address)/dbname?param=value
+```
+
+### Connecting via connector
+
+<Tabs defaultValue="native">
+<TabItem value="native" label="native connection">
+
+_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
+
+Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`. The DSN supports the following parameters.
+
+- configPath specifies the `taos.cfg` directory
+
+Example.
+
+```go
+package main
+
+import (
+    "database/sql"
+    "fmt"
+
+    _ "github.com/taosdata/driver-go/v2/taosSql"
+)
+
+func main() {
+    var taosUri = "root:taosdata@tcp(localhost:6030)/"
+    taos, err := sql.Open("taosSql", taosUri)
+    if err != nil {
+        fmt.Println("failed to connect TDengine, err:", err)
+        return
+    }
+    defer taos.Close()
+}
+```
+
+</TabItem>
+<TabItem value="rest" label="REST connection">
+
+_taosRestful_ implements Go's `database/sql/driver` interface via `http client`. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
+
+Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, with the following parameters supported by the DSN.
+
+- `disableCompression` whether to accept compressed data; the default is true (do not accept compressed data). Set it to false if transferring data using gzip compression.
+- `readBufferSize` the default size of the buffer for reading data is 4K (4096), which can be adjusted upwards when the query result has a lot of data.
+
+Example.
+
+```go
+package main
+
+import (
+    "database/sql"
+    "fmt"
+
+    _ "github.com/taosdata/driver-go/v2/taosRestful"
+)
+
+func main() {
+    var taosUri = "root:taosdata@http(localhost:6041)/"
+    taos, err := sql.Open("taosRestful", taosUri)
+    if err != nil {
+        fmt.Println("failed to connect TDengine, err:", err)
+        return
+    }
+    defer taos.Close()
+}
+```
+
+</TabItem>
+</Tabs>
+
+## Usage examples
+
+### Write data
+
+#### SQL Write
+
+<GoInsert />
+
+#### InfluxDB line protocol write
+
+<GoInfluxLine />
+
+#### OpenTSDB Telnet line protocol write
+
+<GoOpenTSDBTelnet />
+
+#### OpenTSDB JSON line protocol write
+
+<GoOpenTSDBJson />
+
+### Query data
+
+<GoQuery />
+
+### More sample programs
+
+- [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go)
+- [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
+
+## Usage limitations
+
+Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. change `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)`; otherwise it will report the error `[0x217] Database not specified or available`.
+
+You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writes against that db will report an error.
+
+The complete example is as follows.
+
+```go
+package main
+
+import (
+    "database/sql"
+    "fmt"
+    "time"
+
+    _ "github.com/taosdata/driver-go/v2/taosRestful"
+)
+
+func main() {
+    var taosDSN = "root:taosdata@http(localhost:6041)/test"
+    taos, err := sql.Open("taosRestful", taosDSN)
+    if err != nil {
+        fmt.Println("failed to connect TDengine, err:", err)
+        return
+    }
+    defer taos.Close()
+    taos.Exec("create database if not exists test")
+    taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
+    _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
+    if err != nil {
+        fmt.Println("failed to insert, err:", err)
+        return
+    }
+    rows, err := taos.Query("select * from tb1")
+    if err != nil {
+        fmt.Println("failed to select from table, err:", err)
+        return
+    }
+
+    defer rows.Close()
+    for rows.Next() {
+        var r struct {
+            ts time.Time
+            a  int
+        }
+        err := rows.Scan(&r.ts, &r.a)
+        if err != nil {
+            fmt.Println("scan error:\n", err)
+            return
+        }
+        fmt.Println(r.ts, r.a)
+    }
+}
+```
+
+## Frequently Asked Questions
+
+1. bind interface in database/sql crashes
+
+   REST does not support the parameter binding interface. It is recommended to use `db.Exec` and `db.Query`.
+
+2. error `[0x217] Database not specified or available` after executing other statements with the `use db` statement
+
+   The execution of SQL commands in the REST interface is not contextual, so the `use db` statement will not work. See the usage restrictions section above.
+
+3. Using `taosSql` reports no error, but using `taosRestful` reports the error `[0x217] Database not specified or available`
+
+   Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above.
+
+4. Upgrade `github.com/taosdata/driver-go/v2/taosRestful`
+
+   Change the `github.com/taosdata/driver-go/v2` line in the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`.
+
+5. `readBufferSize` parameter has no significant effect after being increased
+
+   Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance.
+
+6. Query efficiency is reduced when the `disableCompression` parameter is set to `false`
+
+   When the `disableCompression` parameter is set to `false`, the query result will be compressed with `gzip` before being transmitted, so you have to decompress the data with `gzip` after receiving it.
+
+7. `go get` command can't get the package, or times out while getting the package
+
+   Set the Go proxy: `go env -w GOPROXY=https://goproxy.cn,direct`.
+
+## Common APIs
+
+### database/sql API
+
+- `sql.Open(DRIVER_NAME string, dataSourceName string) (*DB, error)`
+
+  Use this API to open a DB; it returns an object of type \*DB and an error.
+
+  :::info
+  This API call succeeds without checking permissions; user/password/host/port are only validated when you execute a Query or Exec.
+
+  :::
+
+- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
+
+  `sql.Open` built-in method to execute non-query related SQL.
+
+- `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
+
+  `sql.Open` built-in method to execute query statements.
+
+### Advanced functions (af) API
+
+The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless writing, and parameter binding.
+
+#### Connection management
+
+- `af.Open(host, user, pass, db string, port int) (*Connector, error)`
+
+  This API creates a connection to taosd via cgo.
+
+- `func (conn *Connector) Close() error`
+
+  Closes the connection.
+
+#### Subscription
+
+- `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)`
+
+  Subscribe to data.
+
+- `func (s *taosSubscriber) Consume() (driver.Rows, error)`
+
+  Consume the subscription data, returning the `Rows` structure of the `database/sql/driver` package.
+
+- `func (s *taosSubscriber) Unsubscribe(keepProgress bool)`
+
+  Unsubscribe from data.
+
+#### schemaless
+
+- `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error`
+
+  Write InfluxDB line protocol data.
+
+- `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error`
+
+  Write OpenTSDB telnet protocol data.
+
+- `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error`
+
+  Write OpenTSDB JSON protocol data.
+
+#### parameter binding
+
+- `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)`
+
+  Parameter bound single row insert.

- `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)`

  Parameter-bound query that returns the `Rows` structure of the `database/sql/driver` package.

- `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt`

  Initialize a parameter binding statement.

- `func (stmt *InsertStmt) Prepare(sql string) error`

  Preprocess the SQL statement for parameter binding.

- `func (stmt *InsertStmt) SetTableName(name string) error`

  Set the table name for parameter binding.

- `func (stmt *InsertStmt) SetSubTableName(name string) error`

  Set the sub-table name for parameter binding.

- `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error`

  Bind multiple rows of data.

- `func (stmt *InsertStmt) AddBatch() error`

  Add to a parameter-bound batch.

- `func (stmt *InsertStmt) Execute() error`

  Execute a parameter binding.

- `func (stmt *InsertStmt) GetAffectedRows() int`

  Gets the number of rows inserted by the parameter binding.

- `func (stmt *InsertStmt) Close() error`

  Closes the parameter binding.

## API Reference

For the complete API, see the [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v2)
diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/java.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..22f99bb9ae8fa669155ba8ac7cec1ad2c609cb32
--- /dev/null
+++ b/docs/en/14-reference/03-connector/java.mdx
@@ -0,0 +1,854 @@
---
toc_max_heading_level: 4
sidebar_position: 2
sidebar_label: Java
title: TDengine Java Connector
description: TDengine Java connector based on the JDBC API, providing both native and REST connections
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, subscription, schemaless writing, and the bind interface. The other connects to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The REST connection and the native connection have slight differences in features.

![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp)

The preceding diagram shows two ways for a Java app to access TDengine via the connector:

- JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call the client driver (`libtaos.so` or `taos.dll`) APIs directly, sending write and query requests to the taosd instance located on physical node 2 (pnode2).
- JDBC REST connection: The Java application encapsulates SQL as a REST request via RestfulDriver and sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to the TDengine server and returns the result.

The REST connection, which does not rely on the TDengine client driver, is more convenient and flexible, in addition to being cross-platform. However, its performance is about 30% lower than that of the native connection.

:::info
TDengine's JDBC driver implementation is as consistent as possible with relational database drivers.
Still, there are differences in the use scenarios and technical characteristics between TDengine and relational databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind:

- TDengine does not currently support delete operations for individual data records.
- Transactional operations are not currently supported.

:::

## Supported platforms

The native connection supports the same platforms as the TDengine client driver.
The REST connection supports all platforms that can run Java.

## Version support

Please refer to [Version Support List](/reference/connector#version-support).

## TDengine DataType vs. Java DataType

TDengine currently supports timestamp, number, character, and Boolean types, and the corresponding type conversions with Java are as follows:

| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
| ----------------- | ---------------------------------- | ----------------------------------- |
| TIMESTAMP | java.lang.Long | java.sql.Timestamp |
| INT | java.lang.Integer | java.lang.Integer |
| BIGINT | java.lang.Long | java.lang.Long |
| FLOAT | java.lang.Float | java.lang.Float |
| DOUBLE | java.lang.Double | java.lang.Double |
| SMALLINT | java.lang.Short | java.lang.Short |
| TINYINT | java.lang.Byte | java.lang.Byte |
| BOOL | java.lang.Boolean | java.lang.Boolean |
| BINARY | java.lang.String | byte array |
| NCHAR | java.lang.String | java.lang.String |
| JSON | - | java.lang.String |

**Note**: Only TAG columns support the JSON type.

## Installation steps

### Pre-installation preparation

Before using the Java connector to connect to the database, the following conditions are required:

- Java 1.8 or above runtime environment and Maven 3.6 or above installed
- TDengine client driver installed (required for native connections, not required for REST connections); please refer to [Installing Client Driver](/reference/connector#Install-Client-Driver)

### Install the connectors

- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)

Add the following dependency in the `pom.xml` file of your Maven project:

```xml
<dependency>
  <groupId>com.taosdata.jdbc</groupId>
  <artifactId>taos-jdbcdriver</artifactId>
  <version>2.0.**</version>
</dependency>
```

You can build the Java connector from source code after cloning the TDengine project:

```
git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```

After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.

## Establish a connection

TDengine's JDBC URL specification format is:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`

For establishing connections, native connections differ slightly from REST connections.

```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```

In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to the hostname `taosdemo.com` on port `6030` (the default port for TDengine) and a database named `test`. In this URL, the user name `user` is specified as `root` and the `password` as `taosdata`.

Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows).

The configuration parameters in the URL are as follows:

- user: TDengine login username. The default value is 'root'.
- password: user login password. The default value is 'taosdata'.
- cfgdir: client configuration file directory path; the default is '/etc/taos' on Linux and 'C:/TDengine/cfg' on Windows.
- charset: the character set used by the client; the default is the system character set.
- locale: client locale; the default is the system's current locale.
- timezone: the time zone used by the client; the default is the system's current time zone.
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is false. Enabling batch pulling can improve query performance when the query data volume is large. (A URL combining such parameters is sketched after the cluster example below.)
- batchErrorIgnore: true: when executing `executeBatch` on a Statement and one SQL execution fails in the middle, the following SQL statements will still be executed; false: no statements after the failed SQL are executed. The default value is false.

For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).

**Connect using the TDengine client driver configuration file**

When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster, as below:

1. Do not specify the hostname and port in Java applications.

   ```java
   public Connection getConn() throws Exception{
     Class.forName("com.taosdata.jdbc.TSDBDriver");
     String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
     Properties connProps = new Properties();
     connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
     connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
     connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
     Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
     return conn;
   }
   ```

2. Specify the firstEp and secondEp in the configuration file taos.cfg

   ```shell
   # first fully qualified domain name (FQDN) for TDengine system
   firstEp cluster_node1:6030

   # second fully qualified domain name (FQDN) for TDengine system, for cluster only
   secondEp cluster_node2:6030

   # default system charset
   # charset UTF-8

   # system locale
   # locale en_US.UTF-8
   ```

In the above example, JDBC uses the client's configuration file to establish a connection to the hostname `cluster_node1`, port 6030, and a database named `test`. When the firstEp node in the cluster fails, JDBC attempts to connect to the cluster using secondEp.

In TDengine, as long as one of the nodes in firstEp and secondEp is valid, the connection to the cluster can be established normally.
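
As a minimal sketch of the URL parameters described above, they can simply be appended to the base JDBC URL; the hostname, database, and parameter values here are illustrative assumptions, not required settings:

```java
// Illustrative native-connection URL that enables batch fetching and
// continue-on-error behavior for executeBatch; host and db are hypothetical.
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test"
        + "?user=root&password=taosdata"
        + "&batchfetch=true&batchErrorIgnore=true";
Connection conn = DriverManager.getConnection(jdbcUrl);
```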

:::note
The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located: `/etc/taos/taos.cfg` by default on Linux and `C:\TDengine\cfg\taos.cfg` by default on Windows.

:::

```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```

In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`.

There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required:

1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver".
2. jdbcUrl starting with "jdbc:TAOS-RS://".
3. use 6041 as the connection port.

The configuration parameters in the URL are as follows:

- user: TDengine login username, default value 'root'.
- password: user login password, default value 'taosdata'.
- batchfetch: true: pull the result set in batches when executing queries; false: pull the result set row by row. The default value is false. The JDBC REST connection supports bulk data pulling in taos-jdbcdriver-2.0.38 with TDengine 2.4.0.12 and later versions; in this mode taos-jdbcdriver and TDengine transfer data via a WebSocket connection instead of HTTP. Compared with HTTP, WebSocket enables the JDBC REST connection to support querying large data volumes and improves query performance.
- charset: specify the charset used to parse strings; this parameter is only valid when batchfetch is set to true.
- batchErrorIgnore: true: when executing `executeBatch` on a Statement and one SQL execution fails in the middle, continue to execute the following SQL statements; false: no statements after the failed SQL are executed. The default value is false.
- httpConnectTimeout: REST connection timeout in milliseconds; the default value is 5000 ms.
- httpSocketTimeout: socket timeout in milliseconds; the default value is 5000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds; the default value is 3000 ms. It only takes effect when batchfetch is true.
- useSSL: whether to connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection.

**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.

:::note

- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example:

```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```

- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL.
For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then this SQL can be executed: `insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);`

:::

### Specify the URL and Properties to get the connection

In addition to getting the connection from the specified URL, you can use Properties to specify parameters when the connection is established.

**Note**:

- The client parameters set in the application are process-level. If you want to update the client parameters, you need to restart the application. This is because client parameters are global and take effect only the first time they are set in the application.
- The following sample code is based on taos-jdbcdriver-2.0.36.

```java
public Connection getConn() throws Exception{
  Class.forName("com.taosdata.jdbc.TSDBDriver");
  String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
  connProps.setProperty("debugFlag", "135");
  connProps.setProperty("maxSQLLength", "1048576");
  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
  return conn;
}

public Connection getRestConn() throws Exception{
  Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
  String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
  Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
  return conn;
}
```

In the above example, a connection is established to `taosdemo.com` on port 6030/6041 and a database named `test`. The connection specifies the user name as `root` and the password as `taosdata` in the URL, and specifies the character set, locale, time zone, and whether to enable bulk fetching in connProps.

The configuration parameters in properties are as follows:

- TSDBDriver.PROPERTY_KEY_USER: TDengine login username, default value 'root'.
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batches when executing queries; false: pull the result set row by row. The default value is false.
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing `executeBatch` on a Statement and one SQL execution fails in the middle, continue to execute the following SQL statements; false: no statements after the failed SQL are executed. The default value is false.
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using a JDBC native connection. Client configuration file directory path; default `/etc/taos` on Linux, `C:/TDengine/cfg` on Windows.
- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: only takes effect when using a JDBC native connection. Client locale; the default value is the system's current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using a JDBC native connection. The time zone used by the client; the default value is the system's current time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds; the default value is 5000 ms. It only takes effect when using a JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds; the default value is 5000 ms. It only takes effect when using a JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds; the default value is 3000 ms. It only takes effect when using a JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether to connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection. It only takes effect when using a JDBC REST connection.

For JDBC native connections, you can specify other parameters, such as log level and SQL length, via the URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).

### Priority of configuration parameters

If a configuration parameter is duplicated in the URL, Properties, or client configuration file, the priority of the parameters, from highest to lowest, is as follows:

1. JDBC URL parameters, as described above, specified in the parameters of the JDBC URL.
2. Properties connProps
3. the configuration file taos.cfg of the TDengine client driver when using a native connection

For example, if you specify the password as `taosdata` in the URL and as `taosdemo` in the Properties at the same time, JDBC will use the password in the URL to establish the connection.

## Usage examples

### Create database and tables

```java
Statement stmt = conn.createStatement();

// create database
stmt.executeUpdate("create database if not exists db");

// use database
stmt.executeUpdate("use db");

// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```

> **Note**: If you do not use `use db` to specify the database, all subsequent operations on the table need to prefix the table name with the database name, such as db.tb.

### Insert data

```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");

System.out.println("insert " + affectedRows + " rows.");
```

> `now` is an internal function. The default is the current time of the client's computer.
> `now + 1s` represents the current time of the client plus 1 second; the number is followed by a unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).

### Querying data

```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");

Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){

    ts = resultSet.getTimestamp(1);
    temperature = resultSet.getInt(2);
    humidity = resultSet.getFloat("humidity");

    System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```

> Querying is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.

### Handling exceptions

After an error is reported, the error message and error code can be obtained through SQLException.

```java
try (Statement statement = connection.createStatement()) {
    // executeQuery
    ResultSet resultSet = statement.executeQuery(sql);
    // print result
    printResult(resultSet);
} catch (SQLException e) {
    System.out.println("ERROR Message: " + e.getMessage());
    System.out.println("ERROR Code: " + e.getErrorCode());
    e.printStackTrace();
}
```

There are three types of error codes that the JDBC connector can report:

- Error codes of the JDBC driver itself (error codes between 0x2301 and 0x2350)
- Error codes of the native connection method (error codes between 0x2351 and 0x2400)
- Error codes of other TDengine function modules

For specific error codes, please refer to:

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h)

### Writing data via parameter binding

TDengine's native JDBC connection implementation has significantly improved its support for data writing (INSERT) scenarios via the bind interface in version 2.1.2.0 and later. Writing data this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.

**Note:**

- JDBC REST connections do not currently support the bind interface
- The following sample code is based on taos-jdbcdriver-2.0.36
- The setString method should be called for BINARY type data, and the setNString method for NCHAR type data
- Both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition

```java
public class ParameterBindingDemo {

    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 20;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {

        String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");

        init(conn);

        bindInteger(conn);

        bindFloat(conn);

        bindBoolean(conn);

        bindBytes(conn);

        bindString(conn);

        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_parabind");
            stmt.execute("create database if not exists test_parabind");
            stmt.execute("use test_parabind");
            for (int i = 0; i < schemaList.length; i++) {
                stmt.execute(schemaList[i]);
            }
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t1_" + i);
                // set tags
                pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
                pstmt.setTagLong(3, random.nextLong());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Byte> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setByte(1, f1List);

                ArrayList<Short> f2List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setShort(2, f2List);

                ArrayList<Integer> f3List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f3List.add(random.nextInt(Integer.MAX_VALUE));
                pstmt.setInt(3, f3List);

                ArrayList<Long> f4List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f4List.add(random.nextLong());
                pstmt.setLong(4, f4List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute column
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindFloat(Connection conn) throws SQLException {
        String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";

        TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);

        for (int i = 1; i <= numOfSubTable; i++) {
            // set table name
            pstmt.setTableName("t2_" + i);
            // set tags
            pstmt.setTagFloat(0, random.nextFloat());
            pstmt.setTagDouble(1, random.nextDouble());
            // set columns
            ArrayList<Long> tsList = new ArrayList<>();
            long current = System.currentTimeMillis();
            for (int j = 0; j < numOfRow; j++)
                tsList.add(current + j);
            pstmt.setTimestamp(0, tsList);

            ArrayList<Float> f1List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f1List.add(random.nextFloat());
            pstmt.setFloat(1, f1List);

            ArrayList<Double> f2List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f2List.add(random.nextDouble());
            pstmt.setDouble(2, f2List);

            // add column
            pstmt.columnDataAddBatch();
        }
        // execute
        pstmt.columnDataExecuteBatch();
        // close if no try-with-resources statement is used
        pstmt.close();
    }

    private static void bindBoolean(Connection conn) throws SQLException {
        String sql = "insert into ? using stable3 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t3_" + i);
                // set tags
                pstmt.setTagBoolean(0, random.nextBoolean());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Boolean> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(random.nextBoolean());
                pstmt.setBoolean(1, f1List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindBytes(Connection conn) throws SQLException {
        String sql = "insert into ? using stable4 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t4_" + i);
                // set tags
                pstmt.setTagString(0, new String("abc"));

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add(new String("abc"));
                }
                pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindString(Connection conn) throws SQLException {
        String sql = "insert into ? using stable5 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t5_" + i);
                // set tags
                pstmt.setTagNString(0, "California-abc");

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add("California-abc");
                }
                pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }
}
```

The methods to set TAGS values:

```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```

The methods to set VALUES columns:

```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```

### Schemaless Writing

Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's line protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.

**Note:**

- JDBC REST connections do not currently support schemaless writes
- The following sample code is based on taos-jdbcdriver-2.0.36

```java
public class SchemalessInsertTest {
    private static final String host = "127.0.0.1";
    private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
    private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
    private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";

    public static void main(String[] args) throws SQLException {
        final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        try (Connection connection = DriverManager.getConnection(url)) {
            init(connection);

            SchemalessWriter writer = new SchemalessWriter(connection);
            writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
            writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
            writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
        }
    }

    private static void init(Connection connection) throws SQLException {
        try (Statement stmt = connection.createStatement()) {
            stmt.executeUpdate("drop database if exists test_schemaless");
            stmt.executeUpdate("create database if not exists test_schemaless");
            stmt.executeUpdate("use test_schemaless");
        }
    }
}
```

### Subscriptions

The TDengine Java connector supports subscription functionality with the following application API.

#### Create subscriptions

```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false);
```

The three parameters of the `subscribe()` method have the following meanings:

- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription.
- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and data can only be queried in temporal order.
- restart: if the subscription already exists, whether to restart it or continue the previous subscription.

The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.

#### Subscribe to consume data

```java
int total = 0;
while(true) {
    TSDBResultSet rs = sub.consume();
    int count = 0;
    while(rs.next()) {
        count++;
    }
    total += count;
    System.out.printf("%d rows consumed, total %d\n", count, total);
    Thread.sleep(1000);
}
```

The `consume()` method returns a result set containing all new data since the last `consume()`. Be sure to call `consume()` at a reasonable frequency (e.g., `Thread.sleep(1000)` in the example); otherwise it will put unnecessary stress on the server side.

#### Close subscriptions

```java
sub.close(true);
```

The `close()` method closes a subscription. If its argument is `true`, the subscription progress information is retained, and a subscription with the same name can be created later to continue consuming data; if it is `false`, the subscription progress is not retained.

### Closing resources

```java
resultSet.close();
stmt.close();
conn.close();
```

> **Be sure to close the connection**; otherwise, there will be a connection leak.

### Use with connection pool

#### HikariCP

Example usage is as follows:

```java
 public static void main(String[] args) throws SQLException {
    HikariConfig config = new HikariConfig();
    // jdbc properties
    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
    config.setUsername("root");
    config.setPassword("taosdata");
    // connection pool configurations
    config.setMinimumIdle(10);           // minimum number of idle connections
    config.setMaximumPoolSize(10);       // maximum number of connections in the pool
    config.setConnectionTimeout(30000);  // maximum wait in milliseconds for a connection from the pool
    config.setMaxLifetime(0);            // maximum lifetime for each connection
    config.setIdleTimeout(0);            // maximum idle time before an idle connection is recycled
    config.setConnectionTestQuery("select server_status()"); // validation query

    HikariDataSource ds = new HikariDataSource(config); // create datasource

    Connection connection = ds.getConnection(); // get connection
    Statement statement = connection.createStatement(); // get statement

    // query or insert
    // ...

    connection.close(); // put back to connection pool
}
```

> After `getConnection()`, you need to call the `close()` method when you are finished using the connection. It does not close the connection; it just returns it to the connection pool. A sketch of this borrow-and-return pattern follows the Druid example below.
> For more questions about using HikariCP, please see the [official instructions](https://github.com/brettwooldridge/HikariCP).

#### Druid

Example usage is as follows:

```java
public static void main(String[] args) throws Exception {

    DruidDataSource dataSource = new DruidDataSource();
    // jdbc properties
    dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
    dataSource.setUrl(url);
    dataSource.setUsername("root");
    dataSource.setPassword("taosdata");
    // pool configurations
    dataSource.setInitialSize(10);
    dataSource.setMinIdle(10);
    dataSource.setMaxActive(10);
    dataSource.setMaxWait(30000);
    dataSource.setValidationQuery("select server_status()");

    Connection connection = dataSource.getConnection(); // get connection
    Statement statement = connection.createStatement(); // get statement
    // query or insert
    // ...

    connection.close(); // put back to connection pool
}
```

> For more questions about using Druid, please see the [official instructions](https://github.com/alibaba/druid).
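
With either pool, a try-with-resources block is an easy way to guarantee that a borrowed connection is always returned. A minimal sketch, assuming `ds` is one of the data sources configured above and that database `db` and table `tb` already exist:

```java
// Borrow a connection from the pool; try-with-resources returns it to the
// pool (and closes the statement) even if the SQL throws an exception.
try (Connection connection = ds.getConnection();
     Statement statement = connection.createStatement()) {
    statement.executeUpdate("insert into db.tb values(now, 23, 10.3)");
} catch (SQLException e) {
    System.out.println("ERROR Message: " + e.getMessage());
}
```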

**Caution:**

- TDengine `v1.6.4.1` provides a special function `select server_status()` for heartbeat detection, so it is recommended to use `select server_status()` as the validation query when using connection pooling.

As you can see below, `select server_status()` returns `1` on successful execution.

```sql
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```

### More sample programs

The source code of the sample applications is under `TDengine/examples/JDBC`:

- JDBCDemo: JDBC sample source code.
- JDBCConnectorChecker: JDBC installation checker source and jar package.
- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, and c3p0.
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
- mybatisplus-demo: using taos-jdbcdriver in Spring Boot + MyBatis.

Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC)

## Recent update logs

| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |

## Frequently Asked Questions

1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` for batch data writing/updating?

   **Cause**: In TDengine's JDBC implementation, SQL statements submitted via the `addBatch()` method are executed sequentially in the order they are added, which does not reduce the number of interactions with the server and brings no performance improvement.

   **Solution**: 1. splice multiple values into a single insert statement; 2. use multi-threaded concurrent insertion; 3. use parameter-bound writing

2. java.lang.UnsatisfiedLinkError: no taos in java.library.path

   **Cause**: The program did not find the dependent native library `taos`.

   **Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory; on Linux, creating the following soft link will work: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.

3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on an IA 32-bit platform

   **Cause**: Currently, TDengine only supports 64-bit JDK.

   **Solution**: Reinstall the 64-bit JDK.

For other questions, please refer to [FAQ](/train-faq/faq)

## API Reference

[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs/en/14-reference/03-connector/node.mdx
similarity index 100%
rename from docs-en/14-reference/03-connector/node.mdx
rename to docs/en/14-reference/03-connector/node.mdx
diff --git a/docs/en/14-reference/03-connector/php.mdx b/docs/en/14-reference/03-connector/php.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..69dcce91e80fa05face1ffb35effe1ce1efa2631
--- /dev/null
+++ b/docs/en/14-reference/03-connector/php.mdx
@@ -0,0 +1,150 @@
---
sidebar_position: 1
sidebar_label: PHP
title: PHP Connector
---

`php-tdengine` is the TDengine PHP connector provided by the TDengine community. In particular, it supports Swoole coroutines.

The PHP connector relies on the TDengine client driver.

Project repository: <https://github.com/Yurunsoft/php-tdengine>

After the TDengine client or server is installed, `taos.h` is located at:

- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`

The TDengine client driver is located at:

- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`

## Supported Platforms

- Windows, Linux, macOS

- PHP >= 7.4

- TDengine >= 2.0

- Swoole >= 4.8 (optional)

## Supported Versions

Because the version of the TDengine client driver is tightly coupled to that of the TDengine server, it is strongly suggested to use a client driver of the same version as the TDengine server, even though the client driver can work with a TDengine server as long as the first three sections of the versions match.

## Installation

### Install TDengine Client Driver

Regarding how to install the TDengine client driver, please refer to [Install Client Driver](/reference/connector#installation-steps)

### Install php-tdengine

**Download the source code package and unzip it:**

```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
&& mkdir php-tdengine \
&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1
```

> Version `v1.0.2` is only an example; it can be replaced with any newer version. Available versions are listed in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).

**Non-Swoole environment:**

```shell
phpize && ./configure && make -j && make install
```

**Specify the TDengine location:**

```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
```

> `--with-tdengine-dir=` is followed by the TDengine location.
> It is useful when the TDengine installation location can't be found automatically, or on macOS.

**Swoole environment:**

```shell
phpize && ./configure --enable-swoole && make -j && make install
```

**Enable the extension:**

Option one: add `extension=tdengine` in `php.ini`.

Option two: use the CLI: `php -dextension=tdengine test.php`.

## Sample Programs

In this section, a few sample programs that use the TDengine PHP connector to access a TDengine cluster are demonstrated.

> Any error will throw the exception `TDengine\Exception\TDengineException`.

### Establish Connection

```php
{{#include docs/examples/php/connect.php}}
```

+ +### Insert Data + +
```php
{{#include docs/examples/php/insert.php}}
```

+ +### Synchronous Query + +
```php
{{#include docs/examples/php/query.php}}
```

+ +### Parameter Binding + +
```php
{{#include docs/examples/php/insert_stmt.php}}
```


## Constants

| Constant | Description |
| ----------------------------------- | ----------- |
| `TDengine\TSDB_DATA_TYPE_NULL` | null |
| `TDengine\TSDB_DATA_TYPE_BOOL` | bool |
| `TDengine\TSDB_DATA_TYPE_TINYINT` | tinyint |
| `TDengine\TSDB_DATA_TYPE_SMALLINT` | smallint |
| `TDengine\TSDB_DATA_TYPE_INT` | int |
| `TDengine\TSDB_DATA_TYPE_BIGINT` | bigint |
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint |
| `TDengine\TSDB_DATA_TYPE_UINT` | uint |
| `TDengine\TSDB_DATA_TYPE_UBIGINT` | ubigint |
diff --git a/docs/en/14-reference/03-connector/python.mdx b/docs/en/14-reference/03-connector/python.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c992d4fcf6803f914aa778b22d8c8c18d22d4bfb
--- /dev/null
+++ b/docs/en/14-reference/03-connector/python.mdx
@@ -0,0 +1,360 @@
---
sidebar_position: 3
sidebar_label: Python
title: TDengine Python Connector
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. taospy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of taospy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

`taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).

The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".

The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).

## Supported Platforms

- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.

## Version selection

We recommend using the latest version of `taospy`, regardless of the version of TDengine.

## Supported features

- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to manage databases, tables, and supertables, write data, query data, create continuous queries, etc.)

## Installation

### Preparation

1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to the [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.

If you use a native connection, you will also need to [install the client driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.

### Install via pip

#### Uninstalling an older version

If you have installed an older version of the Python connector, please uninstall it beforehand.

```
pip3 uninstall taos taospy
```

:::note
Earlier TDengine client software included the Python connector. If the Python connector was installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it doesn't matter if it doesn't exist.

:::

#### To install `taospy`

Install the latest version:

```
pip3 install taospy
```

You can also specify a particular version to install:

```
pip3 install taospy==2.3.0
```

```
pip3 install git+https://github.com/taosdata/taos-connector-python.git
```

### Installation verification

For a native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. They have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type:

```python
import taos
```

For REST connections, you only need to verify that the `taosrest` module can be imported successfully. In the Python Interactive Shell, type:

```python
import taosrest
```

:::tip
If you have multiple versions of Python on your system, you may have various `pip` commands. Be sure to use the correct path for the `pip` command. Above, we used the `pip3` command, which rules out the possibility of using the `pip` corresponding to Python 2.x versions. However, if you have more than one version of Python 3.x on your system, you still need to check that the installation path is correct. The easiest way to verify this is to type `pip3 install taospy` again on the command line; it will print out the exact location of `taospy`, for example, on Windows:

```
C:\> pip3 install taospy
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
Requirement already satisfied: taospy in c:\users\username\appdata\local\programs\python\python310\lib\site-packages (2.3.0)
```

:::

## Establish connection

### Connectivity testing

Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.

Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to the hostname if you are running a standalone instance) can be resolved locally, by testing with the `ping` command:

```
ping <FQDN>
```

Then test whether the cluster can be connected to with the TDengine CLI:

```
taos -h <FQDN> -P <PORT>
```

The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the serverPort corresponding to that dnode.

For REST connections, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command:

```
curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
```

The FQDN above is the FQDN of the machine running taosAdapter, and PORT is the port taosAdapter is listening on, `6041` by default.
If the test is successful, it will output the server version information, e.g.:

```json
{
  "status": "succ",
  "head": ["server_version()"],
  "column_meta": [["server_version()", 8, 8]],
  "data": [["2.4.0.16"]],
  "rows": 1
}
```

### Using connectors to establish connections

The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.

```python
{{#include docs/examples/python/connect_native_reference.py}}
```

All arguments of the `connect()` function are optional keyword arguments. The following are the connection parameters:

- `host` : The FQDN of the node to connect to. There is no default value. If this parameter is not provided, the firstEP in the client configuration file will be connected to.
- `user` : The TDengine user name. The default value is `root`.
- `password` : TDengine user password. The default value is `taosdata`.
- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which only takes effect if the host parameter is provided.
- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux systems.
- `timezone` : The timezone used to convert the TIMESTAMP data in query results to Python `datetime` objects. The default is the local timezone.

:::warning
`config` and `timezone` are both process-level configurations. We recommend that all connections made by a process use the same parameter values. Otherwise, unpredictable errors may occur.
:::

:::tip
The `connect()` function returns a `taos.TaosConnection` instance. In client-side multi-threaded scenarios, we recommend that each thread request a separate connection instance rather than sharing a connection between multiple threads.

:::

```python
{{#include docs/examples/python/connect_rest_examples.py:connect}}
```

All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters:

- `url`: The URL of the taosAdapter REST service. The default is `http://localhost:6041`.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.

## Sample program

### Basic Usage

##### TaosConnection class

The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
+ +```python title="execute method" +{{#include docs/examples/python/connection_usage_native_reference.py:insert}} +``` + +```python title="query method" +{{#include docs/examples/python/connection_usage_native_reference.py:query}} +``` + +:::tip +The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list. +::: + +##### Use of TaosResult class + +In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data. + +```python title="blocks_iter method" +{{#include docs/examples/python/result_set_examples.py}} +``` +##### Use of the TaosCursor class + +The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class. + +```python title="Use of TaosCursor" +{{#include docs/examples/python/cursor_usage_native_reference.py}} +``` + +:::note +The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results. + +::: + + + + +##### Use of TaosRestCursor class + +The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface. + +```python title="Use of TaosRestCursor" +{{#include docs/examples/python/connect_rest_examples.py:basic}} +``` +- `cursor.execute` : Used to execute arbitrary SQL statements. +- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set. +- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information. + +##### Use of the RestClient class + +The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. + +```python title="Use of RestClient" +{{#include docs/examples/python/rest_client_example.py}} +``` + +For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html). 
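
As a rough illustration of the `RestClient` usage described above, the following is a minimal sketch; the URL and credentials are the local defaults assumed throughout this page, and the exact constructor arguments should be checked against the RestClient reference linked above:

```python
from taosrest import RestClient

# Assumed local defaults: taosAdapter listening on localhost:6041,
# default user root with password taosdata.
client = RestClient("http://localhost:6041", user="root", password="taosdata")
res = client.sql("select server_version()")  # executes SQL over the REST API
print(res)
```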

### Used with pandas

```python
{{#include docs/examples/python/conn_native_pandas.py}}
```

```python
{{#include docs/examples/python/conn_rest_pandas.py}}
```

```python
{{#include docs/examples/python/conn_native_sqlalchemy.py}}
```

```python
{{#include docs/examples/python/conn_rest_sqlalchemy.py}}
```

### Other sample programs

| Example program links | Example program content |
| --------------------- | ----------------------- |
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at a time |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | Asynchronous subscription |
| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | Synchronous subscription |

## Other notes

### Exception handling

All errors from database operations are thrown directly as exceptions, and the error messages from the database are passed up the exception stack. The application is responsible for exception handling. For example:

```python
{{#include docs/examples/python/handle_exception.py}}
```

### About nanoseconds

Due to the current imperfection of Python's nanosecond support (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced for `ms` and `us`, which application developers will need to handle on their own. Using pandas' `to_datetime()` is recommended. The Python connector may modify the interface in the future if Python officially supports nanoseconds in full.

1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/


## Frequently Asked Questions

Welcome to [ask or report questions](https://github.com/taosdata/taos-connector-python/issues).

## Important Update

| Connector version | Important Update | Release date |
| ---------- | --------------------------------------------------------------------------------- | ---------- |
+| 2.2.5 | support timezone option when connect | 2022-04-13 |
+| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |
+
+[Release Notes](https://github.com/taosdata/taos-connector-python/releases)
+
+## API Reference
+
+- [taos](https://docs.taosdata.com/api/taospy/taos/)
+- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)
diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/rust.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..56ca586c7e8ada6e4422596906e01887d4726fd0
--- /dev/null
+++ b/docs/en/14-reference/03-connector/rust.mdx
@@ -0,0 +1,384 @@
+---
+toc_max_heading_level: 4
+sidebar_position: 5
+sidebar_label: Rust
+title: TDengine Rust Connector
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+import Preparation from "./_preparation.mdx"
+import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
+import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
+import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
+import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
+import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
+
+`libtaos` is the official Rust language connector for TDengine. Rust developers can use it to develop applications that access TDengine instances.
+
+`libtaos` provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **REST connection**, which connects to TDengine instances via taosAdapter's REST interface.
+
+The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs).
+
+## Supported platforms
+
+The platforms supported by native connections are the same as those supported by the TDengine client driver.
+REST connections are supported on all platforms that can run Rust.
+
+## Version support
+
+Please refer to the [version support list](/reference/connector#version-support).
+
+The Rust connector is still under rapid development, and backward compatibility is not guaranteed before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
+
+## Installation
+
+### Pre-installation
+
+* Install the Rust development toolchain.
+* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver).
+
+### Adding libtaos dependencies
+
+Add the [libtaos][libtaos] dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
+
+Add [libtaos][libtaos] to the `Cargo.toml` file.
+
+```toml
+[dependencies]
+# use default feature
+libtaos = "*"
+```
+
+Add [libtaos][libtaos] to the `Cargo.toml` file and enable the `rest` feature.
+
+```toml
+[dependencies]
+# use rest feature
+libtaos = { version = "*", features = ["rest"]}
+```
+
+### Using connection pools
+
+Please enable the `r2d2` feature in `Cargo.toml`.
+
+```toml
+[dependencies]
+# with taosc
+libtaos = { version = "*", features = ["r2d2"] }
+# or rest
+libtaos = { version = "*", features = ["rest", "r2d2"] }
+```
+
+## Create a connection
+
+The [TaosCfgBuilder] provides the user with an API in the form of a constructor for the subsequent creation of connections or use of connection pools.
+
+```rust
+let cfg: TaosCfg = TaosCfgBuilder::default()
+    .ip("127.0.0.1")
+    .user("root")
+    .pass("taosdata")
+    .db("log") // do not set if no default database is required.
+    .port(6030u16)
+    .build()
+    .expect("TaosCfg builder error");
+```
+
+You can now use this object to create the connection.
+
+```rust
+let conn = cfg.connect()?;
+```
+
+More than one connection can be created from the same configuration.
+
+```rust
+let conn = cfg.connect()?;
+let conn2 = cfg.connect()?;
+```
+
+You can use connection pools in applications.
+
+```rust
+let pool = r2d2::Pool::builder()
+    .max_size(10000) // max connections
+    .build(cfg)?;
+
+// ...
+// Use the pool to get a connection
+let conn = pool.get()?;
+```
+
+After that, you can perform the following operations on the database.
+
+```rust
+async fn demo() -> Result<(), Error> {
+    // get connection ...
+
+    // create database
+    conn.exec("create database if not exists demo").await?;
+    // change database context
+    conn.exec("use demo").await?;
+    // create table
+    conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?;
+    // insert
+    conn.exec("insert into tb1 values(now, 1)").await?;
+    // query
+    let rows = conn.query("select * from tb1").await?;
+    for row in rows.rows {
+        println!("{}", row.into_iter().join(","));
+    }
+    Ok(())
+}
+```
+
+## Usage examples
+
+### Write data
+
+#### SQL Write
+
+<RustInsert />
+
+#### InfluxDB line protocol write
+
+<RustInfluxLine />
+
+#### OpenTSDB Telnet line protocol write
+
+<RustOpenTSDBTelnet />
+
+#### OpenTSDB JSON line protocol write
+
+<RustOpenTSDBJson />
+
+### Query data
+
+<RustQuery />
+
+### More sample programs
+
+| Program Path | Program Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------- |
+| [demo.rs] | Basic API usage examples |
+| [bailongma-rs] | Using TDengine as the Prometheus remote storage API adapter for the storage backend, using the r2d2 connection pool |
+
+## API Reference
+
+### Connection constructor API
+
+The [Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) constructor pattern is Rust's solution for handling complex data types or optional configuration types. The [libtaos] implementation uses the connection constructor [TaosCfgBuilder] as the entry point for the TDengine Rust connector. The [TaosCfgBuilder] provides optional configuration of servers, ports, databases, usernames, passwords, etc.
+
+Using the `default()` method, you can construct a [TaosCfg] with default parameters for subsequent connections to the database or establishing connection pools.
+
+```rust
+let cfg = TaosCfgBuilder::default().build()?;
+```
+
+Using the constructor pattern, the user can set options on demand.
+
+```rust
+let cfg = TaosCfgBuilder::default()
+    .ip("127.0.0.1")
+    .user("root")
+    .pass("taosdata")
+    .db("log")
+    .port(6030u16)
+    .build()?;
+```
+
+Create a TDengine connection using the [TaosCfg] object.
+
+```rust
+let conn: Taos = cfg.connect()?;
+```
+
+### Connection pooling
+
+In complex applications, we recommend enabling connection pools. The connection pool for [libtaos] is implemented using [r2d2].
+
+As follows, a connection pool with default parameters can be generated.
+
+```rust
+let pool = r2d2::Pool::new(cfg)?;
+```
+
+You can also set the connection pool parameters using the pool's builder.
+
+```rust
+use std::time::Duration;
+
+let pool = r2d2::Pool::builder()
+    .max_size(5000) // max connections
+    .max_lifetime(Some(Duration::from_secs(100 * 60))) // lifetime of each connection
+    .min_idle(Some(1000)) // minimum number of idle connections
+    .connection_timeout(Duration::from_secs(2 * 60))
+    .build(cfg)?;
+```
+
+In the application code, use `pool.get()?` to get a connection object [Taos].
+
+```rust
+let taos = pool.get()?;
+```
+
+The [Taos] structure is the connection manager in [libtaos] and provides two main APIs.
+
+1. `exec`: Execute non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc.
+
+    ```rust
+    taos.exec("create database if not exists demo").await?;
+    ```
+
+2. `query`: Execute a query statement and return a [TaosQueryData] object.
+
+    ```rust
+    let q = taos.query("select * from log.logs").await?;
+    ```
+
+    The [TaosQueryData] object stores the query result data and basic information about the returned columns (column name, type, length).
+
+    Column information is stored using [ColumnMeta].
+
+    ```rust
+    let cols = &q.column_meta;
+    for col in cols {
+        println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes);
+    }
+    ```
+
+    Data is fetched row by row.
+
+    ```rust
+    for (i, row) in q.rows.iter().enumerate() {
+        for (j, cell) in row.iter().enumerate() {
+            println!("cell({}, {}) data: {}", i, j, cell);
+        }
+    }
+    ```
+
+Note that Rust asynchronous functions and an asynchronous runtime are required.
+
+[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks.
+
+- `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure.
+- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
+- `.use_database(database: &str)`: Executes the `USE` statement.
+
+In addition, this structure is also the entry point for the [Parameter Binding](#bind-interface) and [Line Protocol](#line-protocol-interface) interfaces. Please refer to the specific API descriptions for usage.
+
+### Bind Interface
+
+Similar to the C interface, Rust provides a wrapper around the bind interface. First, create a bind object [Stmt] for a SQL statement from the [Taos] object.
+
+```rust
+let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?;
+```
+
+The bind object provides a set of interfaces for implementing parameter binding.
+
+##### `.set_tbname(tbname: impl ToCString)`
+
+Binds the table name.
+
+##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
+
+Binds the sub-table name and tag values when the SQL statement uses a super table.
+
+```rust
+let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?, ?)")?;
+// tags can be created with any supported type; here is an example using JSON
+let v = Field::Json(serde_json::from_str("{\"tag1\":\"one, two, three, four, five, six, seven, eight, nine, ten\"}").unwrap());
+stmt.set_tbname_tags("tb0", [&v])?;
+```
+
+##### `.bind(params: impl IntoParams)`
+
+Binds value types. Use the [Field] structure to construct the desired type and bind it.
+
+```rust
+let ts = Field::Timestamp(Timestamp::now());
+let value = Field::Float(0.0);
+stmt.bind(vec![ts, value].iter())?;
+```
+
+##### `.execute()`
+
+Executes the SQL. [Stmt] objects can be reused, re-bound, and executed again after execution.
+
+```rust
+stmt.execute()?;
+
+// next bind cycle.
+// stmt.set_tbname()?;
+// stmt.bind()?;
+// stmt.execute()?;
+```
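+
+Putting the calls above together, the following is a minimal, hypothetical end-to-end bind sketch; the table name `tb0` and its schema (`ts timestamp, v float`) are assumptions for illustration only.
+
+```rust
+// Hypothetical sketch: insert one row into an existing table tb0(ts timestamp, v float)
+// using the parameter binding interface described above.
+let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?;
+stmt.set_tbname("tb0")?; // bind the target table name
+let ts = Field::Timestamp(Timestamp::now());
+let v = Field::Float(0.5);
+stmt.bind(vec![ts, v].iter())?; // bind the column values
+stmt.execute()?; // run the prepared statement
+```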
+
+### Line protocol interface
+
+The line protocol interface supports multiple write modes and timestamp precisions; the corresponding constants are introduced from the schemaless module.
+
+```rust
+use libtaos::*;
+use libtaos::schemaless::*;
+```
+
+- InfluxDB line protocol
+
+  ```rust
+  let lines = [
+      "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000",
+      "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000",
+  ];
+  taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?;
+  ```
+
+- OpenTSDB Telnet protocol
+
+  ```rust
+  let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
+  taos.schemaless_insert(&lines, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
+  ```
+
+- OpenTSDB JSON protocol
+
+  ```rust
+  let lines = [r#"
+    {
+        "metric": "st",
+        "timestamp": 1626006833,
+        "value": 10,
+        "tags": {
+            "t1": true,
+            "t2": false,
+            "t3": 10,
+            "t4": "123_abc_.! @#$%^&*:;,. /? |+-=()[]{}<>"
+        }
+    }"#];
+  taos.schemaless_insert(&lines, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
+  ```
+
+For the usage of other related structures and APIs, please refer to the Rust documentation hosted at <https://docs.rs/libtaos>.
+
+[libtaos]: https://github.com/taosdata/libtaos-rs
+[tdengine]: https://github.com/taosdata/TDengine
+[bailongma-rs]: https://github.com/taosdata/bailongma-rs
+[r2d2]: https://crates.io/crates/r2d2
+[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
+[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
+[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
+[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
+[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
+[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
+[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp
similarity index 100%
rename from docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp
rename to docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp
diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md
new file mode 100644
index 0000000000000000000000000000000000000000..cad229c32d602e8fc595ec06f72a1a486e2af77b
--- /dev/null
+++ b/docs/en/14-reference/04-taosadapter.md
@@ -0,0 +1,337 @@
+---
+title: "taosAdapter"
+description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine."
+sidebar_label: "taosAdapter"
+---
+
+import Prometheus from "./_prometheus.mdx"
+import CollectD from "./_collectd.mdx"
+import StatsD from "./_statsd.mdx"
+import Icinga2 from "./_icinga2.mdx"
+import TCollector from "./_tcollector.mdx"
+
+taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface that allows InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
+
+taosAdapter provides the following features.
+
+- RESTful interface
+- InfluxDB v1 compliant write interface
+- Compatible with OpenTSDB JSON and telnet format writes
+- Seamless connection to Telegraf
+- Seamless connection to collectd
+- Seamless connection to StatsD
+- Supports Prometheus remote_read and remote_write
+
+## taosAdapter architecture diagram
+
+![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp)
+
+## taosAdapter Deployment Method
+
+### Install taosAdapter
+
+taosAdapter has been part of the TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, no additional steps are needed to install taosAdapter: download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later versions) from the [TDengine official website](https://tdengine.com/all-downloads/). If you need to deploy taosAdapter separately, on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
+
+### Start/Stop taosAdapter
+
+On Linux systems, the taosAdapter service is managed by `systemd` by default. Use the command `systemctl start taosadapter` to start the taosAdapter service and the command `systemctl stop taosadapter` to stop it.
+
+### Remove taosAdapter
+
+Use the command `rmtaos` to remove the TDengine server software, including taosAdapter, if you installed from the tar.gz package. If you installed from a .deb or .rpm package, use the corresponding command for your package manager, such as apt or rpm, to remove the TDengine server, including taosAdapter.
+
+### Upgrade taosAdapter
+
+taosAdapter and the TDengine server must be the same version. Upgrade taosAdapter by upgrading the TDengine server. A taosAdapter deployed separately from taosd must be upgraded by upgrading the TDengine server installed on its own server.
+
+## taosAdapter parameter list
+
+taosAdapter is configurable via command-line arguments, environment variables, and configuration files. The default configuration file is /etc/taos/taosadapter.toml on Linux.
+
+Command-line arguments take precedence over environment variables, which take precedence over configuration files. The command-line usage is arg=val, e.g., taosadapter -p=30000 --debug=true. The detailed list is as follows:
+
+```shell
+Usage of taosAdapter:
+ --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
+ --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
+ --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
+ --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
+ --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
+ --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
+ -c, --config string config path default /etc/taos/taosadapter.toml
+ --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
+ --cors.allowCredentials cors allow credentials.
Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" + --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" + --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" + --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" + --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" + --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" + --help Print this help message and exit + --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") + --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) + --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") + --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") + --monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) + --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" + --monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" + --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") + --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) + --monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") + --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true) + --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" + --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" + --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") + --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" + --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) + --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" + --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" + --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" + --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) + --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" + --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") + --node_exporter.responseTimeout duration node_exporter response timeout. 
Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) + --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) + --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") + --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) + --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) + --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") + --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) + --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" + --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") + --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s) + --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000) + --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000) + -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) + --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) + --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) + --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE" + --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE" + --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE" + --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) + --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") + --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) + --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) + --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) + --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) + --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) + --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) + --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) + --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") + --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) + --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") + --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" + --statsd.user string statsd user. 
Env "TAOS_ADAPTER_STATSD_USER" (default "root") + --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) + --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" + --version Print the version and exit +``` + +Note: +Please set the following Cross-Origin Resource Sharing (CORS) parameters according to the actual situation when using a browser for interface calls. + +```text +AllowAllOrigins +AllowOrigins +AllowHeaders +ExposeHeaders +AllowCredentials +AllowWebSockets +``` + +You do not need to care about these configurations if you do not make interface calls through the browser. + +For details on the CORS protocol, please refer to: [https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) or [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS). + +See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml) for sample configuration files. + +## Feature List + +- Compatible with RESTful interfaces [REST API](/reference/rest-api/) +- Compatible with InfluxDB v1 write interface + [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) +- Compatible with OpenTSDB JSON and telnet format writes + - + - +- Seamless connection to collectd + collectd is a system statistics collection daemon, please visit [https://collectd.org/](https://collectd.org/) for more information. +- Seamless connection with StatsD + StatsD is a simple yet powerful daemon for aggregating statistical information. Please visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information. +- Seamless connection with icinga2 + icinga2 is a software that collects inspection result metrics and performance data. Please visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information. +- Seamless connection to TCollector + TCollector is a client process that collects data from a local collector and pushes the data to OpenTSDB. Please visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information. +- Seamless connection to node_exporter + node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information. +- Support for Prometheus remote_read and remote_write + remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information. + +## Interfaces + +### TDengine RESTful interface + +You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://:6041/`. See the [official documentation](/reference/connector#restful) for details. The following EndPoint is supported. 
+
+### InfluxDB
+
+You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows:
+
+```text
+/influxdb/v1/write
+```
+
+The following InfluxDB query parameters are supported.
+
+- `db` specifies the database name used by TDengine
+- `precision` the time precision used by TDengine
+- `u` TDengine user name
+- `p` TDengine password
+
+Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
+
+### OpenTSDB
+
+You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in OpenTSDB compatible format to TDengine. The EndPoints are as follows:
+
+```text
+/opentsdb/v1/put/json/
+/opentsdb/v1/put/telnet/
+```
+
+### collectd
+
+<CollectD />
+
+### StatsD
+
+<StatsD />
+
+### icinga2 OpenTSDB writer
+
+<Icinga2 />
+
+### TCollector
+
+<TCollector />
+
+### node_exporter
+
+node_exporter is an exporter of hardware and OS metrics exposed by the \*NIX kernel, used by Prometheus.
+
+- Enable the taosAdapter configuration `node_exporter.enable`
+- Set the relevant configuration for node_exporter
+- Restart taosAdapter
+
+### Prometheus
+
+<Prometheus />
+
+## Memory usage optimization methods
+
+taosAdapter monitors its memory usage during operation and adjusts its behavior based on two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.
+
+- pauseQueryMemoryThreshold
+- pauseAllMemoryThreshold
+
+taosAdapter stops processing query requests when the `pauseQueryMemoryThreshold` threshold is exceeded.
+
+HTTP response content:
+
+- code 503
+- body "query memory exceeds threshold"
+
+taosAdapter stops processing all write and query requests when the `pauseAllMemoryThreshold` threshold is exceeded.
+
+HTTP response content:
+
+- code 503
+- body "memory exceeds threshold"
+
+The corresponding functions resume when memory usage falls back below the thresholds.
+
+Status check interface: `http://<fqdn>:6041/-/ping`
+
+- Returns `code 200` in the normal state
+- Without parameters, returns `code 503` if memory exceeds pauseAllMemoryThreshold
+- With the request parameter `action=query`, returns `code 503` if memory exceeds `pauseQueryMemoryThreshold` or `pauseAllMemoryThreshold`
+
+The corresponding configuration parameters are:
+
+```text
+  monitor.collectDuration              monitoring interval                                               environment variable `TAOS_MONITOR_COLLECT_DURATION` (default value 3s)
+  monitor.incgroup                     whether to run in cgroup (set to true for running in container)   environment variable `TAOS_MONITOR_INCGROUP`
+  monitor.pauseAllMemoryThreshold      memory threshold for no more inserts and queries                  environment variable `TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD` (default 80)
+  monitor.pauseQueryMemoryThreshold    memory threshold for no more queries                              environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70)
+```
+
+You should adjust these parameters based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. A load balancer can also check the taosAdapter running status through this interface.
+
+## taosAdapter Monitoring Metrics
+
+taosAdapter collects HTTP-related metrics, CPU percentage, and memory percentage.
+
+### HTTP interface
+
+Provides an interface conforming to [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md).
+
+```text
+http://<fqdn>:6041/metrics
+```
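+
+As a quick check, the metrics EndPoint can be queried directly; the sketch below assumes taosAdapter is listening on localhost at the default port.
+
+```bash
+# Fetch taosAdapter monitoring metrics in OpenMetrics text format
+# (localhost:6041 is an illustrative default; Prometheus can scrape this same endpoint)
+curl http://localhost:6041/metrics
+```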
+
+### Write to TDengine
+
+taosAdapter supports writing HTTP monitoring metrics, CPU percentage, and memory percentage to TDengine.
+
+The configuration parameters are as follows:
+
+| **Configuration items** | **Description** | **Default values** |
+| ----------------------- | ---------------------------------------------------------------------------- | ------------------ |
+| monitor.collectDuration | CPU and memory collection interval | 3s |
+| monitor.identity | The identity of the current instance; `hostname:port` is used if not set | |
+| monitor.incgroup | Whether it is running in a cgroup (set to true when running in a container) | false |
+| monitor.writeToTD | Whether to write to TDengine | true |
+| monitor.user | TDengine connection username | root |
+| monitor.password | TDengine connection password | taosdata |
+| monitor.writeInterval | Write to TDengine interval | 30s |
+
+## Limit the number of results returned
+
+taosAdapter limits the number of results returned via the parameter `restfulRowLimit`; -1 means no limit, and the default is no limit.
+
+This parameter controls the number of results returned by the following interfaces:
+
+- `http://<fqdn>:6041/rest/sql`
+- `http://<fqdn>:6041/rest/sqlt`
+- `http://<fqdn>:6041/rest/sqlutc`
+- `http://<fqdn>:6041/prometheus/v1/remote_read/:db`
+
+## Troubleshooting
+
+You can check the taosAdapter running status with the `systemctl status taosadapter` command.
+
+You can also adjust the verbosity of the taosAdapter log output by setting the `--logLevel` parameter or the environment variable `TAOS_ADAPTER_LOG_LEVEL`. Valid values are: panic, fatal, error, warn, warning, info, debug, and trace.
+
+## How to migrate from older TDengine versions to taosAdapter
+
+In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone piece of software managed by `systemd` with its own process ID. There are some configuration parameters and behaviors that differ between the two. See the following table for details.
+
+| **#** | **embedded httpd** | **taosAdapter** | **comment** |
+| ----- | ------------------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| 1 | httpEnableRecordSql | --logLevel=debug | |
+| 2 | httpMaxThreads | n/a | taosAdapter automatically manages its thread pool; this parameter is not needed |
+| 3 | telegrafUseFieldNum | See the taosAdapter telegraf configuration method | |
+| 4 | restfulRowLimit | restfulRowLimit | Embedded httpd outputs 10240 rows of data by default, with a maximum allowed value of 102400. taosAdapter also provides restfulRowLimit, but it is unlimited by default. You can configure it according to the actual scenario. |
+| 5 | httpDebugFlag | Not applicable | httpDebugFlag does not work for taosAdapter |
+| 6 | httpDBNameMandatory | N/A | taosAdapter requires the database name to be specified in the URL |
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
new file mode 100644
index 0000000000000000000000000000000000000000..0951717f5ae5f17676bd4aaefcd24f0da829c12c
--- /dev/null
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -0,0 +1,437 @@
+---
+title: taosBenchmark
+sidebar_label: taosBenchmark
+toc_max_heading_level: 4
+description: "taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine."
+---
+
+## Introduction
+
+taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, super tables, sub-tables, and the time series data to populate them for performance benchmarking. taosBenchmark is highly configurable; the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
+
+## Installation
+
+There are two ways to install taosBenchmark:
+
+- Installing the official TDengine installer will automatically install taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details.
+
+- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
+
+## Run
+
+### Configuration and running methods
+
+taosBenchmark is executed from a terminal of the operating system. It supports two configuration methods: [command-line arguments](#command-line-arguments-in-detail) and a [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive: users can use `-f <json file>` to specify a configuration file, but when running taosBenchmark with command-line arguments to control its behavior, the `-f` parameter must not be used. In addition, taosBenchmark offers a special way of running without any parameters.
+
+taosBenchmark supports complete performance testing of TDengine by providing write, query, and subscribe functionality. These three functions are mutually exclusive; users can select only one of them for each taosBenchmark run. The query and subscribe functionality can only be configured using a JSON configuration file by specifying the parameter `filetype`, while writes can be configured through both the command-line and a configuration file.
+
+**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
+
+### Run without command-line arguments
+
+Execute the following command to quickly experience taosBenchmark's write performance test of TDengine based on the default configuration.
+
+```bash
+taosBenchmark
+```
+
+When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 sub-tables under the super table, with 10,000 records written to each sub-table. Note that if there is already a database named `test`, this command will delete it first and create a new one.
+
+### Run with command-line configuration parameters
+
+The `-f <json file>` argument cannot be used when running taosBenchmark with command-line parameters. Users must specify all configuration parameters on the command-line. The following is an example of testing taosBenchmark write performance using the command-line approach.
+
+```bash
+taosBenchmark -I stmt -n 200 -t 100
+```
+
+Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table, and insert 200 records into each sub-table using parameter binding.
+
+### Run with the configuration file
+
+A sample configuration file is provided in the taosBenchmark installation package under `/examples/taosbenchmark-json`.
+
+Use the following command-line to run taosBenchmark and control its behavior via a configuration file.
+
+```bash
+taosBenchmark -f <json file>
+```
+
+#### Configuration file examples
+
+##### Example of insert scenario JSON configuration file
+
+<details>
+<summary>insert.json</summary>
+
+```json
+{{#include /taos-tools/example/insert.json}}
+```
+
+</details>
+
+##### Example of query scenario JSON configuration file
+
+<details>
+<summary>query.json</summary>
+
+```json
+{{#include /taos-tools/example/query.json}}
+```
+
+</details>
+
+##### Example of subscription scenario JSON configuration file
+
+<details>
+<summary>subscribe.json</summary>
+
+```json
+{{#include /taos-tools/example/subscribe.json}}
+```
+
+</details>
+
+## Command-line arguments in detail
+
+- **-f/--file <json file>** :
+  specify the configuration file to use. This file includes all parameters, and users should not combine it with other command-line parameters. There is no default value.
+
+- **-c/--config-dir <dir>** :
+  specify the directory where the TDengine cluster configuration files are located. The default path is `/etc/taos`.
+
+- **-h/--host <host>** :
+  specify the FQDN of the TDengine server to connect to. The default value is localhost.
+
+- **-P/--port <port>** :
+  the port number of the TDengine server to connect to. The default value is 6030.
+
+- **-I/--interface <insertMode>** :
+  insert mode. Options are taosc, rest, stmt, sml, and sml-rest, corresponding to normal write, RESTful interface writing, parameter binding interface writing, schemaless interface writing, and RESTful schemaless interface writing (provided by taosAdapter). The default value is taosc.
+
+- **-u/--user <user>** :
+  user name to connect to the TDengine server. The default is root.
+
+- **-p/--password <passwd>** :
+  password to connect to the TDengine server. The default is `taosdata`.
+
+- **-o/--output <outputfile>** :
+  specify the path of the result output file. The default value is `./output.txt`.
+
+- **-T/--thread <threadNum>** :
+  the number of threads to insert data. The default is 8.
+
+- **-B/--interlace-rows <rowNum>** :
+  enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table completely before the next sub-table is inserted.
+
+- **-i/--insert-interval <timeInterval>** :
+  specify the insert interval in ms for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. It means that after inserting interlaced rows for each child table, the data insertion with multiple threads will wait for the interval specified by this value before proceeding to the next round of writes.
+
+- **-r/--rec-per-req <rowNum>** :
+  the number of rows of records written to TDengine per request. The default value is 30000.
+
+- **-t/--tables <tableNum>** :
+  specify the number of sub-tables. The default is 10000.
+
+- **-S/--timestampstep <stepLength>** :
+  the timestamp step for inserting data in each child table in ms. The default is 1.
+
+- **-n/--records <recordNum>** :
+  the number of records inserted into each sub-table. The default value is 10000.
+
+- **-d/--database <dbName>** :
+  the name of the database used. The default value is `test`.
+
+- **-b/--data-type <colType>** :
+  specify the types of the data columns of the super table. If not set, it defaults to three columns of type FLOAT, INT, and FLOAT.
+
+- **-l/--columns <colNum>** :
+  specify the number of columns in the super table. If both this parameter and `-b/--data-type` are set, the final number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column types default to INT; for example, `-l 5 -b float,double` results in the columns `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, the result is the columns and types specified by `-b/--data-type`; e.g. `-l 3 -b float,double,float,bigint` results in the columns `FLOAT,DOUBLE,FLOAT,BIGINT`.
+
+- **-A/--tag-type <tagType>** :
+  the tag column types of the super table. nchar and binary types can both set the length, for example:
+
+```
+taosBenchmark -A INT,DOUBLE,NCHAR,BINARY(16)
+```
+
+If users do not set the tag type, the default is two tags, whose types are INT and BINARY(16).
+Note: In some shells, such as bash, "()" needs to be escaped, so the above command should be
+
+```
+taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
+```
+
+- **-w/--binwidth <length>** :
+  specify the default length for nchar and binary types. The default value is 64.
+
+- **-m/--table-prefix <tablePrefix>** :
+  the prefix of the sub-table names. The default value is "d".
+
+- **-E/--escape-character** :
+  switch parameter specifying whether to use escape characters in the super table and sub-table names. It is not used by default.
+
+- **-C/--chinese** :
+  switch parameter specifying whether to use Unicode Chinese characters in nchar and binary. It is not used by default.
+
+- **-N/--normal-table** :
+  this parameter indicates that taosBenchmark will create only normal tables instead of super tables. The default value is false. It can be used if the insert mode is taosc, stmt, or rest.
+
+- **-M/--random** :
+  this parameter indicates writing data with random values. The default is false. If users use this parameter, taosBenchmark will generate random values. For tag/data columns of numeric types, the value is a random value within the range of values of that type. For NCHAR and BINARY type tag/data columns, the value is a random string within the specified length range.
+
+- **-x/--aggr-func** :
+  switch parameter to indicate running aggregation functions as queries after insertion. The default value is false.
+
+- **-y/--answer-yes** :
+  switch parameter that requires the user to confirm at the prompt to continue. The default value is false.
+
+- **-O/--disorder <Percentage>** :
+  specify the percentage probability of disordered data, with a value range of [0,50]. The default is 0, i.e., there is no disordered data.
+
+- **-R/--disorder-range <timeRange>** :
+  specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
+
+- **-F/--prepare_rand <Num>** :
+  specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
+
+- **-a/--replica <replicaNum>** :
+  specify the number of replicas when creating the database. The default value is 1.
+
+- **-V/--version** :
+  show version information only. Users should not use it with other parameters.
+
+- **-?/--help** :
+  show help information and exit. Users should not use it with other parameters.
+
+## Configuration file parameters in detail
+
+### General configuration parameters
+
+The parameters listed in this section apply to all function modes.
+
+- **filetype** : the function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of them in each configuration file.
+
+- **cfgdir** : specify the directory of the TDengine cluster configuration files. The default path is /etc/taos.
+
+- **host** : specify the FQDN of the TDengine server to connect to. The default value is `localhost`.
+
+- **port** : the port number of the TDengine server to connect to. The default value is `6030`.
+
+- **user** : the user name of the TDengine server to connect to. The default is `root`.
+
+- **password** : the password to connect to the TDengine server. The default value is `taosdata`.
+
+### Insert scenario configuration parameters
+
+`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#general-configuration-parameters) for the general parameters.
+
+#### Database related configuration parameters
+
+The parameters related to database creation are configured in `dbinfo` in the JSON configuration file, as follows. These parameters correspond to the database parameters specified in TDengine's `create database` statement.
+
+- **name** : specify the name of the database.
+
+- **drop** : indicate whether to delete the database before inserting. The default is true.
+
+- **replica** : specify the number of replicas when creating the database.
+
+- **days** : specify the time span for storing data in a single data file. The default is 10.
+
+- **cache** : specify the size of the cache blocks in MB. The default value is 16.
+
+- **blocks** : specify the number of cache blocks in each vnode. The default is 6.
+
+- **precision** : specify the database time precision. The default value is "ms".
+
+- **keep** : specify the number of days to keep the data. The default value is 3650.
+
+- **minRows** : specify the minimum number of records in a file block. The default value is 100.
+
+- **maxRows** : specify the maximum number of records in a file block. The default value is 4096.
+
+- **comp** : specify the file compression level. The default value is 2.
+
+- **walLevel** : specify the WAL level. The default is 1.
+
+- **cacheLast** : indicate whether to allow the last record of each table to be kept in memory. The default value is 0. The value can be 0, 1, 2, or 3.
+
+- **quorum** : specify the number of write acknowledgments in multi-replica mode. The default value is 1.
+
+- **fsync** : specify the interval of fsync in ms when WAL is set to 2. The default value is 3000.
+
+- **update** : indicate whether to support data update. The default value is 0; optional values are 0, 1, and 2.
+
+#### Super table related configuration parameters
+
+The parameters for creating super tables are configured in `super_tables` in the JSON configuration file, as shown below.
+
+- **name** : the super table name, mandatory, no default value.
+
+- **child_table_exists** : whether the child tables already exist. The default value is "no"; optional values are "yes" or "no".
+
+- **child_table_count** : the number of child tables. The default value is 10.
+
+- **child_table_prefix** : the prefix of the child table names, mandatory, no default value.
+
+- **escape_character** : specify whether the super table and child table names contain escape characters. The value can be "yes" or "no". The default is "no".
+
+- **auto_create_table** : takes effect only when insert_mode is taosc, rest, or stmt, and child_table_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting.
+
+- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables. The default is 10. Note: the actual number of batches may not be the same as this value; if an executed SQL statement is longer than the maximum supported length, it will be automatically truncated and re-executed to continue creating.
+
+- **data_source** : specify the source of the generated data. The default is data randomly generated by taosBenchmark. Users can configure it as "rand" or "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter.
+
+- **insert_mode** : insertion mode with options taosc, rest, stmt, sml, and sml-rest, corresponding to normal write, RESTful interface write, parameter binding interface write, schemaless interface write, and RESTful schemaless interface write (provided by taosAdapter). The default value is taosc.
+
+- **non_stop_mode** : specify whether to keep writing. If "yes", insert_rows is disabled, and writing will not stop until Ctrl + C stops the program. The default value is "no", i.e., taosBenchmark stops writing after the specified number of rows has been written. Note: insert_rows must still be configured as a non-zero positive integer, even though it is ignored in continuous write mode.
+
+- **line_protocol** : insert data using the given line protocol. Only works when insert_mode is sml or sml-rest. The value can be `line`, `telnet`, or `json`.
+
+- **tcp_transfer** : the communication protocol in telnet mode. It only takes effect when insert_mode is sml-rest and line_protocol is telnet. If not configured, the default protocol is http.
+
+- **insert_rows** : the number of inserted rows per child table. The default is 0.
+
+- **childtable_offset** : effective only if child_table_exists is yes; specifies the offset when fetching the list of child tables from the super table, i.e., which child table to start from.
+
+- **childtable_limit** : effective only when child_table_exists is yes; specifies the upper limit when fetching the list of child tables from the super table.
+
+- **interlace_rows** : enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table completely before the next sub-table is inserted.
+
+- **insert_interval** : specifies the insertion interval in ms for interleaved insertion mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes.
+
+- **partial_col_num** : if this value is a positive number n, only the first n columns are written to; it only takes effect when insert_mode is taosc or rest. If n is 0, all columns are written.
+
+- **disorder_ratio** : specifies the percentage probability of disordered (i.e. out-of-order) data, in the value range [0,50]. The default is 0, which means there is no disordered data.
+
+- **disorder_range** : specifies the timestamp fallback range for the disordered data. A disordered timestamp is generated by subtracting a random value in this range from the timestamp that would be used in the non-disordered case. Valid only if the disorder ratio specified by `disorder_ratio` is greater than 0.
+
+- **timestamp_step** : the timestamp step for inserting data in each child table, in units consistent with the `precision` of the database; e.g. if the `precision` is milliseconds, the timestamp step is in milliseconds. The default value is 1.
+
+- **start_timestamp** : the starting timestamp of each sub-table. The default value is now.
+
+- **sample_format** : the type of the sample data file; for now only "csv" is supported.
+
+- **sample_file** : specify a CSV format file as the data source. It only works when data_source is "sample". If the number of rows in the CSV file is less than or equal to prepared_rand, taosBenchmark will read the CSV file data cyclically until it reaches prepared_rand rows; otherwise, taosBenchmark will read only the first prepared_rand rows. The final number of rows of data generated is the smaller of the two.
+
+- **use_sample_ts** : effective only when data_source is "sample"; indicates whether the CSV file specified by sample_file contains a timestamp as its first column. The default is no. If set to yes, the first column of the CSV file is used as `timestamp`. Since the timestamp of the same sub-table cannot repeat, the amount of data generated depends on the number of data rows in the CSV file, and insert_rows is ignored.
+
+- **tags_file** : only works when insert_mode is taosc or rest. The final tag values are related to childtable_count. If the tag data rows in the CSV file are fewer than the given number of child tables, taosBenchmark will read the CSV file data cyclically until the number of child tables specified by childtable_count is generated; otherwise, taosBenchmark will read only childtable_count rows of tag data. The final number of child tables generated is the smaller of the two.
+
+#### Tag and Data Column Configuration Parameters
+
+The configuration parameters for specifying super table tag columns and data columns are in `columns` and `tags` in `super_tables`, respectively.
+
+- **type** : specify the column type. For optional values, please refer to the data types supported by TDengine.
+  Note: the JSON data type is special and can only be used for tags. When using the JSON type as a tag, there must be exactly this one tag. In that case, `count` and `len` represent the number of key-value pairs within the JSON tag and the length of the value of each KV pair, respectively; each value is a string by default.
+
+- **len** : specify the length of this data type, valid for NCHAR, BINARY, and JSON data types. If this parameter is configured for other data types, a value of 0 means that the column is always written with a null value; otherwise it is ignored.
+
+- **count** : specify the number of consecutive occurrences of the column type, e.g., "count": 4096 generates 4096 columns of the specified type.
+
+- **name** : the name of the column. If used together with count, e.g. "name": "current", "count": 3, the names of the 3 columns are current, current_2, and current_3.
+
+- **min** : the minimum value of the data type of the column/tag.
+
+- **max** : the maximum value of the data type of the column/tag.
+
+- **values** : the value domain of the nchar/binary column/tag, from which values will be chosen randomly.
+
+#### Insertion behavior configuration parameters
+
+- **thread_count** : specify the number of threads to insert data. The default is 8.
+
+- **create_table_thread_count** : the number of threads for creating tables. The default is 8.
+
+- **connection_pool_size** : the number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified.
+
+- **result_file** : the path to the result output file. The default value is ./output.txt.
+
+- **confirm_parameter_prompt** : a switch parameter that requires the user to confirm at the prompt before continuing. The default value is false.
+
+- **interlace_rows** : enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, which means that data will be inserted into the following child table only after data has been inserted into one child table.
+  This parameter can also be configured in `super_tables`; if so, the configuration in `super_tables` takes precedence and overrides the global setting.
+
+- **insert_interval** :
+  specifies the insertion interval in ms for interleaved insertion mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. It means that after inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes.
+  This parameter can also be configured in `super_tables`; if so, the configuration in `super_tables` takes precedence and overrides the global setting.
+
+- **num_of_records_per_req** :
+  the number of rows of data to be written per request to TDengine. The default value is 30000. If it is set too large, the TDengine client driver will return the corresponding error message; in that case you need to lower this parameter to meet the writing requirements.
+
+- **prepare_rand** : the number of unique values in the generated random data. A value of 1 means that all data are the same. The default value is 10000.
+
+### Query scenario configuration parameters
+
+`filetype` must be set to `query` in the query scenario. See [General Configuration Parameters](#general-configuration-parameters) for details of this parameter and other general parameters.
+
+#### Configuration parameters for executing the specified query statement
+
+The configuration parameters for querying sub-tables or normal tables are set in `specified_table_query`.
+
+- **query_interval** : the query interval in seconds. The default value is 0.
+
+- **threads** : the number of threads executing the query SQL. The default value is 1.
+
+- **sqls** :
+  - **sql** : the SQL command to be executed.
+  - **result** : the file to save the query results to. If it is unspecified, taosBenchmark will not save the results.
+
+#### Configuration parameters of query super table
+
+The configuration parameters of the super table query are set in `super_table_query`.
+
+- **stblname** : specify the name of the super table to be queried, required.
+
+- **query_interval** : the query interval in seconds. The default value is 0.
+
+- **threads** : the number of threads executing the query SQL. The default value is 1.
+
+- **sqls** :
+  - **sql** : the SQL command to be executed, required. For a super table query, keep "xxxx" in the SQL command; the program will automatically replace it with each of the sub-table names of the super table.
+  - **result** : the file to save the query results to. If not specified, taosBenchmark will not save the results.
+
+### Subscription scenario configuration parameters
+
+`filetype` must be set to `subscribe` in the subscription scenario. See [General Configuration Parameters](#general-configuration-parameters) for details of this and other general parameters.
+### Subscription scenario configuration parameters
+
+`filetype` must be set to `subscribe` in the subscription scenario. See [General Configuration Parameters](#general-configuration-parameters) for details of this and other general parameters.
+
+#### Configuration parameters for executing the specified subscription statement
+
+The configuration parameters for subscribing to sub-tables or normal tables are set in `specified_table_query`. A subscription configuration sketch follows this section.
+
+- **threads**: The number of threads executing the SQL. The default is 1.
+
+- **interval**: The interval at which to execute the subscription, in seconds. The default is 0.
+
+- **restart**: "yes" means start a new subscription; "no" means continue the previous subscription. The default value is "no".
+
+- **keepProgress**: "yes" means keep the progress of the subscription; "no" means do not keep it. The default value is "no".
+
+- **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again; "no" means continue the previous subscription. The default value is "no".
+
+- **sqls**:
+  - **sql**: The SQL command to be executed. Required.
+  - **result**: The file in which to save the query result. If not specified, the result is not saved.
+
+#### Configuration parameters for subscribing to super tables
+
+The configuration parameters for subscribing to a super table are set in `super_table_query`.
+
+- **stblname**: The name of the super table to subscribe to.
+
+- **threads**: The number of threads executing the SQL. The default is 1.
+
+- **interval**: The interval at which to execute the subscription, in seconds. The default is 0.
+
+- **restart**: "yes" means start a new subscription; "no" means continue the previous subscription. The default value is "no".
+
+- **keepProgress**: "yes" means keep the progress of the subscription; "no" means do not keep it. The default value is "no".
+
+- **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again; "no" means continue the previous subscription. The default value is "no".
+
+- **sqls**:
+  - **sql**: The SQL command to be executed. Required. For super table subscriptions, keep "xxxx" in the SQL command; the program automatically replaces it with each of the super table's sub-table names.
+  - **result**: The file in which to save the query result. If not specified, the result is not saved.
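+
+As with the query scenario, a minimal subscription configuration might look like this sketch; the database name and SQL statement are assumptions for illustration:
+
+```json
+{
+    "filetype": "subscribe",
+    "databases": "test",
+    "specified_table_query": {
+        "threads": 1,
+        "interval": 0,
+        "restart": "yes",
+        "keepProgress": "no",
+        "sqls": [
+            { "sql": "select * from meters where current > 10", "result": "./subscribe_res0.txt" }
+        ]
+    }
+}
+```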
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
new file mode 100644
index 0000000000000000000000000000000000000000..2105ba83fad9700674e28609016b07ef6de66833
--- /dev/null
+++ b/docs/en/14-reference/06-taosdump.md
@@ -0,0 +1,120 @@
+---
+title: taosdump
+description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
+---
+
+## Introduction
+
+taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster.
+
+taosdump can back up a database, a super table, or a normal table as a logical data unit, or back up the data records in databases, super tables, and normal tables. You can specify the directory path for the backup; if you do not specify a directory, taosdump backs up the data to the current directory by default.
+
+If the specified location already contains data files, taosdump prompts the user and exits immediately to avoid overwriting data. This means that the same path can only be used for one backup.
+
+If you see such a prompt, proceed carefully, and make sure you follow best practices and the relevant SOPs for data integrity, backup, and data security.
+
+Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data.
+
+## Installation
+
+There are two ways to install taosdump:
+
+- Install the official taosTools installer. Find taosTools on the [All download links](https://www.tdengine.com/all-downloads) page, then download and install it.
+
+- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
+
+## Common usage scenarios
+
+### taosdump backup data
+
+taosdump supports the following backup scenarios; example commands are shown after the recovery section below.
+
+1. Back up all databases: specify the `-A` or `--all-databases` parameter.
+2. Back up multiple specified databases: use the `-D db1,db2,...` parameter.
+3. Back up some super or normal tables in a specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` arguments. Note that the first argument is the database name, and only one database is supported; the second and subsequent arguments are the names of super or normal tables in that database, separated by spaces.
+4. Back up the system log database: TDengine clusters usually contain a system database named `log`, which holds data generated by TDengine itself; taosdump does not back it up by default. To back up the log database, use the `-a` or `--allow-sys` command-line parameter.
+5. Loose mode backup: taosdump 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data without escape characters, in "loose" mode. If table names, column names, and tag names do not use escape characters, this can reduce both the backup time and the size of the backup data. If you are unsure whether the `-n` and `-L` conditions are met, use the default parameters for a "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escape characters.
+
+:::tip
+- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump parses the schema only.
+- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter; the default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", try changing the `-B` parameter to a smaller value.
+- The export of taosdump does not support resuming after an interruption. Therefore, if the taosdump process terminates unexpectedly, delete all related files that have been exported or generated.
+- The import of taosdump supports resuming after an interruption, but when the process resumes, you will receive some "table already exists" messages, which can be ignored.
+
+:::
+
+### taosdump recover data
+
+To restore the data files in a specified path, use the `-i` parameter plus the path to the data files. As noted above, you should not use the same directory to back up different data sets, nor back up the same data set multiple times to the same path; otherwise, the backup data would be overwritten or duplicated.
+
+:::tip
+taosdump internally uses the TDengine stmt binding API to write the recovered data, with a default batch size of 16384 for better recovery performance. If the backup data contains many columns, this may cause a "WAL size exceeds limit" error; in that case, try a smaller batch size via the `-B` parameter.
+
+:::
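+
+For example, assuming hypothetical database and table names and an output directory `./dump`, the scenarios above could be exercised as follows:
+
+```shell
+# 1. Back up all databases, plus the system "log" database (-a), to ./dump
+taosdump -a -A -o ./dump
+
+# 2. Back up only databases db1 and db2
+taosdump -D db1,db2 -o ./dump
+
+# 3. Back up super table stb1 and normal table tb1 from database db1
+taosdump db1 stb1 tb1 -o ./dump
+
+# Restore the backup from ./dump; lower -B if "WAL size exceeds limit" occurs
+taosdump -i ./dump -B 4096
+```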
+
+## Detailed command-line parameter list
+
+The following is a detailed list of taosdump command-line arguments.
+
+```
+Usage: taosdump [OPTION...] dbname [tbname ...]
+  or:  taosdump [OPTION...] --databases db1,db2,...
+  or:  taosdump [OPTION...] --all-databases
+  or:  taosdump [OPTION...] -i inpath
+  or:  taosdump [OPTION...] -o outpath
+
+  -h, --host=HOST            Server host from which to dump data. Default is
+                             localhost.
+  -p, --password             User password to connect to server. Default is
+                             taosdata.
+  -P, --port=PORT            Port to connect
+  -u, --user=USER            User name used to connect to server. Default is
+                             root.
+  -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos
+  -i, --inpath=INPATH        Input file path.
+  -o, --outpath=OUTPATH      Output file path.
+  -r, --resultFile=RESULTFILE DumpOut/In Result file path and name.
+  -a, --allow-sys            Allow to dump system database
+  -A, --all-databases        Dump all databases.
+  -D, --databases=DATABASES  Dump listed databases. Use comma to separate
+                             database names.
+  -N, --without-property     Dump database without its properties.
+  -s, --schemaonly           Only dump table schemas.
+  -y, --answer-yes           Input yes for prompt. It will skip data file
+                             checking!
+  -d, --avro-codec=snappy    Choose an avro codec among null, deflate, snappy,
+                             and lzma.
+  -S, --start-time=START_TIME Start time to dump. Either epoch or
+                             ISO8601/RFC3339 format is acceptable. ISO8601
+                             format example: 2017-10-01T00:00:00.000+0800 or
+                             2017-10-0100:00:00.000+0800 or '2017-10-01
+                             00:00:00.000+0800'
+  -E, --end-time=END_TIME    End time to dump. Either epoch or ISO8601/RFC3339
+                             format is acceptable. ISO8601 format example:
+                             2017-10-01T00:00:00.000+0800 or
+                             2017-10-0100:00:00.000+0800 or '2017-10-01
+                             00:00:00.000+0800'
+  -B, --data-batch=DATA_BATCH Number of data per query/insert statement when
+                             backup/restore. Default value is 16384. If you see
+                             'error actual dump .. batch ..' when backup or if
+                             you see 'WAL size exceeds limit' error when
+                             restore, please adjust the value to a smaller one
+                             and try. The workable value is related to the
+                             length of the row and type of table schema.
+  -I, --inspect              inspect avro file content and print on screen
+  -L, --loose-mode           Use loose mode if the table name and column name
+                             use letter and number only. Default is NOT.
+  -n, --no-escape            No escape char '`'. Default is using it.
+  -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
+                             8.
+  -C, --cloud=CLOUD_DSN      specify a DSN to access TDengine cloud service
+  -R, --restful              Use RESTful interface to connect TDengine
+  -t, --timeout=SECONDS      The timeout seconds for websocket to interact.
+  -g, --debug                Print debug info.
+  -?, --help                 Give this help list
+      --usage                Give a short usage message
+  -V, --version              Print program version
+
+Mandatory or optional arguments to long options are also mandatory or optional
+for any corresponding short options.
+
+Report bugs to .
+``` diff --git a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..54dc1062d6440cc0fc7b8c69d9e4c6b53e4cd01e --- /dev/null +++ b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json @@ -0,0 +1,3191 @@ +{ + "__inputs": [ + { + "name": "DS_TDENGINE", + "label": "TDengine", + "description": "", + "type": "datasource", + "pluginId": "tdengine-datasource", + "pluginName": "TDengine" + } + ], + "__requires": [ + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "7.5.10" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart v2", + "version": "" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "datasource", + "id": "tdengine-datasource", + "name": "TDengine", + "version": "3.1.0" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "TDengine nodes metrics.", + "editable": true, + "gnetId": 15146, + "graphTooltip": 0, + "id": null, + "iteration": 1635263227798, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 57, + "panels": [], + "title": "Cluster Status", + "type": "row" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 32, + "options": { + "content": "

TDengine Cluster Dashboard
>\n", + "mode": "markdown" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "mnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "-- OVERVIEW --", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Time", + "operator": "+", + "reducer": "sum", + "right": "" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + } + } + } + ], + "type": "text" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 8, + "x": 0, + "y": 4 + }, + "id": 28, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Leader MNode", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "leader" + } + }, + "fieldName": "role" + } + ], + "match": "all", + "type": "include" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["dnodes"] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 8, + "y": 4 + }, + "id": 70, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "/^Time$/", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Leader MNode Create Time", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "leader" + } + }, + "fieldName": "role" + } + ], + "match": "all", + "type": "include" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["Time"] + } + } + }, + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "reducer": "min" + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + 
"filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 15, + "y": 4 + }, + "id": 29, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show variables", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Variables", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["value", "name"] + } + } + }, + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": ".*" + } + }, + "fieldName": "name" + } + ], + "match": "all", + "type": "include" + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 7 + }, + "id": 33, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "/.*/", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "select server_version()", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Server Version", + "transformations": [], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 2, + "y": 7 + }, + "id": 27, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of MNodes", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "greater", + "options": { + "value": 0 + } + }, + "fieldName": "id" + } + ], + "match": "any", + "type": "include" + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["count"] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["id"] + } + } + } + ], + "type": "stat" + }, + { + 
"datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 5, + "y": 7 + }, + "id": 41, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Dnodes", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["count"] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["id"] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 7, + "y": 7 + }, + "id": 31, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Offline Dnodes", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "ready" + } + }, + "fieldName": "status" + } + ], + "match": "all", + "type": "exclude" + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["count"] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["id"] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 7 + }, + "id": 65, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of Databases", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + 
"mode": "reduceFields", + "reducers": ["count"] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["name"] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 7 + }, + "id": 69, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Number of Vgroups", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["vgroups"] + } + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["sum"] + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "role" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "from": "", + "id": 1, + "text": "", + "to": "", + "type": 2, + "value": "" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 9, + "x": 0, + "y": 10 + }, + "id": 67, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of DNodes for each Role", + "transformations": [ + { + "id": "groupBy", + "options": { + "fields": { + "end_point": { + "aggregations": ["count"], + "operation": "aggregate" + }, + "role": { + "aggregations": [], + "operation": "groupby" + } + } + } + }, + { + "id": "filterFieldsByName", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "end_point (count)": "Number of DNodes", + "role": "Dnode Role" + } + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 10 + }, + "id": 55, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + 
"repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show connections", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of Connections", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["count"] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["connId"] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 10 + }, + "id": 68, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Number of Tables", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["ntables"] + } + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["sum"] + } + } + ], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 24, + "panels": [], + "title": "Dnodes Status", + "type": "row" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "status" + }, + "properties": [ + { + "id": "custom.width", + "value": 86 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "vnodes" + }, + "properties": [ + { + "id": "custom.width", + "value": 77 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "role" + }, + "properties": [ + { + "id": "custom.width", + "value": 84 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cores" + }, + "properties": [ + { + "id": "custom.width", + "value": 75 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "end_point" + }, + "properties": [ + { + "id": "custom.width", + "value": 205 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "id" + }, + "properties": [ + { + "id": "custom.width", + "value": 78 + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 16, + "x": 0, + "y": 14 + }, + "id": 36, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + 
"timeFrom": null, + "timeShift": null, + "title": "DNodes Status", + "type": "table" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 14 + }, + "id": 40, + "options": { + "displayLabels": [], + "legend": { + "displayMode": "table", + "placement": "right", + "values": ["value"] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "/.*/", + "values": false + }, + "text": { + "titleSize": 6 + } + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Offline Reasons", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "ready" + } + }, + "fieldName": "status" + } + ], + "match": "all", + "type": "exclude" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": ["offline reason", "end_point"] + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "Time": { + "aggregations": ["count"], + "operation": "aggregate" + }, + "end_point": { + "aggregations": ["count"], + "operation": "aggregate" + }, + "offline reason": { + "aggregations": [], + "operation": "groupby" + } + } + } + } + ], + "type": "piechart" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 22, + "panels": [], + "title": "Mnodes Status", + "type": "row" + }, + { + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 38, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes;", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Mnodes Status", + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 20, + "panels": [], + "repeat": "fqdn", + "title": "节点资源占用 [ $fqdn ]", + "type": "row" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 26 + }, + "id": 66, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "memory", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select 
last(mem_taosd) as taosd, last(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Memory Usage of taosd", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "last(cpu_taosd)" + }, + "properties": [ + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 26 + }, + "id": 45, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "/^last\\(cpu_taosd\\)$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "mem_taosd", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_taosd) from log.dn where fqdn = '$fqdn'", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current CPU Usage of taosd", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "avg band speed last one minute", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 10, + "y": 26 + }, + "id": 14, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "band_speed", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(band_speed) from log.dn where fqdn='$fqdn' and ts >= now-5m and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "band speed", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 14, + "y": 26 + }, + "id": 48, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + 
"targets": [ + { + "alias": "", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(io_read) as io_read, last(io_write) as io_write from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "IO Rate", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 75 + }, + { + "color": "red", + "value": 80 + }, + { + "color": "dark-red", + "value": 95 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 19, + "y": 26 + }, + "id": 51, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "/^disk_used_percent$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(disk_used) as used from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select last(disk_total) as total from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "disk_used_percent", + "expression": "A/B", + "formatType": "Time series", + "hide": false, + "queryType": "Arithmetic", + "refId": "C", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Used", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": ["lastNotNull"] + } + } + ], + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 32 + }, + "id": 12, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "memory", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(mem_taosd) as taosd, max(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max Memory Usage of taosd in Last 5 minute", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + 
"max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 32 + }, + "id": 43, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "mem_taosd", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(cpu_taosd) from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max CPU Usage of taosd in Last 5 minute", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "avg band speed last one minute", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 10, + "y": 32 + }, + "id": 50, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "band_speed", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(band_speed) from log.dn where fqdn = '$fqdn' and ts >= now-1h", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max band speed in last hour", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 14, + "y": 32 + }, + "id": 49, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(io_read) as io_read, max(io_write) as io_write from log.dn where fqdn = '$fqdn'", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max IO Rate in last hour", + "type": "gauge" + }, + { + "datasource": "${DS_TDENGINE}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": 
"red", + "value": 6554 + } + ] + }, + "unit": "cpm" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 19, + "y": 32 + }, + "id": 52, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "alias": "req-http", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_http) as req_http from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req-inserts", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert) as req_insert from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req-selects", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_select) as req_select from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests in last Minute", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "description": "monitor system cpu", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "cpu_system", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(cpu_system) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "cpu_taosd", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(cpu_taosd) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "CPU 资源占用情况", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": 
"${DS_TDENGINE}", + "description": "monitor system cpu", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 38 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 42, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "system", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mem_system) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "taosd", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(mem_taosd) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select avg(mem_total) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "内存资源占用情况", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "decmbytes", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select 
avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "percent", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "D", + "sql": "select avg(disk_used)/avg(disk_total) * 100 from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Percent", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "gbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percent", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 49 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(value, 1m, 0) from (select avg(disk_used) as value from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Increasing Rate per Minute", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "description": "total select request per minute last hour", + "fieldConfig": { + "defaults": { + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 8, + "interval": null, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, 
+ "lines": true, + "linewidth": 1, + "links": [], + "maxDataPoints": 100, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "req_select", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_select) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "req_insert", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req_http", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_http) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requets Count per Minutes $fqdn", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [], + "unit": "Kbits" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 58 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 47, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "io-read", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "io-write", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "io-read-last-hour", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-2h and ts < now - 1h interval(1m)", + "target": "select 
metric", + "timeshift": { + "period": 1, + "unit": "hours" + }, + "type": "timeserie" + }, + { + "alias": "io-write-last-hour", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "D", + "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "timeshift": { + "period": 1, + "unit": "hours" + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "IO", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Kbits", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 63, + "panels": [], + "title": "Login History", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TDENGINE}", + "fieldConfig": { + "defaults": { + "displayName": "Logins Per Minute", + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "logins", + "nullPointMode": "null as zero" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "logins", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select count(*) from log.log where ts >= $from and ts < $to interval (1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Login Counts per Minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "1m", + "schemaVersion": 27, + "style": "dark", + "tags": ["TDengine", "multiple"], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "TDengine", + "value": "TDengine" + }, + "description": "TDengine Data Source Selector", + "error": null, + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "ds", + "options": [], + "query": "tdengine-datasource", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + 
"current": {}, + "datasource": "${DS_TDENGINE}", + "definition": "select fqdn from log.dn", + "description": "TDengine Nodes FQDN (Hostname)", + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "fqdn", + "options": [], + "query": "select fqdn from log.dn", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"] + }, + "timezone": "", + "title": "Multiple TDengines Monitoring", + "uid": "tdengine-multiple", + "version": 4 +} diff --git a/docs-cn/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json b/docs/en/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json rename to docs/en/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp 
b/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-full.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp rename to docs/en/14-reference/07-tdinsight/assets/TDinsight-full.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs/en/14-reference/07-tdinsight/assets/alert-manager-status.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp rename to docs/en/14-reference/07-tdinsight/assets/alert-manager-status.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs/en/14-reference/07-tdinsight/assets/alert-notification-channel.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp rename to docs/en/14-reference/07-tdinsight/assets/alert-notification-channel.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs/en/14-reference/07-tdinsight/assets/alert-query-demo.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp rename to docs/en/14-reference/07-tdinsight/assets/alert-query-demo.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs/en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp rename to docs/en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs/en/14-reference/07-tdinsight/assets/alert-rule-test.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp rename to docs/en/14-reference/07-tdinsight/assets/alert-rule-test.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp 
rename to docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs/en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs/en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs/en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp rename to docs/en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs/en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp rename to docs/en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs/en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp rename to docs/en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs/en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp rename to docs/en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp rename to docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp diff --git a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json new file mode 100644 index 0000000000000000000000000000000000000000..1add8522a712aa2cfef6187e577c42d205432b66 --- /dev/null +++ b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json @@ -0,0 +1,3358 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "TDengine nodes metrics.", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "iteration": 1634275785625, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 57, + "panels": [], + "title": "Cluster Status", + "type": "row" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + 
"x": 0, + "y": 1 + }, + "id": 32, + "options": { + "content": "

TDengine Cluster Dashboard

>\n", + "mode": "markdown" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "mnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "-- OVERVIEW --", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Time", + "operator": "+", + "reducer": "sum", + "right": "" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + } + } + } + ], + "type": "text" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 8, + "x": 0, + "y": 4 + }, + "id": 28, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Leader MNode", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "leader" + } + }, + "fieldName": "role" + } + ], + "match": "all", + "type": "include" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "dnodes" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 8, + "y": 4 + }, + "id": 70, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Time$/", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Leader MNode Create Time", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "leader" + } + }, + "fieldName": "role" + } + ], + "match": "all", + "type": "include" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time" + ] + } + } + }, + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "reducer": "min" + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + 
"filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 15, + "y": 4 + }, + "id": 29, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show variables", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Variables", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value", + "name" + ] + } + } + }, + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": ".*" + } + }, + "fieldName": "name" + } + ], + "match": "all", + "type": "include" + } + } + ], + "type": "table" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 7 + }, + "id": 33, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "select server_version()", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Server Version", + "transformations": [], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 2, + "y": 7 + }, + "id": 27, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of MNodes", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "greater", + "options": { + "value": 0 + } + }, + "fieldName": "id" + } + ], + "match": "any", + "type": "include" + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "count" + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "id" + ] + } + } + } + ], + "type": "stat" + }, + { 
+ "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 5, + "y": 7 + }, + "id": 41, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Dnodes", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "count" + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "id" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 7, + "y": 7 + }, + "id": 31, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Offline Dnodes", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "ready" + } + }, + "fieldName": "status" + } + ], + "match": "all", + "type": "exclude" + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "count" + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "id" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 7 + }, + "id": 65, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of Databases", + "transformations": [ + { + "id": "reduce", + "options": { + 
"includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "count" + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "name" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 7 + }, + "id": 69, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Number of Vgroups", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "vgroups" + ] + } + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "sum" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "role" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "from": "", + "id": 1, + "text": "", + "to": "", + "type": 2, + "value": "" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 9, + "x": 0, + "y": 10 + }, + "id": 67, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of DNodes for each Role", + "transformations": [ + { + "id": "groupBy", + "options": { + "fields": { + "end_point": { + "aggregations": [ + "count" + ], + "operation": "aggregate" + }, + "role": { + "aggregations": [], + "operation": "groupby" + } + } + } + }, + { + "id": "filterFieldsByName", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "end_point (count)": "Number of DNodes", + "role": "Dnode Role" + } + } + } + ], + "type": "table" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 10 + }, + "id": 55, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" 
+ }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show connections", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of Connections", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "count" + ] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "connId" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 10 + }, + "id": 68, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.10", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Number of Tables", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "ntables" + ] + } + } + }, + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "sum" + ] + } + } + ], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 24, + "panels": [], + "title": "Dnodes Status", + "type": "row" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "status" + }, + "properties": [ + { + "id": "custom.width", + "value": null + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "vnodes" + }, + "properties": [ + { + "id": "custom.width", + "value": null + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 16, + "x": 0, + "y": 14 + }, + "id": 36, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes Status", + "type": "table" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 14 + }, + "id": 40, + "options": { + 
"displayLabels": [], + "legend": { + "displayMode": "table", + "placement": "right", + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": { + "titleSize": 6 + } + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show dnodes", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Offline Reasons", + "transformations": [ + { + "id": "filterByValue", + "options": { + "filters": [ + { + "config": { + "id": "regex", + "options": { + "value": "ready" + } + }, + "fieldName": "status" + } + ], + "match": "all", + "type": "exclude" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "offline reason", + "end_point" + ] + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "Time": { + "aggregations": [ + "count" + ], + "operation": "aggregate" + }, + "end_point": { + "aggregations": [ + "count" + ], + "operation": "aggregate" + }, + "offline reason": { + "aggregations": [], + "operation": "groupby" + } + } + } + } + ], + "type": "piechart" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 22, + "panels": [], + "title": "Mnodes Status", + "type": "row" + }, + { + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 38, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.10", + "targets": [ + { + "formatType": "Table", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes;", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Mnodes Status", + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 20, + "panels": [], + "repeat": "fqdn", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "title": "节点资源占用 [ $fqdn ]", + "type": "row" + }, + { + "datasource": "${ds}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 26 + }, + "id": 66, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "memory", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(mem_taosd) as taosd, last(mem_total) as total from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + 
"title": "Current Memory Usage of taosd", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "last(cpu_taosd)" + }, + "properties": [ + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 26 + }, + "id": 45, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^last\\(cpu_taosd\\)$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "mem_taosd", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_taosd) from log.dn where fqdn = '$fqdn'", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current CPU Usage of taosd", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "avg band speed last one minute", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 10, + "y": 26 + }, + "id": 14, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "band_speed", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(band_speed) from log.dn where fqdn='$fqdn' and ts >= now-5m and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "band speed", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 14, + "y": 26 + }, + "id": 48, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + 
"pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(io_read) as io_read, last(io_write) as io_write from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "IO Rate", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 75 + }, + { + "color": "red", + "value": 80 + }, + { + "color": "dark-red", + "value": 95 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 19, + "y": 26 + }, + "id": 51, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "/^disk_used_percent$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select last(disk_used) as used from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select last(disk_total) as total from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "disk_used_percent", + "expression": "A/B", + "formatType": "Time series", + "hide": false, + "queryType": "Arithmetic", + "refId": "C", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Used", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 32 + }, + "id": 12, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "memory", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(mem_taosd) as taosd, max(mem_total) as total from log.dn where fqdn = '$fqdn' and 
ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max Memory Usage of taosd in Last 5 minute", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "taosd max memery last 10 minutes", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 32 + }, + "id": 43, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "mem_taosd", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(cpu_taosd) from log.dn where fqdn = '$fqdn' and ts >= now -5m and ts < now", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max CPU Usage of taosd in Last 5 minute", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "avg band speed last one minute", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 10, + "y": 32 + }, + "id": 50, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "band_speed", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(band_speed) from log.dn where fqdn = '$fqdn' and ts >= now-1h", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max band speed in last hour", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "Kbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 14, + "y": 32 + }, + "id": 49, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": 
"huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select max(io_read) as io_read, max(io_write) as io_write from log.dn where fqdn = '$fqdn'", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Max IO Rate in last hour", + "type": "gauge" + }, + { + "datasource": "${ds}", + "description": "io read/write rate", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 8192, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 4916 + }, + { + "color": "red", + "value": 6554 + } + ] + }, + "unit": "cpm" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 19, + "y": 32 + }, + "id": 52, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.10", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "targets": [ + { + "alias": "req-http", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_http) as req_http from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req-inserts", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert) as req_insert from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req-selects", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_select) as req_select from log.dn where fqdn = '$fqdn' and ts >= now - 1h interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests in last Minute", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "description": "monitor system cpu", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "cpu_system", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(cpu_system) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + 
"alias": "cpu_taosd", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(cpu_taosd) from log.dn where fqdn='$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "CPU 资源占用情况", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:58", + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:59", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "description": "monitor system cpu", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 38 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 42, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "system", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mem_system) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "taosd", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(mem_taosd) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select avg(mem_total) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(30s)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "内存资源占用情况", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:58", + "decimals": null, + "format": "decmbytes", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:59", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 
10, + "dashes": false, + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "unit": "percent" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [ + { + "$$hashKey": "object:249", + "alias": "disk_used", + "hiddenSeries": true + }, + { + "$$hashKey": "object:256", + "alias": "disk_total", + "hiddenSeries": true + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "percent", + "expression": "A/B * 100", + "formatType": "Time series", + "hide": false, + "queryType": "Arithmetic", + "refId": "C", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Percent", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:456", + "format": "percent", + "label": null, + "logBase": 1, + "max": "100", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:457", + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 49 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [ + { + "$$hashKey": "object:834", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "disk_used", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(disk_used) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_total", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(disk_total) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(30s)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "percent", + "expression": "A/B", + "formatType": "Time series", + "hide": false, + "queryType": "Arithmetic", + "refId": "C", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:456", + "format": "decgbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:457", + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "description": "total select request per minute last hour", + "fieldConfig": { + "defaults": { + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 8, + "interval": null, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxDataPoints": 100, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "req_select", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_select) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "req_insert", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "req_http", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_http) from log.dn where fqdn = '$fqdn' and ts >= $from and ts < $to interval(1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + 
"timeShift": null, + "title": "Requets Count per Minutes $fqdn", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:127", + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:128", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [], + "unit": "Kbits" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 58 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 47, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "scopedVars": { + "fqdn": { + "selected": true, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "io-read", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "A", + "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "io-write", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "B", + "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "io-read-last-hour", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "C", + "sql": "select avg(io_read) from log.dn where fqdn = '$fqdn' and ts >= now-2h and ts < now - 1h interval(1m)", + "target": "select metric", + "timeshift": { + "period": 1, + "unit": "hours" + }, + "type": "timeserie" + }, + { + "alias": "io-write-last-hour", + "formatType": "Time series", + "hide": false, + "queryType": "SQL", + "refId": "D", + "sql": "select avg(io_write) from log.dn where fqdn = '$fqdn' and ts >= now-1h and ts < now interval(1m)", + "target": "select metric", + "timeshift": { + "period": 1, + "unit": "hours" + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "IO", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:58", + "decimals": null, + "format": "Kbits", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:59", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { 
+ "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 63, + "panels": [], + "title": "Login History", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${ds}", + "fieldConfig": { + "defaults": { + "displayName": "Logins Per Minute", + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:756", + "alias": "logins", + "nullPointMode": "null as zero" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "logins", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "select count(*) from log.log where ts >= $from and ts < $to interval (1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Login Counts per Minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:585", + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:586", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "1m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "TDengine" + ], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "TDengine", + "value": "TDengine" + }, + "description": "TDengine Data Source Selector", + "error": null, + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "ds", + "options": [], + "query": "tdengine-datasource", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "huolinhe-TM1701:6030", + "value": "huolinhe-TM1701:6030" + }, + "datasource": "${ds}", + "definition": "select fqdn from log.dn", + "description": "TDengine Nodes FQDN (Hostname)", + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "fqdn", + "options": [], + "query": "select fqdn from log.dn", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "TDengine", + "uid": "tdengine", + "version": 8 +} \ No newline at end of file diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine-grafana.json 
b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana.json
similarity index 100%
rename from docs-cn/14-reference/07-tdinsight/assets/tdengine-grafana.json
rename to docs/en/14-reference/07-tdinsight/assets/tdengine-grafana.json
diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs/en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp
similarity index 100%
rename from docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp
rename to docs/en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e74c9de7b2aa71278a99d45f250e0dcaf86d4704
--- /dev/null
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -0,0 +1,428 @@
+---
+title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine
+sidebar_label: TDinsight
+---
+
+TDinsight is a solution for monitoring TDengine using its built-in native monitoring database and [Grafana].
+
+After TDengine starts, it automatically creates a monitoring database named `log` and writes many metrics into it at regular intervals. The metrics include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, and slow queries, as well as other information like important system operations (user login, database creation, database deletion, etc.) and error alarms. With [Grafana] and the [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode/dnode/mnode status, exception alerts, and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real time. This article will guide users through installing the Grafana server, automatically installing the TDengine data source plugin, and deploying the TDinsight visualization panel using the `TDinsight.sh` installation script.
+
+## System Requirements
+
+To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
+
+## Installing Grafana
+
+We recommend using the latest [Grafana] version 7 or 8. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana installation instructions](https://grafana.com/docs/grafana/latest/installation/).
+
+### Installing Grafana on Debian or Ubuntu
+
+For Debian or Ubuntu operating systems, we recommend adding the official Grafana APT repository and installing with the following commands.
+
+```bash
+sudo apt-get install -y apt-transport-https
+sudo apt-get install -y software-properties-common wget
+wget -q -O - https://packages.grafana.com/gpg.key |\
+  sudo apt-key add -
+echo "deb https://packages.grafana.com/oss/deb stable main" |\
+  sudo tee -a /etc/apt/sources.list.d/grafana.list
+sudo apt-get update
+sudo apt-get install grafana
+```
+
+### Install Grafana on CentOS / RHEL
+
+You can install it from its official YUM repository.
+
+```bash
+sudo tee /etc/yum.repos.d/grafana.repo << EOF
+[grafana]
+name=grafana
+baseurl=https://packages.grafana.com/oss/rpm
+repo_gpgcheck=1
+enabled=1
+gpgcheck=1
+gpgkey=https://packages.grafana.com/gpg.key
+sslverify=1
+sslcacert=/etc/pki/tls/certs/ca-bundle.crt
+EOF
+sudo yum install grafana
+```
+
+Or install it with the RPM package.
+
+```bash
+wget https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
+sudo yum install grafana-7.5.11-1.x86_64.rpm
+# or
+sudo yum install \
+  https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
+```
+
+## Automated deployment of TDinsight
+
+We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly.
+
+You can download the script via `wget` or other tools:
+
+```bash
+wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh
+chmod +x TDinsight.sh
+./TDinsight.sh
+```
+
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167), and write a [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file based on the command-line options, so that deployment and later updates are automated. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
+
+Assuming you run TDengine and Grafana with their default settings on the same host, run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
+
+The following is a description of TDinsight.sh usage.
+
+```text
+Usage:
+   ./TDinsight.sh
+   ./TDinsight.sh -h|--help
+   ./TDinsight.sh -n <ds-name> -a <api-url> -u <user> -p <password>
+
+Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 systems.
+
+-h, -help, --help            Display help
+
+-V, -verbose, --verbose      Run script in verbose mode. Will print out each step of execution.
+
+-v, --plugin-version         TDengine datasource plugin version, [default: latest]
+
+-P, --grafana-provisioning-dir  Grafana provisioning directory, [default: /etc/grafana/provisioning/]
+-G, --grafana-plugins-dir    Grafana plugins directory, [default: /var/lib/grafana/plugins]
+-O, --grafana-org-id         Grafana organization id. [default: 1]
+
+-n, --tdengine-ds-name       TDengine datasource name, no space. [default: TDengine]
+-a, --tdengine-api           TDengine REST API endpoint. [default: http://127.0.0.1:6041]
+-u, --tdengine-user          TDengine user name. [default: root]
+-p, --tdengine-password      TDengine password. [default: taosdata]
+
+-i, --tdinsight-uid          Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
+-t, --tdinsight-title        Dashboard title. [default: TDinsight]
+-e, --tdinsight-editable     If the provisioning dashboard could be editable. [default: false]
+
+-E, --external-notifier      Apply external notifier uid to TDinsight dashboard.
+
+Alibaba Cloud SMS as Notifier:
+-s, --sms-enabled            To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook.
+-N, --sms-notifier-name      Provisioning notifier name. [default: TDinsight Builtin SMS]
+-U, --sms-notifier-uid       Provisioning notifier uid, use lowercase notifier name by default.
+-D, --sms-notifier-is-default  Set notifier as default.
+-I, --sms-access-key-id      Alibaba Cloud SMS access key id
+-K, --sms-access-key-secret  Alibaba Cloud SMS access key secret
+-S, --sms-sign-name          Sign name
+-C, --sms-template-code      Template code
+-T, --sms-template-param     Template param, an escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
+-B, --sms-phone-numbers      Comma-separated numbers list, e.g. "189xxxxxxxx,132xxxxxxxx"
+-L, --sms-listen-addr        [default: 127.0.0.1:9100]
+```
+
+Most command-line options can equally be set via environment variables.
+
+| Short Options | Long Options               | Environment Variables        | Description                                                                     |
+| ------------- | -------------------------- | ---------------------------- | ------------------------------------------------------------------------------- |
+| -v            | --plugin-version           | TDENGINE_PLUGIN_VERSION      | The TDengine data source plugin version, the latest version is used by default. |
+| -P            | --grafana-provisioning-dir | GF_PROVISIONING_DIR          | The Grafana provisioning directory, defaults to `/etc/grafana/provisioning/`.   |
+| -G            | --grafana-plugins-dir      | GF_PLUGINS_DIR               | The Grafana plugin directory, defaults to `/var/lib/grafana/plugins`.           |
+| -O            | --grafana-org-id           | GF_ORG_ID                    | The Grafana organization ID, default is 1.                                      |
+| -n            | --tdengine-ds-name         | TDENGINE_DS_NAME             | The name of the TDengine data source, defaults to TDengine.                     |
+| -a            | --tdengine-api             | TDENGINE_API                 | The TDengine REST API endpoint. Defaults to `http://127.0.0.1:6041`.            |
+| -u            | --tdengine-user            | TDENGINE_USER                | TDengine username. [default: root]                                              |
+| -p            | --tdengine-password        | TDENGINE_PASSWORD            | TDengine password. [default: taosdata]                                          |
+| -i            | --tdinsight-uid            | TDINSIGHT_DASHBOARD_UID      | `uid` of the TDinsight dashboard. [default: tdinsight]                          |
+| -t            | --tdinsight-title          | TDINSIGHT_DASHBOARD_TITLE    | TDinsight dashboard title. [default: TDinsight]                                 |
+| -e            | --tdinsight-editable       | TDINSIGHT_DASHBOARD_EDITABLE | Whether the dashboard is configured to be editable. [default: false]            |
+| -E            | --external-notifier        | EXTERNAL_NOTIFIER            | Apply the external notifier uid to the TDinsight dashboard.                     |
+| -s            | --sms-enabled              | SMS_ENABLED                  | Enable the Alibaba Cloud SMS webhook built into the tdengine-datasource plugin. |
+| -N            | --sms-notifier-name        | SMS_NOTIFIER_NAME            | The name of the provisioning notifier. [default: `TDinsight Builtin SMS`]       |
+| -U            | --sms-notifier-uid         | SMS_NOTIFIER_UID             | "Notification Channel" `uid`; the lowercase notifier name is used by default, with other characters replaced by "-". |
+| -D            | --sms-notifier-is-default  | SMS_NOTIFIER_IS_DEFAULT      | Set the built-in SMS notifier as the default notification channel.              |
+| -I            | --sms-access-key-id        | SMS_ACCESS_KEY_ID            | Alibaba Cloud SMS access key id                                                 |
+| -K            | --sms-access-key-secret    | SMS_ACCESS_KEY_SECRET        | Alibaba Cloud SMS access key secret                                             |
+| -S            | --sms-sign-name            | SMS_SIGN_NAME                | Sign name                                                                       |
+| -C            | --sms-template-code        | SMS_TEMPLATE_CODE            | Template code                                                                   |
+| -T            | --sms-template-param       | SMS_TEMPLATE_PARAM           | JSON template for template parameters                                           |
+| -B            | --sms-phone-numbers        | SMS_PHONE_NUMBERS            | A comma-separated list of phone numbers, e.g. "189xxxxxxxx,132xxxxxxxx"         |
`"189xxxxxxxx,132xxxxxxxx"` | +| -L | --sms-listen-addr | SMS_LISTEN_ADDR | Built-in SMS webhook listener address, default is `127.0.0.1:9100` | + +Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script. + +```bash +sudo . /TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord +``` + +We provide a "-E" option to configure TDinsight to use the existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel. + +```bash +curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq +``` + +Use the `uid` value obtained above as `-E` input. + +```bash +sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier +``` + +If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, you should enable it with the `-s` flag add the following parameters. + +- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`. +- `-U`: Channel uid, default is lowercase of `name`, any other character is replaced with -, for the default `-N`, its uid is `tdinsight-builtin-sms`. +- `-I`: Alibaba Cloud SMS access key id. +- `-K`: Alibaba Cloud SMS access secret key. +- `-S`: Alibaba Cloud SMS signature. +- `-C`: Alibaba Cloud SMS template id. +- `-T`: Alibaba Cloud SMS template parameters, for JSON format template, example is as follows `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name and alarm content. +- `-B`: a list of phone numbers, separated by a comma `,`. + +If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature. + +```bash +sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' +# If using built-in SMS notifications +sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \ + -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611 +``` + +Please note that the configuration data source, notification channel, and dashboard are not changeable on the front end. You should update the configuration again via this script or manually change the configuration file in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana, use the `-P` option to change it as needed). + +Specifically, `-O` can be used to set the organization ID when you are using Grafana Cloud or another organization. `-G` specifies the Grafana plugin installation directory. The `-e` parameter sets the dashboard to be editable. + +## Set up TDinsight manually + +### Install the TDengine data source plugin + +Install the latest version of the TDengine Data Source plugin from GitHub. 
+
+```bash
+get_latest_release() {
+  curl --silent "https://api.github.com/repos/taosdata/grafanaplugin/releases/latest" |
+  grep '"tag_name":' |
+  sed -E 's/.*"v([^"]+)".*/\1/'
+}
+TDENGINE_PLUGIN_VERSION=$(get_latest_release)
+sudo grafana-cli \
+  --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v$TDENGINE_PLUGIN_VERSION/tdengine-datasource-$TDENGINE_PLUGIN_VERSION.zip \
+  plugins install tdengine-datasource
+```
+
+:::note
+Plugins of version 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins.
+
+```ini
+[plugins]
+allow_loading_unsigned_plugins = tdengine-datasource
+```
+:::
+
+### Start the Grafana service
+
+```bash
+sudo systemctl start grafana-server
+sudo systemctl enable grafana-server
+```
+
+### Logging into Grafana
+
+Open the default Grafana URL in a web browser: `http://localhost:3000`.
+The default username/password is `admin`. Grafana will require a password change after the first login.
+
+### Adding a TDengine Data Source
+
+Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button.
+
+![TDengine Database TDinsight Add data source button](./assets/howto-add-datasource-button.webp)
+
+Search for and select **TDengine**.
+
+![TDengine Database TDinsight Add datasource](./assets/howto-add-datasource-tdengine.webp)
+
+Configure the TDengine datasource.
+
+![TDengine Database TDinsight Datasource Configuration](./assets/howto-add-datasource.webp)
+
+Save and test. It will report 'TDengine Data source is working' under normal circumstances.
+
+![TDengine Database TDinsight datasource test](./assets/howto-add-datasource-test.webp)
+
+### Importing dashboards
+
+Point to **+** / **Create** -> **Import** (or the `/dashboard/import` URL).
+
+![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp)
+
+Type the dashboard ID `15167` in the **Import via grafana.com** field and click **Load**.
+
+![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp)
+
+Once the import is complete, the full page view of TDinsight is shown below.
+
+![TDengine Database TDinsight show](./assets/TDinsight-full.webp)
+
+## TDinsight dashboard details
+
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources, i.e., [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) and databases.
+
+Details of the metrics are as follows.
+
+### Cluster Status
+
+![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp)
+
+This section contains the current information and status of the cluster; the alert information is also shown here (from left to right, top to bottom).
+
+- **First EP**: the `firstEp` setting in the current TDengine cluster.
+- **Version**: TDengine server version (leader mnode).
+- **Leader Uptime**: the time elapsed since the current leader MNode was elected as leader.
+- **Expire Time** - Enterprise version expiration time.
+- **Used Measuring Points** - the number of measuring points used by the Enterprise Edition.
+- **Databases** - the number of databases.
+- **Connections** - the number of current connections.
+- **DNodes/MNodes/VGroups/VNodes** - total number of each resource and the number alive.
+- **DNodes/MNodes/VGroups/VNodes Alive Percent**: the ratio of alive to total for each resource, with an alert rule enabled that triggers when the resource liveness rate (the average percentage of healthy resources over 1 minute) is less than 100%.
+- **Measuring Points Used**: the number of measuring points used, with an alert rule enabled (no data available in the community version; healthy by default).
+- **Grants Expire Time**: the expiration time of the Enterprise Edition license, with an alert rule enabled (no data available in the community version; healthy by default).
+- **Error Rate**: aggregate error rate (average number of errors per second) for alert-enabled clusters.
+- **Variables**: `show variables` table display.
+
+### DNodes Status
+
+![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp)
+
+- **DNodes Status**: simple table view of `show dnodes`.
+- **DNodes Lifetime**: the time elapsed since the dnode was created.
+- **DNodes Number**: the change in the number of DNodes over time.
+- **Offline Reason**: if any dnode status is offline, the reason for being offline is shown as a pie chart.
+
+### MNode Overview
+
+![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp)
+
+1. **MNodes Status**: a simple table view of `show mnodes`.
+2. **MNodes Number**: similar to `DNodes Number`, the change in the number of MNodes over time.
+
+### Request
+
+![TDengine Database TDinsight tdinsight requests](./assets/TDinsight-4-requests.webp)
+
+1. **Requests Rate (Inserts per Second)**: average number of inserts per second.
+2. **Requests (Selects)**: number of query requests and their rate of change (count per second).
+3. **Requests (HTTP)**: number of HTTP requests and request rate (count per second).
+
+### Database
+
+![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp)
+
+Database usage, repeated for each value of the variable `$database`, i.e., multiple rows per database.
+
+1. **STables**: number of super tables.
+2. **Total Tables**: number of all tables.
+3. **Sub Tables**: number of subtables across all super tables.
+4. **Tables**: graph of the number of normal tables over time.
+5. **Tables Number Foreach VGroups**: the number of tables contained in each VGroup.
+
+### DNode Resource Usage
+
+![TDengine Database TDinsight dnode usage](./assets/TDinsight-6-dnode-usage.webp)
+
+Data node resource usage, repeated for each value of the variable `$fqdn`, i.e., one set of panels per data node (an example of querying these metrics directly follows this list). Includes:
+
+1. **Uptime**: the time elapsed since the dnode was created.
+2. **Has MNodes?**: whether the current dnode is a mnode.
+3. **CPU Cores**: the number of CPU cores.
+4. **VNodes Number**: the number of VNodes in the current dnode.
+5. **VNodes Masters**: the number of vnodes in the leader role.
+6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
+7. **Current Memory Usage of taosd**: memory usage of taosd processes.
+8. **Disk Used**: the total disk usage percentage of the taosd data directory.
+9. **CPU Usage**: process and system CPU usage.
+10. **RAM Usage**: time series view of RAM usage metrics.
+11. **Disk Used**: disks used at each level of multi-level storage (default is level0).
+12. **Disk Increasing Rate per Minute**: percentage increase or decrease in disk usage per minute.
+13. **Disk IO**: disk IO rate.
+14. **Net IO**: network IO, the aggregate network IO rate excluding the local network.
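+Each of these panels is backed by a SQL query against the `log` database, like the ones embedded in the dashboard JSON earlier in this patch. As a quick sanity check outside Grafana, you can run a comparable query from the shell with the TDengine CLI. This is a minimal sketch, assuming the `log` database is enabled and substituting a hypothetical literal FQDN for the dashboard variable `$fqdn`:
+
+```bash
+# Replace tdengine.local:6030 with an FQDN reported by `show dnodes`.
+# avg(disk_used) over 30-second windows mirrors the "Disk Used" panel query.
+taos -s "select avg(disk_used) from log.dn where fqdn = 'tdengine.local:6030' and ts >= now - 1h interval(30s)"
+```
+
+If this returns rows, the metrics the dashboard depends on are being collected.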
+
+### Login History
+
+![TDengine Database TDinsight Login History](./assets/TDinsight-7-login-history.webp)
+
+Currently, only the number of logins per minute is reported.
+
+### Monitoring taosAdapter
+
+![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp)
+
+Supports monitoring taosAdapter request statistics and status details. Includes:
+
+1. **http_request**: contains the total number of requests, the number of failed requests, and the number of requests being processed
+2. **top 3 request endpoint**: data of the top 3 requests grouped by endpoint
+3. **Memory Used**: taosAdapter memory usage
+4. **latency_quantile(ms)**: latency quantiles at the (1, 2, 5, 9, 99) stages
+5. **top 3 failed request endpoint**: data of the top 3 failed requests grouped by endpoint
+6. **CPU Used**: taosAdapter CPU usage
+
+## Upgrade
+
+TDinsight installed via the `TDinsight.sh` script can be upgraded to the latest Grafana plugin and TDinsight dashboard by re-running the script.
+
+In the case of a manual installation, follow the steps above to install the new Grafana plugin and dashboard yourself.
+
+## Uninstall
+
+TDinsight installed via the `TDinsight.sh` script can be removed, together with the associated resources, by running `TDinsight.sh -R`.
+
+To completely uninstall TDinsight after a manual installation, you need to clean up the following:
+
+1. the TDinsight dashboard in Grafana.
+2. the data source in Grafana.
+3. the `tdengine-datasource` plugin in the plugin installation directory.
+
+## Integrated Docker Example
+
+```bash
+git clone --depth 1 https://github.com/taosdata/grafanaplugin.git
+cd grafanaplugin
+```
+
+Modify the `docker-compose.yml` file as needed:
+
+```yaml
+version: '3.7'
+
+services:
+  grafana:
+    image: grafana/grafana:7.5.10
+    volumes:
+      - ./dist:/var/lib/grafana/plugins/tdengine-datasource
+      - ./grafana/grafana.ini:/etc/grafana/grafana.ini
+      - ./grafana/provisioning/:/etc/grafana/provisioning/
+      - grafana-data:/var/lib/grafana
+    environment:
+      TDENGINE_API: ${TDENGINE_API}
+      TDENGINE_USER: ${TDENGINE_USER}
+      TDENGINE_PASS: ${TDENGINE_PASS}
+      SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
+      SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
+      SMS_SIGN_NAME: ${SMS_SIGN_NAME}
+      SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
+      SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
+      SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
+      SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
+    ports:
+      - 3000:3000
+volumes:
+  grafana-data:
+```
+
+Replace the environment variables in `docker-compose.yml`, or save them to a `.env` file, then start Grafana with `docker-compose up`. See the [Docker Compose Reference](https://docs.docker.com/compose/).
+
+```bash
+docker-compose up -d
+```
+
+TDinsight is then deployed via provisioning. Go to http://localhost:3000/d/tdinsight/ to view the dashboard.
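+As a quick check that the stack came up correctly, you can probe Grafana's HTTP API from the host. This is a sketch assuming the default `admin:admin` credentials and the `tdinsight` dashboard uid used throughout this article:
+
+```bash
+# Grafana liveness (no authentication required for /api/health)
+curl -s http://localhost:3000/api/health
+# Confirm the provisioned TDinsight dashboard exists
+curl -s -u admin:admin http://localhost:3000/api/dashboards/uid/tdinsight | jq .dashboard.title
+```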
+
+[grafana]: https://grafana.com
+[tdengine]: https://tdengine.com
diff --git a/docs-en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md
similarity index 100%
rename from docs-en/14-reference/08-taos-shell.md
rename to docs/en/14-reference/08-taos-shell.md
diff --git a/docs-en/14-reference/09-support-platform/_category_.yml b/docs/en/14-reference/09-support-platform/_category_.yml
similarity index 100%
rename from docs-en/14-reference/09-support-platform/_category_.yml
rename to docs/en/14-reference/09-support-platform/_category_.yml
diff --git a/docs-en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md
similarity index 100%
rename from docs-en/14-reference/09-support-platform/index.md
rename to docs/en/14-reference/09-support-platform/index.md
diff --git a/docs-en/14-reference/11-docker/_category_.yml b/docs/en/14-reference/11-docker/_category_.yml
similarity index 100%
rename from docs-en/14-reference/11-docker/_category_.yml
rename to docs/en/14-reference/11-docker/_category_.yml
diff --git a/docs-en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
similarity index 100%
rename from docs-en/14-reference/11-docker/index.md
rename to docs/en/14-reference/11-docker/index.md
diff --git a/docs-en/14-reference/12-config/_category_.yml b/docs/en/14-reference/12-config/_category_.yml
similarity index 100%
rename from docs-en/14-reference/12-config/_category_.yml
rename to docs/en/14-reference/12-config/_category_.yml
diff --git a/docs-en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
similarity index 100%
rename from docs-en/14-reference/12-config/index.md
rename to docs/en/14-reference/12-config/index.md
diff --git a/docs/en/14-reference/12-directory.md b/docs/en/14-reference/12-directory.md
new file mode 100644
index 0000000000000000000000000000000000000000..d6cffd22e054a759e67d34dd3e8fbb1a8585569c
--- /dev/null
+++ b/docs/en/14-reference/12-directory.md
@@ -0,0 +1,39 @@
+---
+title: File directory structure
+description: "TDengine installation directory description"
+---
+
+After TDengine is installed, the following directories or files will be created in the system by default.
+
+| directory/file           | description                                                                                        |
+| ------------------------ | -------------------------------------------------------------------------------------------------- |
+| /usr/local/taos/bin      | The TDengine executable directory. The executable files are soft-linked to the /usr/bin directory. |
+| /usr/local/taos/driver   | The TDengine dynamic link library directory. It is soft-linked to the /usr/lib directory.          |
+| /usr/local/taos/examples | Application examples for TDengine in various programming languages.                                |
+| /usr/local/taos/include  | The header files for TDengine's external C interface.                                              |
+| /etc/taos/taos.cfg       | TDengine default [configuration file]                                                              |
+| /var/lib/taos            | TDengine's default data file directory. The location can be changed via [configuration file].      |
+| /var/log/taos            | TDengine default log file directory. The location can be changed via [configuration file].         |
+
+## Executable files
+
+All executable files of TDengine are in the _/usr/local/taos/bin_ directory by default. These include:
+
+- _taosd_: the TDengine server-side executable
+- _taos_: the TDengine CLI executable
+- _taosdump_: data import and export tool
+- _taosBenchmark_: TDengine testing tool
+- _remove.sh_: script to uninstall TDengine; please execute it carefully. It is linked to the **rmtaos** command in the /usr/bin directory.
It will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, and `/var/log/taos`.
+- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
+- _tarbitrator_: provides arbitration for two-node cluster deployments
+- _TDinsight.sh_: script to download TDinsight and install it
+- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
+- _taosd-dump-cfg.gdb_: a gdb script to facilitate debugging taosd
+
+:::note
+After version 2.4.0.0, taosdump requires taosTools as a standalone installation. A new version of taosBenchmark is included in taosTools too.
+:::
+
+:::tip
+You can configure different data directories and log directories by modifying the system configuration file `taos.cfg`.
+:::
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
new file mode 100644
index 0000000000000000000000000000000000000000..dea14aa90a24f16dad5069a29497661333c2d00a
--- /dev/null
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -0,0 +1,159 @@
+---
+title: Schemaless Writing
+description: "The schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
+---
+
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, and device monitoring. Due to changes in business or functional requirements, or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine, starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+
+The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL, and you can write data to them directly via SQL statements. Note that the names of tables created by schemaless writing are derived from fixed mapping rules over the tag values, so the names are not human-meaningful and lack readability.
+
+## Schemaless Writing Line Protocol
+
+TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API which parsing standard to apply to the input content.
+
+For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extensions, based first on InfluxDB's line protocol. These extensions allow users to control the (super table) schema at a finer granularity.
+
+With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing).
+
+```json
+measurement,tag_set field_set timestamp
+```
+
+where:
+
+- measurement will be used as the data table name. It will be separated from tag_set by a comma.
+- tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags' data can be separated by commas. It is separated from field_set by a space.
+- field_set will be used as normal column data in the format `<field_key>=<field_value>,<field_key>=<field_value>`, again using commas to separate multiple normal columns of data. It is separated from the timestamp by a space.
+- The timestamp is the primary key corresponding to the data in this row.
+
+All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
+
+In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
+
+- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
+- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
+- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII characters.)
+- Numeric types will be distinguished from data types by the suffix.
+
+| **Serial number** | **Postfix** | **Mapping type**              | **Size (bytes)** |
+| ----------------- | ----------- | ----------------------------- | ---------------- |
+| 1 | none or f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+
+- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
+
+For example, the following data row writes one row to the super table named `st`: tag t1 is "3" (NCHAR), tag t2 is "4" (NCHAR), and tag t3 is "t3" (NCHAR); column c1 is 3 (BIGINT), c2 is false (BOOL), c3 is "passit" (BINARY), and c4 is 4 (DOUBLE); and the primary key timestamp is 1626006833639000000.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
+```
+
+Note that if the wrong case is used when describing the data type suffix, or if the wrong data type is specified for the data, it may cause an error message and cause the data to fail to be written.
+
+## Main processing logic for schemaless writing
+
+Schemaless writes process row data according to the following principles.
+
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the keys and values of the tags into the following string:
+
+```json
+"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
+```
+
+Note that tag_key1 and tag_key2 here do not follow the original order in which the user entered the tags, but are sorted in ascending string order of the tag names. Therefore, tag_key1 is not necessarily the first tag entered in the line protocol.
+The string's MD5 hash value "md5_val" is calculated after sorting is completed. The result is then combined with the prefix to generate the table name "t_md5_val"; "t_" is a fixed prefix that every table generated by this mapping relationship has.
+
+2. If the super table obtained by parsing the line protocol does not exist, this super table is created (it is not recommended to create a super table manually; otherwise, the inserted data may be abnormal).
+3. If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable according to the subtable name determined in steps 1 or 2.
+4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incrementally).
+5. If there are some tag columns or regular columns in the super table that are not given values in a data row, then the values of these columns are set to NULL.
+6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only ever increased, never decreased) to ensure complete preservation of the data.
+7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
+8. In order to improve the efficiency of writing, the order of fields in the same super table should be the same. If the order is different, you need to set the parameter smlDataFormat to false; otherwise, the data in the database will be abnormal.
+
+:::tip
+All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures, such as the total length of each row of data not exceeding 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+:::
+
+## Time resolution recognition
+
+Three specified modes are supported in the schemaless writing process, as follows:
+
+| **Serial** | **Value**           | **Description**             |
+| ---------- | ------------------- | --------------------------- |
+| 1          | SML_LINE_PROTOCOL   | InfluxDB Line Protocol      |
+| 2          | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
+| 3          | SML_JSON_PROTOCOL   | JSON protocol format        |
+
+In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.
+
+| **Serial Number** | **Time Resolution Definition**    | **Meaning**           |
+| ----------------- | --------------------------------- | --------------------- |
+| 1                 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
+| 2                 | TSDB_SML_TIMESTAMP_HOURS          | hours                 |
+| 3                 | TSDB_SML_TIMESTAMP_MINUTES        | minutes               |
+| 4                 | TSDB_SML_TIMESTAMP_SECONDS        | seconds               |
+| 5                 | TSDB_SML_TIMESTAMP_MILLI_SECONDS  | milliseconds          |
+| 6                 | TSDB_SML_TIMESTAMP_MICRO_SECONDS  | microseconds          |
+| 7                 | TSDB_SML_TIMESTAMP_NANO_SECONDS   | nanoseconds           |
+
+In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined based on the length of the timestamp (in the same way as the OpenTSDB standard operation), and the user-specified time resolution is ignored at this point.
+
+## Data schema mapping rules
+
+This section describes how line protocol data is mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
+
+- The tag name in tag_set is the name of the tag in the data schema.
+- The name in field_set is the column's name.
+
+The following data is used as an example to illustrate the mapping rules.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
+```
+
+The row data mapping generates a super table `st`, which contains three tags of type NCHAR: t1, t2, t3. The five data columns are ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (double). The mapping becomes the following SQL statement.
+
+```json
+create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 double) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
+```
+
+## Data schema change handling
+
+This section describes the impact on the data schema for different line protocol data writing cases.
+
+When writing with the line protocol to a field whose type has already been explicitly identified, a subsequent change to the field's type definition results in an explicit data schema error, i.e., the write API reports an error, as shown below.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
+```
+
+The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error with schemaless writing.
+
+If an earlier line of the protocol declares a data column as BINARY and a subsequent line requires a longer BINARY value, a super table schema change is triggered.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+```
+
+Parsing the first line declares column c5 as a BINARY(4) field. The second line is also parsed with c5 as a BINARY column, but its value is 6 characters wide, so the width of the BINARY field is increased to accommodate the new string.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+```
+
+Compared with the first line, the second line has an additional column c6 of type BINARY(6), so a column c6 of type BINARY(6) is automatically added to the super table.
+
+## Write integrity
+
+TDengine provides idempotency guarantees for data writing, i.e., it is safe to call the API repeatedly to retry writes that failed with errors. However, it does not give atomicity guarantees for writing multiple rows of data: when writing numerous rows in one batch, some rows may be written successfully while others fail.
+
+## Error code
+
+If there is an error in the data itself during the schemaless writing process, the application will get the `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, which indicates that the error occurred during writing. The other error codes are consistent with those of TDengine's other interfaces, and `taos_errstr()` can be called to get the specific cause of the error.
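+Because the extended protocol is InfluxDB-compatible, a convenient way to exercise schemaless writing without writing any client code is taosAdapter's InfluxDB-compatible write endpoint. The following is a minimal sketch, assuming taosAdapter is listening on the default port 6041 with the default root/taosdata credentials and that the target database `test` already exists:
+
+```bash
+# Write the example line from above through taosAdapter's InfluxDB v1 endpoint.
+# The timestamp is in nanoseconds, the default InfluxDB precision.
+curl -u root:taosdata -X POST "http://localhost:6041/influxdb/v1/write?db=test" \
+  --data-binary 'st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000'
+```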
diff --git a/docs-en/14-reference/13-schemaless/_category_.yml b/docs/en/14-reference/13-schemaless/_category_.yml similarity index 100% rename from docs-en/14-reference/13-schemaless/_category_.yml rename to docs/en/14-reference/13-schemaless/_category_.yml diff --git a/docs-en/14-reference/_category_.yml b/docs/en/14-reference/_category_.yml similarity index 100% rename from docs-en/14-reference/_category_.yml rename to docs/en/14-reference/_category_.yml diff --git a/docs-en/14-reference/_collectd.mdx b/docs/en/14-reference/_collectd.mdx similarity index 100% rename from docs-en/14-reference/_collectd.mdx rename to docs/en/14-reference/_collectd.mdx diff --git a/docs-en/14-reference/_icinga2.mdx b/docs/en/14-reference/_icinga2.mdx similarity index 100% rename from docs-en/14-reference/_icinga2.mdx rename to docs/en/14-reference/_icinga2.mdx diff --git a/docs-en/14-reference/_prometheus.mdx b/docs/en/14-reference/_prometheus.mdx similarity index 100% rename from docs-en/14-reference/_prometheus.mdx rename to docs/en/14-reference/_prometheus.mdx diff --git a/docs-en/14-reference/_statsd.mdx b/docs/en/14-reference/_statsd.mdx similarity index 100% rename from docs-en/14-reference/_statsd.mdx rename to docs/en/14-reference/_statsd.mdx diff --git a/docs-en/14-reference/_tcollector.mdx b/docs/en/14-reference/_tcollector.mdx similarity index 100% rename from docs-en/14-reference/_tcollector.mdx rename to docs/en/14-reference/_tcollector.mdx diff --git a/docs-en/14-reference/_telegraf.mdx b/docs/en/14-reference/_telegraf.mdx similarity index 100% rename from docs-en/14-reference/_telegraf.mdx rename to docs/en/14-reference/_telegraf.mdx diff --git a/docs-en/14-reference/index.md b/docs/en/14-reference/index.md similarity index 100% rename from docs-en/14-reference/index.md rename to docs/en/14-reference/index.md diff --git a/docs-cn/14-reference/taosAdapter-architecture.webp b/docs/en/14-reference/taosAdapter-architecture.webp similarity index 100% rename from docs-cn/14-reference/taosAdapter-architecture.webp rename to docs/en/14-reference/taosAdapter-architecture.webp diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx new file mode 100644 index 0000000000000000000000000000000000000000..696be9e4d5bd3e53619a55d02ef4b8dce67fce94 --- /dev/null +++ b/docs/en/20-third-party/01-grafana.mdx @@ -0,0 +1,218 @@ +--- +sidebar_label: Grafana +title: Grafana +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. + +You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). + +## Prerequisites + +In order for Grafana to add the TDengine data source successfully, the following preparations are required: + +1. The TDengine cluster is deployed and functioning properly +2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. + +Record these values: + +- TDengine REST API url: `http://tdengine.local:6041`. +- TDengine cluster authorization, with user + password. + +## Installing Grafana + +TDengine currently supports Grafana versions 7.5 and above. 
Users can go to the Grafana official website to download the appropriate installation package for the current operating system and install it. The download address is as follows: https://grafana.com/grafana/download.
+
+## Configuring Grafana
+
+### Install Grafana Plugin and Configure Data Source
+
+
+
+Under Grafana 8, the plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (but for Grafana 7.x, use **With Script** or **Install & Configure Manually**). Find the page at **Configurations > Plugins**, search for **TDengine**, and click it to install.
+
+![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png)
+
+Installation may take a few minutes, after which you can **Create a TDengine data source**:
+
+![Install and configure Grafana data source](./grafana/grafana-install-and-config.png)
+
+Then you can add a TDengine data source by filling in the configuration options.
+
+![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png)
+
+You can create dashboards with TDengine now.
+
+
+
+On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password will install the TDengine data source plugin and add a data source named TDengine. This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users.
+
+```sh
+bash -c "$(curl -fsSL \
+  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+  -a http://localhost:6041 \
+  -u root \
+  -p taosdata
+```
+
+Restart the Grafana service and open Grafana in a web browser, usually at http://localhost:3000.
+
+Save the script and type `./install.sh --help` for the full usage of the script.
+
+
+
+Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
+
+```bash
+grafana-cli plugins install tdengine-datasource
+# with sudo
+sudo -u grafana grafana-cli plugins install tdengine-datasource
+```
+
+Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/tags) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory.
+
+```bash
+GF_VERSION=3.2.2
+# from GitHub
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+# from Grafana
+wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download
+```
+
+Taking CentOS 7.2 as an example, extract the plugin package to the /var/lib/grafana/plugins directory and restart Grafana.
+
+```bash
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
+```
+
+If Grafana is running in a Docker environment, the TDengine plugin can be automatically installed and set up using the following environment variable settings:
+
+```bash
+GF_INSTALL_PLUGINS=tdengine-datasource
+```
+
+Now users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure.
+
+![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp)
+
+Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure.
+
+![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp)
+
+Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration.
+
+![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp)
+
+- Host: IP address and REST service port (6041) of a server in the TDengine cluster that provides the REST service (offered by taosd before 2.4 and by taosAdapter since 2.4); by default, use `http://localhost:6041`.
+- User: TDengine user name.
+- Password: TDengine user password.
+
+Click `Save & Test` to test. You should see a success message if the test worked.
+
+![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)
+
+
+
+Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This will install the `tdengine-datasource` plugin when the Grafana container starts:
+
+```bash
+docker run -d \
+  -p 3000:3000 \
+  --name=grafana \
+  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+  grafana/grafana
+```
+
+You can set up a zero-configuration stack for TDengine + Grafana with a [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
+
+1. Save the provisioning configuration file to `tdengine.yml`.
+
+   ```yml
+   apiVersion: 1
+   datasources:
+     - name: TDengine
+       type: tdengine-datasource
+       orgId: 1
+       url: "$TDENGINE_API"
+       isDefault: true
+       secureJsonData:
+         url: "$TDENGINE_URL"
+         basicAuth: "$TDENGINE_BASIC_AUTH"
+         token: "$TDENGINE_CLOUD_TOKEN"
+       version: 1
+       editable: true
+   ```
+
+2. Write `docker-compose.yml` with the [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) images.
+
+   ```yml
+   version: "3.7"
+
+   services:
+     tdengine:
+       image: tdengine/tdengine:2.6.0.2
+       environment:
+         TAOS_FQDN: tdengine
+       volumes:
+         - tdengine-data:/var/lib/taos/
+     grafana:
+       image: grafana/grafana:8.5.6
+       volumes:
+         - ./tdengine.yml:/etc/grafana/provisioning/tdengine.yml
+         - grafana-data:/var/lib/grafana
+       environment:
+         # install tdengine plugin at start
+         GF_INSTALL_PLUGINS: "tdengine-datasource"
+         TDENGINE_URL: "http://tdengine:6041"
+         # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+         TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+       ports:
+         - 3000:3000
+   volumes:
+     grafana-data:
+     tdengine-data:
+   ```
+
+3. Start TDengine and Grafana services: `docker-compose up -d`.
+
+Open Grafana at http://localhost:3000, and you can now add dashboards with TDengine.
+
+
+
+### Create Dashboard
+
+Go back to the main interface to create a dashboard and click Add Query to enter the panel query page:
+
+![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp)
+
+As shown above, select the `TDengine` data source in the `Query` field and enter the corresponding SQL in the query box below to run the query.
+
+- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to`, and `interval` are built-in variables of the TDengine plugin, indicating the time range and interval fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported.
+- ALIAS BY: this allows you to set an alias for the current query.
+- GENERATE SQL: clicking this button will automatically replace the corresponding variables and generate the final executed statement.
+
+Following the default prompts queries the average system memory usage on the server hosting the current TDengine deployment over the specified interval, as shown below.
+
+![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)
+
+> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
+
+### Importing the Dashboard
+
+You can install the TDinsight dashboard from the data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for the TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details; a scripted alternative using the Grafana HTTP API is sketched after the list below.
+
+For more dashboards using the TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a partial list:
+
+- [15146](https://grafana.com/grafana/dashboards/15146): Monitor multiple TDengine clusters.
+- [15155](https://grafana.com/grafana/dashboards/15155): TDengine alert demo.
+- [15167](https://grafana.com/grafana/dashboards/15167): TDinsight.
+- [16388](https://grafana.com/grafana/dashboards/16388): Telegraf node metrics dashboard using TDengine data source.
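+If you prefer to script the data source setup instead of using the UI, Grafana's HTTP API can create it directly. This is a minimal sketch, assuming Grafana at `localhost:3000` with the default `admin:admin` credentials and taosAdapter at `localhost:6041`; the payload fields mirror the provisioning sample shown earlier, and the `basicAuth` value is the base64 of `root:taosdata`:
+
+```bash
+curl -s -u admin:admin -X POST http://localhost:3000/api/datasources \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "name": "TDengine",
+    "type": "tdengine-datasource",
+    "access": "proxy",
+    "url": "http://localhost:6041",
+    "secureJsonData": { "basicAuth": "cm9vdDp0YW9zZGF0YQ==" }
+  }'
+```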
diff --git a/docs-en/20-third-party/02-prometheus.md b/docs/en/20-third-party/02-prometheus.md
similarity index 100%
rename from docs-en/20-third-party/02-prometheus.md
rename to docs/en/20-third-party/02-prometheus.md
diff --git a/docs-en/20-third-party/03-telegraf.md b/docs/en/20-third-party/03-telegraf.md
similarity index 100%
rename from docs-en/20-third-party/03-telegraf.md
rename to docs/en/20-third-party/03-telegraf.md
diff --git a/docs-en/20-third-party/05-collectd.md b/docs/en/20-third-party/05-collectd.md
similarity index 100%
rename from docs-en/20-third-party/05-collectd.md
rename to docs/en/20-third-party/05-collectd.md
diff --git a/docs-en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md
similarity index 100%
rename from docs-en/20-third-party/06-statsd.md
rename to docs/en/20-third-party/06-statsd.md
diff --git a/docs-en/20-third-party/07-icinga2.md b/docs/en/20-third-party/07-icinga2.md
similarity index 100%
rename from docs-en/20-third-party/07-icinga2.md
rename to docs/en/20-third-party/07-icinga2.md
diff --git a/docs-en/20-third-party/08-tcollector.md b/docs/en/20-third-party/08-tcollector.md
similarity index 100%
rename from docs-en/20-third-party/08-tcollector.md
rename to docs/en/20-third-party/08-tcollector.md
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
new file mode 100644
index 0000000000000000000000000000000000000000..0900dd3d7571dc0ab8d93174aa2d7b5eccf1fbf5
--- /dev/null
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -0,0 +1,141 @@
+---
+sidebar_label: EMQX Broker
+title: EMQX Broker writing
+---
+
+MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code: you only need to set up "rules" in the EMQX Dashboard. EMQX supports saving data to TDengine by sending the data to a web service, and in the Enterprise Edition it also provides a native TDengine driver for direct saving. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
+
+## Prerequisites
+
+The following preparations are required for EMQX to add TDengine data sources correctly.
+- The TDengine cluster is deployed and working properly
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- If you use the simulated writer described later, you need to install the appropriate version of Node.js; v12 is recommended.
+
+## Install and start EMQX
+
+Depending on the current operating system, users can download the installation package from the [EMQX official website](https://www.emqx.io/downloads) and execute the installation. After installation, use `sudo emqx start` or `sudo systemctl start emqx` to start the EMQX service.
+
+Note: this chapter is based on EMQX v4.4.5. Other versions of EMQX may differ in user interface, configuration methods, or features.
+
+## Create Database and Table
+
+In this step we create the appropriate database and table schema in TDengine for receiving MQTT data.
Open the TDengine CLI and execute the SQL below:
+
+```sql
+CREATE DATABASE test;
+USE test;
+CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
+```
+
+Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
+
+## Configuring EMQX Rules
+
+Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
+
+### Log in to EMQX Dashboard
+
+Use your browser to open the URL `http://IP:18083` and log in to the EMQX Dashboard. The initial username is `admin` and the password is `public`.
+
+![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp)
+
+### Creating Rule
+
+Select "Rule" in the "Rule Engine" on the left and click the "Create" button:
+
+![TDengine Database EMQX rule engine](./emqx/rule-engine.webp)
+
+### Edit SQL fields
+
+Copy the SQL below and paste it into the SQL edit area:
+
+```sql
+SELECT
+  payload
+FROM
+  "sensor/data"
+```
+
+![TDengine Database EMQX create rule](./emqx/create-rule.webp)
+
+### Add "action handler"
+
+![TDengine Database EMQX add action handler](./emqx/add-action-handler.webp)
+
+### Add "Resource"
+
+![TDengine Database EMQX create resource](./emqx/create-resource.webp)
+
+Select "Data to Web Service" and click the "New Resource" button.
+
+### Edit "Resource"
+
+Select "WebHook" and fill in the request URL as the address and port of the server running taosAdapter (the default port is 6041). Leave the other properties at their default values.
+
+![TDengine Database EMQX edit resource](./emqx/edit-resource.webp)
+
+### Edit "action"
+
+Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, the value of the Authorization key is:
+```
+Basic cm9vdDp0YW9zZGF0YQ==
+```
+
+Please refer to the [TDengine REST API documentation](/reference/rest-api/) for details on authorization.
+
+Enter the rule engine replacement template in the message body:
+
+```sql
+INSERT INTO test.sensor_data VALUES(
+  now,
+  ${payload.temperature},
+  ${payload.humidity},
+  ${payload.volume},
+  ${payload.PM10},
+  ${payload.pm25},
+  ${payload.SO2},
+  ${payload.NO2},
+  ${payload.CO},
+  '${payload.id}',
+  ${payload.area},
+  ${payload.ts}
+)
+```
+
+![TDengine Database EMQX edit action](./emqx/edit-action.webp)
+
+Finally, click the "Create" button at the bottom left corner to save the rule.
+
+## Compose program to mock data
+
+```javascript
+{{#include docs/examples/other/mock.js}}
+```
+
+Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test, in case your hardware cannot handle a large number of concurrent clients.
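+
+Before running the simulator at scale, you may want to confirm that the WebHook resource and Authorization header configured above actually reach TDengine. A hedged, hand-run sketch of the same write path (the sample values and the `localhost` address are assumptions, not part of the EMQX setup):
+
+```bash
+# Manually POST one row through taosAdapter's REST endpoint, using the
+# same Basic auth header configured for the "action" above.
+curl -X POST \
+  -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
+  -d "INSERT INTO test.sensor_data VALUES (now, 23.5, 55.0, 0.6, 12.3, 7.8, 0.1, 0.2, 0.3, 'sensor-01', 1, now)" \
+  http://localhost:6041/rest/sql
+```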
+
+![TDengine Database EMQX client num](./emqx/client-num.webp)
+
+## Execute tests to simulate sending MQTT data
+
+```
+npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org
+node mock.js
+```
+
+![TDengine Database EMQX run mock](./emqx/run-mock.webp)
+
+## Verify that EMQX is receiving data
+
+Refresh the EMQX Dashboard rule engine interface to see how many records were received correctly:
+
+![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp)
+
+## Verify that data is written to TDengine
+
+Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly:
+
+![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp)
+
+Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
+Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md
new file mode 100644
index 0000000000000000000000000000000000000000..64404bd63f2368494a665aed192edd3c503a65b0
--- /dev/null
+++ b/docs/en/20-third-party/10-hive-mq-broker.md
@@ -0,0 +1,6 @@
+---
+sidebar_label: HiveMQ Broker
+title: HiveMQ Broker writing
+---
+
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is aimed mainly at enterprise machine-to-machine (M2M) communication and internal transport, with a focus on scalability, ease of management, and security. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via the TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/taosdata/hivemq-tdengine-extension/blob/master/README_EN.md) for details on how to use it.
diff --git a/docs-en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
similarity index 100%
rename from docs-en/20-third-party/11-kafka.md
rename to docs/en/20-third-party/11-kafka.md
diff --git a/docs/en/20-third-party/12-jupyterlab.md b/docs/en/20-third-party/12-jupyterlab.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbd7e530f0959740c53e48ce1d73d92ce0d6c5c5
--- /dev/null
+++ b/docs/en/20-third-party/12-jupyterlab.md
@@ -0,0 +1,98 @@
+---
+sidebar_label: JupyterLab
+title: Connect JupyterLab to TDengine
+---
+
+JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab.
+
+## Install JupyterLab
+Installing JupyterLab is very easy. Installation instructions can be found at:
+
+https://jupyterlab.readthedocs.io/en/stable/getting_started/installation.html.
+
+If you don't feel like clicking on the link, here are the instructions.
+Jupyter's preferred Python package manager is pip, so we show the instructions for pip.
+You can also use **conda** or **pipenv** if you are managing Python environments.
+````
+pip install jupyterlab
+````
+
+For **conda** you can run:
+````
+conda install -c conda-forge jupyterlab
+````
+
+For **pipenv** you can run:
+````
+pipenv install jupyterlab
+pipenv shell
+````
+
+## Run JupyterLab
+You can start JupyterLab from the command line by running:
+````
+jupyter lab
+````
+This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888.
+
+## Install the TDengine Python connector
+You can now install the TDengine Python connector as follows.
+
+Start a new Python kernel in JupyterLab.
+
+If using **conda** run the following:
+````
+# Install a conda package in the current Jupyter kernel
+import sys
+!conda install --yes --prefix {sys.prefix} taospy
+````
+If using **pip** run the following:
+````
+# Install a pip package in the current Jupyter kernel
+import sys
+!{sys.executable} -m pip install taospy
+````
+
+## Connect to TDengine
+You can find detailed examples of using the Python connector in the TDengine documentation.
+Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as it would be if you weren't using JupyterLab.
+Each TDengine instance has a database called "log", which holds monitoring information about the TDengine instance.
+In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info".
+
+The structure of this table is as follows:
+````
+taos> desc disks_info;
+             Field              |         Type         |   Length    |   Note   |
+=================================================================================
+ ts                             | TIMESTAMP            |           8 |          |
+ datadir_l0_used                | FLOAT                |           4 |          |
+ datadir_l0_total               | FLOAT                |           4 |          |
+ datadir_l1_used                | FLOAT                |           4 |          |
+ datadir_l1_total               | FLOAT                |           4 |          |
+ datadir_l2_used                | FLOAT                |           4 |          |
+ datadir_l2_total               | FLOAT                |           4 |          |
+ dnode_id                       | INT                  |           4 | TAG      |
+ dnode_ep                       | BINARY               |         134 | TAG      |
+Query OK, 9 row(s) in set (0.000238s)
+````
+
+The code below fetches data from this table into a pandas DataFrame.
+
+````
+import taos
+import pandas
+
+def sqlQuery(conn):
+    # Run the query through the native connection and return a DataFrame.
+    df: pandas.DataFrame = pandas.read_sql("select * from log.disks_info limit 500", conn)
+    return df
+
+conn = taos.connect()
+result = sqlQuery(conn)
+print(result)
+````
+
+TDengine has connectors for various languages including Node.js, Go, and PHP, and there are Jupyter kernels for these languages, which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels).
diff --git a/docs-en/20-third-party/_category_.yml b/docs/en/20-third-party/_category_.yml similarity index 100% rename from docs-en/20-third-party/_category_.yml rename to docs/en/20-third-party/_category_.yml diff --git a/docs-en/20-third-party/_deploytaosadapter.mdx b/docs/en/20-third-party/_deploytaosadapter.mdx similarity index 100% rename from docs-en/20-third-party/_deploytaosadapter.mdx rename to docs/en/20-third-party/_deploytaosadapter.mdx diff --git a/docs-cn/20-third-party/emqx/add-action-handler.webp b/docs/en/20-third-party/emqx/add-action-handler.webp similarity index 100% rename from docs-cn/20-third-party/emqx/add-action-handler.webp rename to docs/en/20-third-party/emqx/add-action-handler.webp diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.webp b/docs/en/20-third-party/emqx/check-result-in-taos.webp similarity index 100% rename from docs-cn/20-third-party/emqx/check-result-in-taos.webp rename to docs/en/20-third-party/emqx/check-result-in-taos.webp diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.webp b/docs/en/20-third-party/emqx/check-rule-matched.webp similarity index 100% rename from docs-cn/20-third-party/emqx/check-rule-matched.webp rename to docs/en/20-third-party/emqx/check-rule-matched.webp diff --git a/docs-cn/20-third-party/emqx/client-num.webp b/docs/en/20-third-party/emqx/client-num.webp similarity index 100% rename from docs-cn/20-third-party/emqx/client-num.webp rename to docs/en/20-third-party/emqx/client-num.webp diff --git a/docs-cn/20-third-party/emqx/create-resource.webp b/docs/en/20-third-party/emqx/create-resource.webp similarity index 100% rename from docs-cn/20-third-party/emqx/create-resource.webp rename to docs/en/20-third-party/emqx/create-resource.webp diff --git a/docs-cn/20-third-party/emqx/create-rule.webp b/docs/en/20-third-party/emqx/create-rule.webp similarity index 100% rename from docs-cn/20-third-party/emqx/create-rule.webp rename to docs/en/20-third-party/emqx/create-rule.webp diff --git a/docs-cn/20-third-party/emqx/edit-action.webp b/docs/en/20-third-party/emqx/edit-action.webp similarity index 100% rename from docs-cn/20-third-party/emqx/edit-action.webp rename to docs/en/20-third-party/emqx/edit-action.webp diff --git a/docs-cn/20-third-party/emqx/edit-resource.webp b/docs/en/20-third-party/emqx/edit-resource.webp similarity index 100% rename from docs-cn/20-third-party/emqx/edit-resource.webp rename to docs/en/20-third-party/emqx/edit-resource.webp diff --git a/docs-cn/20-third-party/emqx/login-dashboard.webp b/docs/en/20-third-party/emqx/login-dashboard.webp similarity index 100% rename from docs-cn/20-third-party/emqx/login-dashboard.webp rename to docs/en/20-third-party/emqx/login-dashboard.webp diff --git a/docs-cn/20-third-party/emqx/rule-engine.webp b/docs/en/20-third-party/emqx/rule-engine.webp similarity index 100% rename from docs-cn/20-third-party/emqx/rule-engine.webp rename to docs/en/20-third-party/emqx/rule-engine.webp diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.webp b/docs/en/20-third-party/emqx/rule-header-key-value.webp similarity index 100% rename from docs-cn/20-third-party/emqx/rule-header-key-value.webp rename to docs/en/20-third-party/emqx/rule-header-key-value.webp diff --git a/docs-cn/20-third-party/emqx/run-mock.webp b/docs/en/20-third-party/emqx/run-mock.webp similarity index 100% rename from docs-cn/20-third-party/emqx/run-mock.webp rename to docs/en/20-third-party/emqx/run-mock.webp diff --git a/docs-cn/20-third-party/add_datasource1.webp 
b/docs/en/20-third-party/grafana/add_datasource1.webp similarity index 100% rename from docs-cn/20-third-party/add_datasource1.webp rename to docs/en/20-third-party/grafana/add_datasource1.webp diff --git a/docs-cn/20-third-party/add_datasource2.webp b/docs/en/20-third-party/grafana/add_datasource2.webp similarity index 100% rename from docs-cn/20-third-party/add_datasource2.webp rename to docs/en/20-third-party/grafana/add_datasource2.webp diff --git a/docs-cn/20-third-party/add_datasource3.webp b/docs/en/20-third-party/grafana/add_datasource3.webp similarity index 100% rename from docs-cn/20-third-party/add_datasource3.webp rename to docs/en/20-third-party/grafana/add_datasource3.webp diff --git a/docs-cn/20-third-party/add_datasource4.webp b/docs/en/20-third-party/grafana/add_datasource4.webp similarity index 100% rename from docs-cn/20-third-party/add_datasource4.webp rename to docs/en/20-third-party/grafana/add_datasource4.webp diff --git a/docs-cn/20-third-party/create_dashboard1.webp b/docs/en/20-third-party/grafana/create_dashboard1.webp similarity index 100% rename from docs-cn/20-third-party/create_dashboard1.webp rename to docs/en/20-third-party/grafana/create_dashboard1.webp diff --git a/docs-cn/20-third-party/create_dashboard2.webp b/docs/en/20-third-party/grafana/create_dashboard2.webp similarity index 100% rename from docs-cn/20-third-party/create_dashboard2.webp rename to docs/en/20-third-party/grafana/create_dashboard2.webp diff --git a/docs/en/20-third-party/grafana/grafana-data-source.png b/docs/en/20-third-party/grafana/grafana-data-source.png new file mode 100644 index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-data-source.png differ diff --git a/docs/en/20-third-party/grafana/grafana-install-and-config.png b/docs/en/20-third-party/grafana/grafana-install-and-config.png new file mode 100644 index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-install-and-config.png differ diff --git a/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png new file mode 100644 index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png differ diff --git a/docs-en/20-third-party/index.md b/docs/en/20-third-party/index.md similarity index 100% rename from docs-en/20-third-party/index.md rename to docs/en/20-third-party/index.md diff --git a/docs-cn/20-third-party/kafka/Kafka_Connect.webp b/docs/en/20-third-party/kafka/Kafka_Connect.webp similarity index 100% rename from docs-cn/20-third-party/kafka/Kafka_Connect.webp rename to docs/en/20-third-party/kafka/Kafka_Connect.webp diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.webp b/docs/en/20-third-party/kafka/confluentPlatform.webp similarity index 100% rename from docs-cn/20-third-party/kafka/confluentPlatform.webp rename to docs/en/20-third-party/kafka/confluentPlatform.webp diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs/en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp similarity index 100% rename from docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp rename to 
docs/en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
new file mode 100644
index 0000000000000000000000000000000000000000..44651c0496481c410640e577aaad5781f846e302
--- /dev/null
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -0,0 +1,287 @@
+---
+sidebar_label: Architecture
+title: Architecture
+---
+
+## Cluster and Primary Logic Unit
+
+The design of TDengine is based on the assumption that no hardware or software system is 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system with a high-reliability architecture. Hardware or software failure of a single server, or even multiple servers, will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to significantly reduce hardware resource needs.
+
+### Primary Logic Unit
+
+The logical structure diagram of TDengine's distributed architecture is as follows:
+
+![TDengine Database architecture diagram](structure.webp)
+
Figure 1: TDengine architecture diagram
+
+A complete TDengine system runs on one or more physical nodes. Logically, it includes data nodes (dnode), the TDengine client driver (TAOSC) and applications (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
+
+**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, a virtual machine, or a Docker container with an OS installed. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
+
+**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero or more logical virtual nodes (vnode) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). The EP is a combination of the FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances, i.e. have multiple data nodes.
+
+**Virtual node (vnode)**: To better support data sharding and load balancing, and to prevent data overheating or skew, data nodes are virtualized into multiple virtual nodes (vnode; V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacity of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of its data node and the VGroup ID to which it belongs, and it is created and managed by the management node.
+
+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called the Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group, and data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of the mnode cluster is completed automatically by the system without manual intervention.
There is at most one mnode on each dnode, and it is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EPs of the dnodes where all the mnodes in the cluster are located, through internal messaging interaction.
+
+**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and are then replicated to follower vnodes, thus ensuring that a single piece of data is replicated on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, it means that they belong to the same group and their data is backed up to each other. The number of virtual nodes in a virtual node group can be changed dynamically, and can be as low as one, in which case there is no data replication. The VGroup ID never changes. Even if a virtual node group is deleted, its ID will not be reused.
+
+**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between applications and the cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; and, when returning the results to the application, performing the final level of aggregation, sorting, filtering and other operations. For the JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of the TDengine cluster.
+
+### Node Communication
+
+**Communication mode**: Communication among the data nodes of the TDengine system, and between the client driver and the data nodes, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission over UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, while TCP is automatically adopted for packets with a data volume of more than 15K or for query operations. At the same time, TDengine will automatically compress/decompress and digitally sign/authenticate the data, according to the configuration and the data packet. For data replication among data nodes, only TCP is used for data transportation.
+
+**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with an FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because the IP address may change, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or that the hosts files on the nodes are configured properly.
+
+**Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of the cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort+10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
+
+**Cluster external connection**: A TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for the connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through the CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
+
+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain the EP information of the mnode:
+
+1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened normally to obtain the EP information of the mnode, skip to the second step;
+2. Check the system configuration file taos.cfg to obtain the node configuration parameters “firstEp” and “secondEp” (the nodes specified by these two parameters can be normal nodes without an mnode; in this case, the node will be redirected to the mnode when connected). If these two configuration parameters do not exist in taos.cfg or are invalid, skip to the third step;
+3. Set its own EP as an mnode EP and run independently.
+
+After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made but the connection still fails, it sleeps for a few seconds before trying again.
+
+**The choice of mnode**: TDengine logically has a management node, but there is no separate execution code. The server side only has one set of execution code, taosd. So which data node will be the management node?
This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it checks its End Point against the obtained mnode EP List. If its EP is in the list, the data node starts the mnode module and becomes an mnode; otherwise, the mnode module is not started. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. The modification of configuration parameters is a decision made by the mnode itself according to resource usage.
+
+**Add new data nodes:** Once the system has one data node, it is already a working system. There are two steps to add a new node into the cluster.
+- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
+- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EPs of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster is established step by step.
+
+**Redirection**: Regardless of whether it is a dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when it receives a connection request from a newly started dnode or TAOSC and it is not an mnode itself, it replies with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
+
+### A Typical Data Writing Process
+
+To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
+
+![typical process of TDengine Database](message.webp)
+
Figure 2: A typical data writing process in TDengine
+
+1. The application initiates a request to insert data through JDBC, ODBC, or other APIs.
+2. TAOSC checks the cache to see if metadata exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get-metadata request to the mnode.
+3. The mnode returns the metadata of the table to TAOSC. The metadata contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs; if the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
+4. TAOSC initiates an insert request to the leader vnode.
+5. After the vnode inserts the data, it replies to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from the vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the target database, TAOSC will issue an insert request to the next vnode in the vgroup.
+6. TAOSC notifies the APP that writing is successful.
+
+For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue the metadata request to the EP of another mnode.
+
+For Steps 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so it assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply with the actual leader as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the leader node information.
+
+The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and they are transparent to applications.
+
+Through the TAOSC caching mechanism, the mnode needs to be accessed only when a table is accessed for the first time, so the mnode will not become a system bottleneck. However, because schema and vgroup may change (such as during load balancing), TAOSC will interact with the mnode regularly to automatically update the cache.
+
+## Storage Model and Data Partitioning/Sharding
+
+### Storage Model
+
+The data stored by TDengine includes collected time-series data, metadata related to databases and tables, tag data, etc. All of the data is specifically divided into three parts:
+
+- Time-series data: stored in vnodes and composed of data, head and last files. The amount of data is large and the query volume depends on the application scenario. Out-of-order writing is allowed, but delete operations are not supported for the time being, and update operations are only allowed when the database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point.
+- Tag data: meta files stored in vnodes. The four standard operations of create, read, update and delete are supported. The amount of data is not large.
If there are N tables, there are N records, so all of them can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, tag filtering results can be returned in milliseconds.
+- Metadata: stored in the mnode and includes system node, user, DB, table schema and other information. The four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of the client cache. Even though TDengine uses centralized storage management for this data, because of the architecture, it is not a performance bottleneck.
+
+Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages:
+
+- It significantly reduces the redundancy of tag data storage. A typical NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite it again, which is an extremely expensive operation.
+- It aggregates data efficiently across multiple tables: when aggregating data across multiple tables, TDengine first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned, which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag queries over tens of millions of tables can return in milliseconds.
+
+### Data Sharding
+
+For large-scale data management, to achieve scale-out, it is generally necessary to adopt a partitioning or sharding strategy. TDengine implements data sharding via vnodes, and time-series data partitioning via one data file per time range.
+
+VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support for heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
+
+For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the raw data generated in one year will be less than 0.5 GB). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data across two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and a single vnode can hold up to one million tables. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
+
+When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free table space.
If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode in the cluster according to the current workload, and then create the table in it. If there are multiple replicas of a DB, the system does not create just one vnode, but a vgroup (virtual data node group). The system imposes no limit on the number of vnodes; the number is limited only by the computing and storage resources of the physical nodes.
+
+The metadata of each table (including schema, tags, etc.) is also stored in the vnode instead of centralized storage in the mnode. In effect, this means sharding of metadata, which is good for efficient and parallel tag filtering operations.
+
+### Data Partitioning
+
+In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range also makes it convenient to efficiently implement data retention policies. As long as a data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs.
+
+In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
+
+### Load Balancing
+
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
+
+If the mnode has not received a dnode's status for a period of time, the dnode will be treated as offline. If it stays offline beyond the time configured by the parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by the mnode. If the number of replicas of the vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to maintain the replica number. If there are other mnodes on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to maintain the replica number.
+
+When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process.
+
+The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by the parameter “balance”, which turns automatic load balancing on or off.**
+
+## Data Writing and Replication Process
+
+If a database has N replicas, a virtual node group has N virtual nodes. But only one is the leader and all the others are followers. When the application writes a new record to the system, only the leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notify TAOSC to redirect. The database creation parameters that govern this sharding, partitioning and replication are illustrated in the sketch below.
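+
+A minimal sketch, assuming the TDengine CLI `taos` is on the PATH and using a hypothetical database name `power` (the parameter values are illustrative, not recommendations): it creates a database with 3 replicas per vgroup, 10-day data files, and a 3650-day retention policy, then inspects the resulting vgroups.
+
+```bash
+# REPLICA: vnodes per vgroup; DAYS: time range per data file; KEEP: retention.
+taos -s "CREATE DATABASE power REPLICA 3 DAYS 10 KEEP 3650;"
+# List the vgroups (and hence the vnode-level sharding) of the new database.
+taos -s "SHOW power.VGROUPS;"
+```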
+
+### Leader vnode Writing Process
+
+The leader vnode uses the following writing process:
+
+![TDengine Database Leader Writing Process](write_master.webp)
+
Figure 3: TDengine Leader Writing Process
+
+1. The leader vnode receives the application's data insertion request, verifies it, and moves to the next step;
+2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode writes the original request packet into the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine flushes the WAL data to disk immediately, to ensure that even if the system goes down, all data can be recovered from the database log file;
+3. If there are multiple replicas, the vnode forwards the data packet to the follower vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data;
+4. The data is written into memory and the record is added to the “skip list”;
+5. The leader vnode returns a confirmation message to the application, indicating a successful write;
+6. If any of Steps 2, 3 or 4 fails, the error is returned directly to the application.
+
+### Follower vnode Writing Process
+
+For a follower vnode, the writing process is as follows:
+
+![TDengine Database Follower Writing Process](write_slave.webp)
+
Figure 4: TDengine Follower Writing Process
+
+1. The follower vnode receives a data insertion request forwarded by the leader vnode;
+2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode writes the original request packet into the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine flushes the WAL data to disk immediately, to ensure that even if the system goes down, all data can be recovered from the database log file;
+3. The data is written into memory and the record is added to the “skip list”.
+
+Compared with the leader vnode, a follower vnode has no forwarding or reply-confirmation step, which means two fewer steps. But writing into memory and the WAL is exactly the same.
+
+### Remote Disaster Recovery and IDC (Internet Data Center) Migration
+
+As discussed above, TDengine writes using leader and follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring an IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDCs and different racks, thus implementing remote disaster recovery without other tools.
+
+On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, the added virtual nodes can provide services. During the synchronization process, the leader and the other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add the new physical nodes to the existing IDC cluster, and then remove the old physical nodes after the data synchronization is completed.
+
+However, asynchronous replication has a very-low-probability scenario where data may be lost. The specific scenario is as follows:
+
+1. The leader vnode has finished its 5-step operations, confirmed the success of the write to the APP, and then gone down;
+2. The follower vnode has received the write request, but processing failed before writing to the log in Step 2;
+3. The follower vnode becomes the new leader, thus losing one record.
+
+In theory, for asynchronous replication, there is no guarantee against data loss. However, this is an extremely low-probability scenario, as described above.
+
+Note: Remote disaster recovery and no-downtime IDC migration are only supported by the Enterprise Edition. **Hint: This function is not available yet**
+
+### Leader/follower Selection
+
+Each vnode maintains a version number. When in-memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number is increased by one.
+
+When a vnode starts, its role (leader or follower) is uncertain, and the data is in an unsynchronized state. It is necessary to establish TCP connections with the other vnodes in the virtual node group and exchange status, including version numbers and roles. Through this exchange, the system implements a leader-selection process. The rules are as follows:
+
+1. If there is only one replica, it is always the leader;
+2. When all replicas are online, the one with the latest version is the leader;
+3. If more than half of the virtual nodes in the group are online and one of them is a follower, it automatically becomes the leader;
+4. For rules 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list is selected as the leader.
+
+### Synchronous Replication
+
+For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So TDengine provides a synchronous replication mechanism. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter, “quorum”. If quorum is greater than one, every time the leader forwards a message to the replicas, it needs to wait for “quorum-1” confirmations before informing the application that the data has been successfully written to the followers. If “quorum-1” confirmations are not received within a certain period of time, the leader vnode returns an error to the application.
+
+With synchronous replication, system performance decreases and latency increases. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
+
+## Caching and Persistence
+
+### Caching
+
+TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. This strategy is different from the read-driven data caching mode (Least-Recently-Used, LRU); it directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature by putting the most recently arrived (current state) data in the buffer.
+
+TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer allows the system to respond more quickly to users' analysis queries for the latest piece or batch of data, and provides faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems** (see the sketch below). This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the system buffer is emptied; the previously cached data is written to disk in batches and is not reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems.
+
+Each vnode has its own independent memory, composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory blocks are used, the disk-writing operation starts, and subsequent writing operations are carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”.
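+
+A hedged sketch of this cache-like usage, assuming the `test.sensor_data` table from earlier chapters exists and using illustrative (not recommended) buffer parameters:
+
+```bash
+# Size the write buffer explicitly: 6 memory blocks of 16 MB each per vnode.
+taos -s "CREATE DATABASE demo CACHE 16 BLOCKS 6;"
+# LAST_ROW fetches the most recent record, typically served from the
+# in-memory buffer rather than disk, i.e. TDengine acting as a cache.
+taos -s "SELECT LAST_ROW(*) FROM test.sensor_data;"
+```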
+
+### Persistent Storage
+
+TDengine uses a data-driven method to write data from the buffer to hard disk for persistent storage. When the cached data in a vnode reaches a certain volume, TDengine starts a disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine opens a new database log file when the data is written, and deletes the old database log file after successful persistence, to avoid unlimited log growth.
+
+To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saving data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus for given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations.
+
+For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
+
+Given the “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small; 10 to 100 is appropriate. Based on this principle, a reasonable “days” value can be set. In the current version, the parameter “keep” can be modified, but the parameter “days” cannot be modified once it is set.
+
+In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating data for queries will take longer. If it is too small, the index of the data blocks becomes too large, and the compression efficiency will be low, with slower reading speed.
+
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of the data blocks for each table, recording the offset of each data block in the data file, the start and end times of the data, and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), they will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in the last file and then written into the data file.
+
+When data is written to disk, the system decides whether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms.
Two-stage compression applies a general compression algorithm on top of one-stage compression, which yields a higher compression ratio.
+
+### Tiered Storage
+
+By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different subdirectory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can be configured with the system parameter “dataDir” to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides tiered data storage, i.e. storage on different storage media according to the timestamps of data files. For example, the latest data can be stored on SSD, data older than a week on local hard disk, and data older than four weeks on a network storage device. This reduces storage costs and ensures efficient data access. The movement of data across different storage media is done automatically by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
+
+The dataDir format is as follows:
+```
+dataDir data_path [tier_level]
+```
+
+Here, data_path is the folder path of the mount point and tier_level is the storage tier of the media. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same storage tier are distributed across all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level specified; it is called the special mount disk (path). This mount path defaults to level 0 storage media and contains special file links, which cannot be removed; otherwise it will have a devastating impact on the written data.
+
+Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 are to be designated as level 0 storage media, disk3 and disk4 as level 1 storage media, and disk5 and disk6 as level 2 storage media. Disk1 is the special mount disk; you can configure this in /etc/taos/taos.cfg as follows:
+
+```
+dataDir /mnt/disk1/taos
+dataDir /mnt/disk2/taos 0
+dataDir /mnt/disk3/taos 1
+dataDir /mnt/disk4/taos 1
+dataDir /mnt/disk5/taos 2
+dataDir /mnt/disk6/taos 2
+```
+
+Mounted disks can also be non-local network disks, as long as the system can access them.
+
+Note: Tiered Storage is only supported in the Enterprise Edition.
+
+## Data Query
+
+TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine requires the collaboration of the client, vnode and mnode.
+
+### Single Table Query
+
+The parsing and verification of SQL statements are completed on the client side. The SQL statement is parsed to generate an Abstract Syntax Tree (AST), which is then validated. Then the metadata (table metadata) of the table specified in the query is requested from the management node (mnode).
+
+According to the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) where the table is located.
After receiving the query request, the dnode identifies the virtual node (vnode) it points to and forwards the message to the vnode's query execution queue. The query execution thread of the vnode establishes the basic query execution environment, immediately acknowledges the query request, and starts executing the query at the same time. + +When the client fetches the query result, the worker thread in the dnode's query execution queue waits for the vnode's execution thread to finish before returning the query result to the requesting client. + +### Aggregation by Time Axis, Downsampling, Interpolation + +Time-series data is different from ordinary data in that each record has a timestamp. Aggregating data by timestamp along the time axis is therefore an important feature of time-series databases that sets them apart from common databases; it is similar to the window queries of stream computing engines. + +The keyword `interval` is introduced into TDengine to split the time axis into fixed-length time windows, and the data within each time window is aggregated as needed. For example: + +```mysql +select count(*) from d1001 interval(1h); +``` + +For the data collected by device D1001, the number of records stored per hour is returned, using 1-hour time windows. + +In application scenarios where query results need to be obtained continuously, if data is missing in a given time interval, the results for that interval will also be missing. TDengine provides a strategy to interpolate the results of timeline aggregation calculations. The results of time axis aggregation can be interpolated by using the keyword `fill`. For example: + +```mysql +select count(*) from d1001 interval(1h) fill(prev); +``` + +For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, the statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL filling (NULL), and specific value filling (value).
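+ +With specific value filling, empty windows report a constant instead of being interpolated; a small sketch reusing the hypothetical table `d1001` from above: + +```mysql +select count(*) from d1001 interval(1h) fill(value, 0); +``` + +Here any hour without data reports 0 rather than being omitted from the result set.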
### Multi-table Aggregation Query + +TDengine creates a separate table for each data collection point, but in practical applications it is often necessary to aggregate data from different data collection points. To perform aggregation operations efficiently, TDengine introduces the concept of the STable (super table). A STable is used to represent a specific type of data collection point. It is a table set containing multiple tables; the schema of each table in the set is the same, but each table carries its own static tags. There can be multiple tags, which can be added, deleted, and modified at any time. Applications can run aggregations or statistics over all or a subset of the tables under a STable by specifying tag filters, which greatly simplifies application development. The process is shown in the following figure: + +![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp) +Figure 5: Diagram of multi-table aggregation query
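+ +For example, assuming a hypothetical STable `meters` with a tag `location`, an aggregation over all tables whose tag matches a filter could be written as: + +```mysql +select avg(current), max(voltage) from meters where location = 'California.SanFrancisco' interval(1h); +``` + +The steps behind such a query, as illustrated in the figure above, are: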
+ +1. The application sends a query condition to the system; +2. TAOSC sends the STable name to the meta node (management node); +3. The management node sends the vnode list owned by the STable back to TAOSC; +4. TAOSC sends the computing request together with the tag filters to the multiple data nodes corresponding to these vnodes; +5. Each vnode first finds in memory the set of tables within its own node that meet the tag filters, then scans the stored time-series data, completes the corresponding aggregation calculations, and returns the result to TAOSC; +6. TAOSC finally aggregates the results returned by the multiple data nodes and sends them back to the application. + +Since TDengine stores tag data and time-series data separately in the vnode, the set of tables that needs to participate in the aggregation operation is found first by filtering the tag data in memory, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed across multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions and most operations for ordinary tables are applicable to STables, and the syntax is exactly the same; please see TAOS SQL for details. + +### Precomputation + +To effectively improve the performance of query processing, and based on the fact that IoT data is not changed once written, statistical information about the data stored in a data block is recorded in the head of the data block, including the max value, the min value, and the sum. We call this a precomputing unit. If the query processing involves all the data of an entire data block, the pre-calculated results are used directly, and the contents of the data block do not need to be read at all. Since the amount of pre-calculated data is much smaller than the actual size of the data blocks stored on disk, for query processing with disk IO as the bottleneck, using pre-calculated results can greatly reduce read IO pressure and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
+ diff --git a/docs-en/21-tdinternal/30-iot-big-data.md b/docs/en/21-tdinternal/30-iot-big-data.md similarity index 100% rename from docs-en/21-tdinternal/30-iot-big-data.md rename to docs/en/21-tdinternal/30-iot-big-data.md diff --git a/docs-en/21-tdinternal/_category_.yml b/docs/en/21-tdinternal/_category_.yml similarity index 100% rename from docs-en/21-tdinternal/_category_.yml rename to docs/en/21-tdinternal/_category_.yml diff --git a/docs-cn/21-tdinternal/dnode.webp b/docs/en/21-tdinternal/dnode.webp similarity index 100% rename from docs-cn/21-tdinternal/dnode.webp rename to docs/en/21-tdinternal/dnode.webp diff --git a/docs-en/21-tdinternal/index.md b/docs/en/21-tdinternal/index.md similarity index 100% rename from docs-en/21-tdinternal/index.md rename to docs/en/21-tdinternal/index.md diff --git a/docs-cn/21-tdinternal/message.webp b/docs/en/21-tdinternal/message.webp similarity index 100% rename from docs-cn/21-tdinternal/message.webp rename to docs/en/21-tdinternal/message.webp diff --git a/docs-cn/21-tdinternal/modules.webp b/docs/en/21-tdinternal/modules.webp similarity index 100% rename from docs-cn/21-tdinternal/modules.webp rename to docs/en/21-tdinternal/modules.webp diff --git a/docs-cn/21-tdinternal/multi_tables.webp b/docs/en/21-tdinternal/multi_tables.webp similarity index 100% rename from docs-cn/21-tdinternal/multi_tables.webp rename to docs/en/21-tdinternal/multi_tables.webp diff --git a/docs-cn/21-tdinternal/replica-forward.webp b/docs/en/21-tdinternal/replica-forward.webp similarity index 100% rename from docs-cn/21-tdinternal/replica-forward.webp rename to docs/en/21-tdinternal/replica-forward.webp diff --git a/docs-cn/21-tdinternal/replica-master.webp b/docs/en/21-tdinternal/replica-master.webp similarity index 100% rename from docs-cn/21-tdinternal/replica-master.webp rename to docs/en/21-tdinternal/replica-master.webp diff --git a/docs-cn/21-tdinternal/replica-restore.webp b/docs/en/21-tdinternal/replica-restore.webp similarity index 100% rename from docs-cn/21-tdinternal/replica-restore.webp rename to docs/en/21-tdinternal/replica-restore.webp diff --git a/docs-cn/21-tdinternal/structure.webp b/docs/en/21-tdinternal/structure.webp similarity index 100% rename from docs-cn/21-tdinternal/structure.webp rename to docs/en/21-tdinternal/structure.webp diff --git a/docs-cn/21-tdinternal/vnode.webp b/docs/en/21-tdinternal/vnode.webp similarity index 100% rename from docs-cn/21-tdinternal/vnode.webp rename to docs/en/21-tdinternal/vnode.webp diff --git a/docs-cn/21-tdinternal/write_master.webp b/docs/en/21-tdinternal/write_master.webp similarity index 100% rename from docs-cn/21-tdinternal/write_master.webp rename to docs/en/21-tdinternal/write_master.webp diff --git a/docs-cn/21-tdinternal/write_slave.webp b/docs/en/21-tdinternal/write_slave.webp similarity index 100% rename from docs-cn/21-tdinternal/write_slave.webp rename to docs/en/21-tdinternal/write_slave.webp diff --git a/docs-en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md similarity index 100% rename from docs-en/25-application/01-telegraf.md rename to docs/en/25-application/01-telegraf.md diff --git a/docs-en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md similarity index 100% rename from docs-en/25-application/02-collectd.md rename to docs/en/25-application/02-collectd.md diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md new file mode 100644 index 
0000000000000000000000000000000000000000..806b996f6d392f68a045906da0c4e4d1536f4179 --- /dev/null +++ b/docs/en/25-application/03-immigrate.md @@ -0,0 +1,435 @@ +--- +sidebar_label: OpenTSDB Migration to TDengine +title: Best Practices for Migrating OpenTSDB Applications to TDengine +--- + +As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse, and architectures are becoming more complex. + +As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up. + +To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**. + +Drawing on the strengths of traditional relational databases, NoSQL databases, stream computing engines, and message queues, TDengine has its own unique benefits in time-series big data processing. TDengine can effectively solve the problems currently encountered by OpenTSDB. + +Compared with OpenTSDB, TDengine has the following distinctive features. + +- Data writing and querying performance far exceeds that of OpenTSDB. +- Efficient compression mechanism for time-series data, compressing data to less than 1/5 of its original size on disk. +- The installation and deployment are straightforward. A single installation package completes the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds. +- The built-in functions cover all of OpenTSDB's query functions, and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost. +- Supports up to 128 tags, with a total tag length of 16 KB. +- In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. It supports a variety of enterprise-class standard connector protocols such as JDBC. + +Migrating applications originally running on OpenTSDB to TDengine effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. Both the stand-alone version and the cluster version are open-sourced, and there is no need to be concerned about vendor lock-in. + +We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems. + +## DevOps Application Quick Migration + +### 1.
Typical Application Scenarios + +The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario. + +**Figure 1. Typical architecture in a DevOps scenario** +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") + +In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate the information collected by the agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana). + +The agents deployed on the application nodes are responsible for providing operational metrics from different sources to collectd/StatsD, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system, with the data then visualized using the dashboard software Grafana. + +### 2. Migration Services + +- **TDengine installation and deployment** + +First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). + +Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. + +- **Adjusting the data collector configuration** + +TDengine version 2.4 and later includes `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports InfluxDB's line protocol and OpenTSDB's telnet/JSON writing protocol specifications, providing rich data access capabilities that effectively reduce the cost and difficulty of user migration. + +Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. + +Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve an easy, convenient, and seamless migration in these application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). + +If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where taosAdapter is deployed. For example, assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows. + +```html +LoadPlugin write_tsdb +<Plugin write_tsdb> + <Node> + Host "192.168.1.130" + Port "6046" + HostTags "status=production" + StoreRates false + AlwaysAppendDS false + </Node> +</Plugin> +``` + +collectd then pushes the data to taosAdapter using the write_tsdb plugin, and taosAdapter calls the API to write the data to TDengine. If you are using StatsD, adjust its configuration file accordingly. + +- **Tuning the Dashboard system** + +After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana).
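+ +Before tuning the dashboards, you may want to verify from the TDengine CLI that data is actually arriving. A quick check, assuming taosAdapter's default target database name `collectd` (the actual name depends on your taosAdapter configuration): + +```sql +show databases; +use collectd; +show stables; +```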
+ +TDengine provides two sets of Dashboard templates by default; users only need to import the templates from the Grafana directory into Grafana to activate them. + +**Figure 2. Importing Grafana templates** +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") + +With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward: there is no need to write any code, and only some configuration files need to be changed. + +### 3. Post-migration architecture + +After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The acquisition side, the data-writing side, and the monitoring and presentation side all remain stable, apart from a few configuration adjustments that do not involve any critical changes. Migrating to TDengine from OpenTSDB brings more powerful processing and query performance. + +In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) which provides storage and a data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine and save compute and storage resources. With the same compute resource allocation, a single TDengine node can provide the service capacity of 3 to 5 OpenTSDB nodes. TDengine clustering may be required depending on the scale of the application. + +**Figure 3. System architecture after migration** +![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") + +The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application. + +## Migration evaluation and strategy for other scenarios + +### 1. Differences between TDengine and OpenTSDB + +This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration. + +TDengine currently only supports Grafana for visual dashboard rendering, so if your application uses front-end dashboards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.), you cannot directly migrate those front-ends to TDengine; they will need to be ported to Grafana to work correctly. + +TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software, but future versions will provide support for more data collection and aggregation software. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. +In addition to the two data aggregator protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol, JSON format. You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine.
+ +In addition, if your application uses the following features of OpenTSDB, you need to take the following considerations into account before migrating your application to TDengine. + +1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant processing logic into your application, then this status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for handling cluster state monitoring to meet the monitoring and maintenance needs of your application. +2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table at the same system hierarchy level. However, a logical multi-level structure can be simulated through careful construction of distinct tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the raw results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. +While TDengine does not currently support automatic downsampling of multiple timelines or preaggregation over a range of periods, thanks to its high-performance query processing logic it can provide very fast query responses without relying on Rollup and preaggregation, which keeps your application's query processing logic straightforward. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (whose result is consistent with the Derivative behavior of InfluxDB) and `IRate` (whose result is compatible with the IRate function in Prometheus). The results of these two functions differ slightly from those of Rate, but the TDengine functions are more powerful. In addition, TDengine supports all the calculation functions provided by OpenTSDB; TDengine's query functions are much more powerful than those supported by OpenTSDB, which can significantly simplify the processing logic of your application. + +With the above introduction, you should be able to understand the changes that migrating from OpenTSDB to TDengine brings. This information will also help you correctly determine whether to migrate your application to TDengine to experience the powerful and convenient time-series data processing capability it provides. + +### 2. Migration strategy suggestion + +Migrating an OpenTSDB-based system involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. The two systems should run in parallel for a while, after which the historical data can be migrated to TDengine. If your application has functions that strongly depend on the OpenTSDB features described above and you do not want to stop using them, you can also consider keeping the original OpenTSDB system running while using TDengine to provide the primary services. + +## Data model design + +On the one hand, TDengine requires a strict schema definition for its incoming data.
On the other hand, the data model of TDengine is richer than that of OpenTSDB, and the multi-value model is compatible with all single-value model building requirements. + +Let us now assume a DevOps scenario where we use collectd to collect the underlying metrics of a device, including memory, swap, disk, etc. The schema in OpenTSDB is as follows. + +| # | metric | value name | type | tag1 | tag2 | tag3 | tag4 | tag5 | +| --- | ------ | ---------- | ------ | ---- | ----------- | -------------------- | --------- | ------ | +| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a | +| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | +| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | + +TDengine requires the stored data to have a data schema, i.e., you need to create a super table and specify its schema before writing the data. For data schema creation, you have two ways to do this: +1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. + +At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in early versions this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory. + +2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB and TDengine's data model. Considering that OpenTSDB is a single-value mapping model, we recommend using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models. + +- **Single-value model**. + +The steps are as follows: +- Use the name of the metric as the name of the TDengine super table. +- Build the table with two basic data columns: timestamp and value. The tags of the super table correspond to the tag information of the metric, and the number of tags is equal to the number of tags of the metric. +- Sub-tables are named with a fixed rule: `metric + '_' + tag1_value + '_' + tag2_value + '_' + tag3_value ...`. + +Create 3 super tables in TDengine. + +```sql +create stable memory(ts timestamp, val float) tags(host binary(12), memory_type binary(20), memory_type_instance binary(20), source binary(20)); +create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20)); +create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20)); +``` + +For sub-tables, use dynamic table creation as shown below. + +```sql +insert into memory_vm130_memory_buffered_collectd using memory tags('vm130', 'memory', 'buffer', 'collectd') values(1632979445, 3.0656); +``` + +The final system will have about 340 sub-tables and three super tables. Note that if the concatenated tag values cause a sub-table name to exceed the system limit (191 bytes), then some encoding (e.g., MD5) needs to be used to convert it to an acceptable length.
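+ +Once the super tables above are in place and data is flowing, OpenTSDB-style tag-filtered reads translate naturally into SQL. A sketch against the `memory` super table defined above (the tag value and time range are hypothetical): + +```sql +select avg(val) from memory where memory_type = 'memory' and ts >= now - 1h interval(1m); +```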
+ +- **Multi-value model** + +Ideally you would take advantage of TDengine's multi-value modeling capabilities. To do so, you first need to meet the requirement that different collected quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, so that multiple metrics can be written at once using a single SQL statement. The metric name is used as the super table name to create a multi-column data model for quantities that have the same collection frequency and arrive simultaneously; the sub-tables are named using a fixed rule. Each of the metrics above contains only one measurement value, so they cannot be converted into a multi-value model. + +## Data triage and application adaptation + +Subscribe to the message queue and start writing data to TDengine. + +After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. + +```sql +select count(*) from memory +``` + +After completing the query, if the data written does not differ from what is expected and there are no abnormal error messages from the writing program itself, you can confirm that the written data is complete and valid. + +TDengine does not support querying or data fetching using the OpenTSDB query syntax, but it does provide a counterpart for each OpenTSDB query; the corresponding query processing can be adapted by referring to Appendix 1. To fully understand the types of queries supported by TDengine, refer to the TDengine user manual. + +TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. Please read the user manual for specific operations and usage. + +## Historical Data Migration + +### 1. Use the tool to migrate data automatically + +To facilitate historical data migration, we provide a plug-in for the data synchronization tool DataX, which can automatically write data into TDengine. Note that DataX's automatic data migration only supports the single-value model. + +For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). + +After migrating via DataX, we found that the efficiency of migrating historical data can be significantly improved by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process, provided as a reference for application migration. + +| Number of datax instances (number of concurrent processes) | Migration record speed (records/second) | +| ----------------------------------------------------------- | --------------------------------------- | +| 1 | About 139,000 | +| 2 | About 218,000 | +| 3 | About 249,000 | +| 5 | About 295,000 | +| 10 | About 330,000 | + +
(Note: The test data comes from a single-node machine with an Intel(R) Core(TM) i7-10700 CPU @ 2.90GHz (16 cores) and 64 GB of memory; the channel and batchSize are 8 and 1000 respectively, and each record contains 10 tags.) + +### 2. Manual data migration + +If you need to use the multi-value model for data writing, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then write the rows that share a timestamp into the database together via SQL statements. + +Manual migration of data requires attention to the following two issues: + +1) When storing the exported data on disk, the disk needs to have enough storage space to fully accommodate the exported data files. To avoid running out of disk space, you can adopt a partial import mode in which the timelines belonging to the same super table are exported first, and then only those files are imported into TDengine. + +2) If the system has enough spare computing and IO resources while under load, import with multiple threads to maximize the efficiency of data migration. Considering the heavy load that data parsing places on the CPU, control the maximum number of parallel tasks to avoid overloading the system when importing historical data. + +Due to the ease of operation of TDengine itself, there is no need to perform index maintenance or data format conversion in the entire process; the whole process can simply be executed sequentially. + +While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch query requests to TDengine to achieve seamless application switching. + +## Appendix 1: OpenTSDB query function correspondence table + +### Avg + +Equivalent function: avg + +Example: + +```sql +SELECT avg(val) FROM (SELECT first(val) val FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s) +``` + +Remarks: + +1. The value in Interval needs to be the same as the interval value in the outer query. +2. Interpolation processing in TDengine uses subqueries to assist in completion. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation requirements. +3. The parameter 20s in Interval indicates that the inner query generates results according to a time window of 20 seconds. In an actual query, it needs to be adjusted to the time interval between records to ensure that the interpolation results are equivalent to the original data. +4. Due to the particular interpolation strategy and mechanism of OpenTSDB, i.e. interpolation followed by aggregate calculation, it is impossible for the results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries). + +### Count + +Equivalent function: count + +Example: + +```sql +select count(*) from super_table_name; +``` + +### Dev + +Equivalent function: stddev + +Example: + +```sql +Select stddev(val) from table_name +``` + +### Estimated percentiles + +Equivalent function: apercentile + +Example: + +```sql +Select apercentile(col1, 50, "t-digest") from table_name +``` + +Remark: + +1.
When calculating estimated percentiles, OpenTSDB uses the t-digest algorithm by default, so to obtain the same calculation results in TDengine, the algorithm needs to be specified in the `apercentile()` function. TDengine supports two different percentile calculation algorithms, named "default" and "t-digest" respectively. + +### First + +Equivalent function: first + +Example: + +```sql +Select first(col1) from table_name +``` + +### Last + +Equivalent function: last + +Example: + +```sql +Select last(col1) from table_name +``` + +### Max + +Equivalent function: max + +Example: + +```sql +Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s) +``` + +Note: The Max function requires interpolation for the reasons described above. + +### Min + +Equivalent function: min + +Example: + +```sql +Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s); +``` + +### MimMax + +Equivalent function: max + +```sql +Select max(val) from table_name +``` + +Note: This function has no interpolation requirements, so it can be calculated directly. + +### MimMin + +Equivalent function: min + +```sql +Select min(val) from table_name +``` + +Note: This function has no interpolation requirements, so it can be calculated directly. + +### Percentile + +Equivalent function: percentile + +Remark: + +### Sum + +Equivalent function: sum + +```sql +Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s) +``` + +Note: Like Max, the Sum function requires interpolation for the reasons described above. + +### Zimsum + +Equivalent function: sum + +```sql +Select sum(val) from table_name +``` + +Note: This function has no interpolation requirements, so it can be calculated directly. + +Complete example: + +````json +// OpenTSDB query JSON +query = { +"start": 1510560000, +"end": 1515000009, +"queries": [{ +"aggregator": "count", +"metric": "cpu.usage_user" +}] +} + +// Equivalent query SQL: +SELECT count(*) +FROM `cpu.usage_user` +WHERE ts>=1510560000 AND ts<=1515000009 +```` + +## Appendix 2: Resource Estimation Methodology + +### Data generation environment + +We still use the hypothetical environment from Chapter 4, with three measurements: temperature and humidity are each written at a rate of one record every 5 seconds, with 100,000 timelines; air pollution is written at a rate of one record every 10 seconds, with 10,000 timelines; and the query request frequency is 500 QPS. + +### Storage resource estimation + +Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` records per second, and the length of each record is `L` bytes, the scale of data generated per day is `86400 * n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(86400 * n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In a production environment, the compression ratio C of TDengine is generally between 5 and 7. +With an additional 20% redundancy, the required storage resources can be calculated as: + +```matlab +(86400 * n * t * L) * (365 * 1.5) * (1+20%)/C +``` +Substituting into the above formula, the raw data generated every year is 11.8 TB without considering the tag information. Note that tag information is associated with each timeline in TDengine, not every record.
The amount of data actually stored is therefore somewhat smaller than the amount generated, and tag data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. + +### Storage Device Selection Considerations + +A disk with better random read performance, such as an SSD, improves the query response performance of the whole system. To obtain better query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. We recommend using the `fio` utility to evaluate the random read IO performance of the current device (please refer to Appendix 1 for specific usage) to confirm whether it can meet the random read requirements of large files. + +Hard disk write performance has little effect on TDengine. The TDengine writing process adopts an append-write mode, so as long as the device has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance. + +### Computational resource estimates + +Due to the characteristics of IoT data, when the frequency of data generation is constant, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/), the system consumes less than 1 CPU core at 22,000 writes per second. + +To estimate the CPU resources consumed by queries, assume that the application requires the database to provide 10,000 QPS and that the CPU time consumed by each query is about 1 ms. Each core can then provide about 1,000 QPS, so at least 10 cores are required to satisfy 10,000 QPS. For the system as a whole to run at less than 50% CPU load, the entire cluster needs twice as many cores, i.e. 20 cores. + +### Memory resource estimation + +The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. It then takes about 1.5 hours to fill a block, which triggers persistence to disk, and no adjustment is required. A total of 22 Vnodes require about 1 GB of memory cache. Considering the memory needed for queries, and assuming a memory overhead of about 50 MB per query, the memory required for 500 concurrent queries is about 25 GB. + +In summary, a single 16-core 32GB machine, or a cluster of two 8-core 16GB machines, is enough. + +## Appendix 3: Cluster Deployment and Startup + +TDengine provides a wealth of help documents explaining many aspects of cluster installation and deployment. Here is a list of documents for your reference. + +### Cluster Deployment + +The first step is TDengine installation. Download the latest stable version of TDengine from the official website and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats. + +Note that once the installation is complete, do not immediately start the `taosd` service; start it only after the parameters are correctly configured. + +### Set running parameters and start the service + +To ensure that the system can obtain the information necessary for regular operation, set the following vital parameters correctly on the server: FQDN, firstEp, secondEp, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](../../cluster/)". + +Follow the same steps to set the parameters on the other nodes, start the `taosd` service, and then add dnodes to the cluster.
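+ +Dnodes are added from the TDengine CLI; a minimal sketch (the FQDN `h2.taosdata.com` is a placeholder for your second node): + +```sql +create dnode "h2.taosdata.com:6030"; +show dnodes; +```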
+ +Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster was built successfully. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](../../cluster/)". + +## Appendix 4: Super Table Names + +Since OpenTSDB metric names contain a dot ("."), for example a metric named "cpu.usage_user", and the dot has a special meaning in TDengine as the separator between database and table names, TDengine provides "escape" characters so that users can use keywords or special separators (e.g., dots) in (super) table names. To use special characters, enclose the table name in backticks, e.g. `` `cpu.usage_user` `` is then a valid (super) table name. + +## Appendix 5: Reference Articles + +1. [Using TDengine + collectd/StatsD + Grafana to quickly build an IT operation and maintenance monitoring system](/application/collectd/) +2. [Write collected data directly to TDengine through collectd](/third-party/collectd/) diff --git a/docs-cn/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp rename to docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp b/docs/en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp rename to docs/en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp rename to docs/en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp b/docs/en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp rename to docs/en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-Telegraf.webp rename to docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-collectd-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp similarity index 100% rename from
docs-cn/25-application/IT-DevOps-Solutions-collectd-dashboard.webp rename to docs/en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-statsd-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-statsd-dashboard.webp rename to docs/en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp diff --git a/docs-cn/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp similarity index 100% rename from docs-cn/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp rename to docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp diff --git a/docs-en/25-application/_category_.yml b/docs/en/25-application/_category_.yml similarity index 100% rename from docs-en/25-application/_category_.yml rename to docs/en/25-application/_category_.yml diff --git a/docs-en/25-application/index.md b/docs/en/25-application/index.md similarity index 100% rename from docs-en/25-application/index.md rename to docs/en/25-application/index.md diff --git a/docs-en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md similarity index 100% rename from docs-en/27-train-faq/01-faq.md rename to docs/en/27-train-faq/01-faq.md diff --git a/docs/en/27-train-faq/03-docker.md b/docs/en/27-train-faq/03-docker.md new file mode 100644 index 0000000000000000000000000000000000000000..0378fffb8bdbc4cae8d4d2176ec3d745a548c2fe --- /dev/null +++ b/docs/en/27-train-faq/03-docker.md @@ -0,0 +1,285 @@ +--- +sidebar_label: TDengine in Docker +title: Deploy TDengine in Docker +--- + +We do not recommend deploying TDengine with Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32. + +In this chapter we introduce a simple step-by-step guide to using TDengine in Docker. + +## Install Docker + +To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/). + +After Docker is installed, you can check whether Docker is installed properly by displaying the Docker version. + +```bash +$ docker -v +Docker version 20.10.3, build 48d30b5 +``` + +## Launch TDengine in Docker + +### Launch TDengine Server + +```bash +$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine +526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd +``` + +In the above command, a docker container is started to run the TDengine server; port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 is already occupied on the host, please change it to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport). + +- **docker run**: Launch a docker container +- **-d**: the container will run in background mode +- **-p**: port mapping +- **tdengine/tdengine**: The image from which to launch the container +- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**: the container ID if successfully launched. + +Furthermore, `--name` can be used with `docker run` to specify a name for the container, `--hostname` can be used to specify a hostname for the container, and `-v` can be used to mount local volumes into the container so that the data generated inside the container can be persisted to disk on the host. + +```bash +docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine +``` + +- **--name tdengine**: specify the name of the container; the name can be used to refer to the container later +- **--hostname=tdengine-server**: specify the hostname inside the container; the hostname can be used inside the container without worrying that the container IP may vary +- **-v**: volume mapping between host and container + +### Check the container + +```bash +docker ps +``` + +The output is like below: + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS ··· +c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +``` + +- **docker ps**: List all the containers +- **CONTAINER ID**: Container ID +- **IMAGE**: The image used for the container +- **COMMAND**: The command used when launching the container +- **CREATED**: When the container was created +- **STATUS**: Status of the container + +### Access TDengine inside container + +```bash +$ docker exec -it tdengine /bin/bash +root@tdengine-server:~/TDengine-server-2.4.0.4# +``` + +- **docker exec**: Attach to the container +- **-i**: Interactive mode +- **-t**: Use terminal +- **tdengine**: Container name, as shown in the output of `docker ps` +- **/bin/bash**: The command to execute once the container is attached + +Inside the container, start the TDengine CLI `taos` + +```bash +root@tdengine-server:~/TDengine-server-2.4.0.4# taos + +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +The above example is for a successful connection. If `taos` fails to connect to the server side, error information will be shown. + +In the TDengine CLI, SQL commands can be executed to create/drop databases, tables, and STables, and to insert or query data. For details please refer to [TAOS SQL](/taos-sql/).
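+ +For instance, a minimal smoke test from inside the container (the database and table names below are arbitrary examples): + +```sql +create database mydb; +use mydb; +create table t (ts timestamp, speed int); +insert into t values (now, 10); +select * from t; +```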
+ +### Access TDengine from host + +If option `-p` is used to map ports properly between host and container, it is also possible to access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host. + +``` +$ taos + +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +The REST interface provided by TDengine in the container can also be accessed from the host.
+ +``` +curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql +``` + +The output is like below: + +``` +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} +``` + +For details of the REST API please refer to [REST API](/reference/rest-api/). + +### Run TDengine server and taosAdapter inside container + +From version 2.4.0.0, in the TDengine Docker image, `taosAdapter` is enabled by default, but it can be disabled using the environment variable `TAOS_DISABLE_ADAPTER=true`. `taosAdapter` can also be run alone, without `taosd`, when launching a container. + +For the port mapping of `taosAdapter`, please refer to [taosAdapter](/reference/taosadapter/). + +- Run both `taosd` and `taosAdapter` (the default) in a docker container: + +```bash +docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 +``` + +- Run only `taosAdapter` in a docker container; the `TAOS_FIRST_EP` environment variable needs to be used to specify the container name in which `taosd` is running: + +```bash +docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter +``` + +- Run only `taosd` in a docker container: + +```bash +docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4 +``` + +- Verify the REST interface: + +```bash +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql +``` + +Below is an example output: + +``` +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +### Use taosBenchmark on host to access TDengine server in container + +1. Run `taosBenchmark` (formerly named `taosdemo`) on the host: + + ```bash + $ taosBenchmark + + taosBenchmark is simulating data generated by power equipments monitoring...
+ + host: 127.0.0.1:6030 + user: root + password: taosdata + configDir: + resultFile: ./output.txt + thread num of insert data: 10 + thread num of create table: 10 + top insert interval: 0 + number of records per req: 30000 + max sql length: 1048576 + database count: 1 + database[0]: + database[0] name: test + drop: yes + replica: 1 + precision: ms + super table count: 1 + super table[0]: + stbName: meters + autoCreateTable: no + childTblExists: no + childTblCount: 10000 + childTblPrefix: d + dataSource: rand + iface: taosc + insertRows: 10000 + interlaceRows: 0 + disorderRange: 1000 + disorderRatio: 0 + maxSqlLen: 1048576 + timeStampStep: 1 + startTimestamp: 2017-07-14 10:40:00.000 + sampleFormat: + sampleFile: + tagsFile: + columnCount: 3 + column[0]:FLOAT column[1]:INT column[2]:FLOAT + tagCount: 2 + tag[0]:INT tag[1]:BINARY(16) + + Press enter key to continue or Ctrl-C to stop + ``` + + Once the execution finishes, a database `test` is created, a STable `meters` is created in database `test`, and 10,000 sub-tables named "d0" to "d9999" are created using `meters` as the template. 10,000 rows are inserted into each table, so 100,000,000 rows are inserted in total. + +2. Check the data + + - **Check database** + + ```bash + $ taos> show databases; + name | created_time | ntables | vgroups | ··· + test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· + log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· + + ``` + + - **Check STable** + + ```bash + $ taos> use test; + Database changed. + + $ taos> show stables; + name | created_time | columns | tags | tables | + ============================================================================================ + meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | + Query OK, 1 row(s) in set (0.003259s) + + ``` + + - **Check Tables** + + ```bash + $ taos> select * from test.t0 limit 10; + + DB error: Table does not exist (0.002857s) + taos> select * from test.d0 limit 10; + ts | current | voltage | phase | + ====================================================================================== + 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | + 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | + 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | + 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | + 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | + 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | + 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | + 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | + 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | + 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | + Query OK, 10 row(s) in set (0.016791s) + + ``` + + - **Check tag values of table d0** + + ```bash + $ taos> select groupid, location from test.d0; + groupid | location | + ================================= + 0 | California.SanDiego | + Query OK, 1 row(s) in set (0.003490s) + ``` + +### Access TDengine from 3rd party tools + +Many 3rd party tools can be used to write data into TDengine through `taosAdapter`; for details please refer to [3rd party tools](/third-party/). + +From the 3rd party tool's perspective, there is nothing different about accessing a TDengine server inside a container, as long as the end point is specified correctly: the end point should be the FQDN and the mapped port of the host.
+ +## Stop TDengine inside container + +```bash +docker stop tdengine +``` + +- **docker stop**: stop a container +- **tdengine**: container name diff --git a/docs-en/27-train-faq/_category_.yml b/docs/en/27-train-faq/_category_.yml similarity index 100% rename from docs-en/27-train-faq/_category_.yml rename to docs/en/27-train-faq/_category_.yml diff --git a/docs-en/27-train-faq/index.md b/docs/en/27-train-faq/index.md similarity index 100% rename from docs-en/27-train-faq/index.md rename to docs/en/27-train-faq/index.md diff --git a/docs/en/30-release/01-2.6.md b/docs/en/30-release/01-2.6.md new file mode 100644 index 0000000000000000000000000000000000000000..c7f46b110cde153de57d38468bcf885390c3a33b --- /dev/null +++ b/docs/en/30-release/01-2.6.md @@ -0,0 +1,11 @@ +--- +title: 2.6 +--- + +[2.6.0.6](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.6) + +[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4) + +[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1) + +[2.6.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.0) diff --git a/docs/en/30-release/02-2.4.md b/docs/en/30-release/02-2.4.md new file mode 100644 index 0000000000000000000000000000000000000000..9eeb5a10b4b9e76d207d3fbcf498df5dd3256bce --- /dev/null +++ b/docs/en/30-release/02-2.4.md @@ -0,0 +1,31 @@ +--- +title: 2.4 +--- + +[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30) + +[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26) + +[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25) + +[2.4.0.24](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.24) + +[2.4.0.20](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.20) + +[2.4.0.18](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.18) + +[2.4.0.16](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.16) + +[2.4.0.14](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.14) + +[2.4.0.12](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.12) + +[2.4.0.10](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.10) + +[2.4.0.7](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.7) + +[2.4.0.5](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.5) + +[2.4.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.4) + +[2.4.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.0) diff --git a/docs-en/30-release/_category_.yml b/docs/en/30-release/_category_.yml similarity index 100% rename from docs-en/30-release/_category_.yml rename to docs/en/30-release/_category_.yml diff --git a/docs-en/30-release/index.md b/docs/en/30-release/index.md similarity index 100% rename from docs-en/30-release/index.md rename to docs/en/30-release/index.md diff --git a/docs/examples/.gitignore b/docs/examples/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b50ab6c63b6a8968ba92af7d04edb446ec3d2776 --- /dev/null +++ b/docs/examples/.gitignore @@ -0,0 +1,4 @@ +.vscode +*.lock +.idea +.env \ No newline at end of file diff --git a/docs-examples/R/connect_native.r b/docs/examples/R/connect_native.r similarity index 100% rename from docs-examples/R/connect_native.r rename to docs/examples/R/connect_native.r diff --git a/docs-examples/R/connect_rest.r b/docs/examples/R/connect_rest.r similarity index 100% rename from docs-examples/R/connect_rest.r rename to docs/examples/R/connect_rest.r diff --git a/docs-examples/c/.gitignore 
b/docs/examples/c/.gitignore similarity index 100% rename from docs-examples/c/.gitignore rename to docs/examples/c/.gitignore diff --git a/docs-examples/c/async_query_example.c b/docs/examples/c/async_query_example.c similarity index 100% rename from docs-examples/c/async_query_example.c rename to docs/examples/c/async_query_example.c diff --git a/docs-examples/c/connect_example.c b/docs/examples/c/connect_example.c similarity index 100% rename from docs-examples/c/connect_example.c rename to docs/examples/c/connect_example.c diff --git a/docs-examples/c/error_handle_example.c b/docs/examples/c/error_handle_example.c similarity index 100% rename from docs-examples/c/error_handle_example.c rename to docs/examples/c/error_handle_example.c diff --git a/docs-examples/c/insert_example.c b/docs/examples/c/insert_example.c similarity index 100% rename from docs-examples/c/insert_example.c rename to docs/examples/c/insert_example.c diff --git a/docs-examples/c/json_protocol_example.c b/docs/examples/c/json_protocol_example.c similarity index 100% rename from docs-examples/c/json_protocol_example.c rename to docs/examples/c/json_protocol_example.c diff --git a/docs-examples/c/line_example.c b/docs/examples/c/line_example.c similarity index 100% rename from docs-examples/c/line_example.c rename to docs/examples/c/line_example.c diff --git a/docs-examples/c/multi_bind_example.c b/docs/examples/c/multi_bind_example.c similarity index 100% rename from docs-examples/c/multi_bind_example.c rename to docs/examples/c/multi_bind_example.c diff --git a/docs-examples/c/query_example.c b/docs/examples/c/query_example.c similarity index 100% rename from docs-examples/c/query_example.c rename to docs/examples/c/query_example.c diff --git a/docs-examples/c/stmt_example.c b/docs/examples/c/stmt_example.c similarity index 100% rename from docs-examples/c/stmt_example.c rename to docs/examples/c/stmt_example.c diff --git a/docs-examples/c/subscribe_demo.c b/docs/examples/c/subscribe_demo.c similarity index 100% rename from docs-examples/c/subscribe_demo.c rename to docs/examples/c/subscribe_demo.c diff --git a/docs-examples/c/telnet_line_example.c b/docs/examples/c/telnet_line_example.c similarity index 100% rename from docs-examples/c/telnet_line_example.c rename to docs/examples/c/telnet_line_example.c diff --git a/docs-examples/csharp/.gitignore b/docs/examples/csharp/.gitignore similarity index 100% rename from docs-examples/csharp/.gitignore rename to docs/examples/csharp/.gitignore diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs/examples/csharp/AsyncQueryExample.cs similarity index 100% rename from docs-examples/csharp/AsyncQueryExample.cs rename to docs/examples/csharp/AsyncQueryExample.cs diff --git a/docs-examples/csharp/ConnectExample.cs b/docs/examples/csharp/ConnectExample.cs similarity index 100% rename from docs-examples/csharp/ConnectExample.cs rename to docs/examples/csharp/ConnectExample.cs diff --git a/docs-examples/csharp/InfluxDBLineExample.cs b/docs/examples/csharp/InfluxDBLineExample.cs similarity index 100% rename from docs-examples/csharp/InfluxDBLineExample.cs rename to docs/examples/csharp/InfluxDBLineExample.cs diff --git a/docs-examples/csharp/OptsJsonExample.cs b/docs/examples/csharp/OptsJsonExample.cs similarity index 100% rename from docs-examples/csharp/OptsJsonExample.cs rename to docs/examples/csharp/OptsJsonExample.cs diff --git a/docs-examples/csharp/OptsTelnetExample.cs b/docs/examples/csharp/OptsTelnetExample.cs similarity index 100% rename from 
docs-examples/csharp/OptsTelnetExample.cs rename to docs/examples/csharp/OptsTelnetExample.cs diff --git a/docs-examples/csharp/QueryExample.cs b/docs/examples/csharp/QueryExample.cs similarity index 100% rename from docs-examples/csharp/QueryExample.cs rename to docs/examples/csharp/QueryExample.cs diff --git a/docs-examples/csharp/SQLInsertExample.cs b/docs/examples/csharp/SQLInsertExample.cs similarity index 100% rename from docs-examples/csharp/SQLInsertExample.cs rename to docs/examples/csharp/SQLInsertExample.cs diff --git a/docs-examples/csharp/StmtInsertExample.cs b/docs/examples/csharp/StmtInsertExample.cs similarity index 100% rename from docs-examples/csharp/StmtInsertExample.cs rename to docs/examples/csharp/StmtInsertExample.cs diff --git a/docs-examples/csharp/SubscribeDemo.cs b/docs/examples/csharp/SubscribeDemo.cs similarity index 100% rename from docs-examples/csharp/SubscribeDemo.cs rename to docs/examples/csharp/SubscribeDemo.cs diff --git a/docs-examples/csharp/asyncquery.csproj b/docs/examples/csharp/asyncquery.csproj similarity index 100% rename from docs-examples/csharp/asyncquery.csproj rename to docs/examples/csharp/asyncquery.csproj diff --git a/docs-examples/csharp/connect.csproj b/docs/examples/csharp/connect.csproj similarity index 100% rename from docs-examples/csharp/connect.csproj rename to docs/examples/csharp/connect.csproj diff --git a/docs-examples/csharp/influxdbline.csproj b/docs/examples/csharp/influxdbline.csproj similarity index 100% rename from docs-examples/csharp/influxdbline.csproj rename to docs/examples/csharp/influxdbline.csproj diff --git a/docs-examples/csharp/optsjson.csproj b/docs/examples/csharp/optsjson.csproj similarity index 100% rename from docs-examples/csharp/optsjson.csproj rename to docs/examples/csharp/optsjson.csproj diff --git a/docs-examples/csharp/optstelnet.csproj b/docs/examples/csharp/optstelnet.csproj similarity index 100% rename from docs-examples/csharp/optstelnet.csproj rename to docs/examples/csharp/optstelnet.csproj diff --git a/docs-examples/csharp/query.csproj b/docs/examples/csharp/query.csproj similarity index 100% rename from docs-examples/csharp/query.csproj rename to docs/examples/csharp/query.csproj diff --git a/docs-examples/csharp/sqlinsert.csproj b/docs/examples/csharp/sqlinsert.csproj similarity index 100% rename from docs-examples/csharp/sqlinsert.csproj rename to docs/examples/csharp/sqlinsert.csproj diff --git a/docs-examples/csharp/stmtinsert.csproj b/docs/examples/csharp/stmtinsert.csproj similarity index 100% rename from docs-examples/csharp/stmtinsert.csproj rename to docs/examples/csharp/stmtinsert.csproj diff --git a/docs-examples/csharp/subscribe.csproj b/docs/examples/csharp/subscribe.csproj similarity index 100% rename from docs-examples/csharp/subscribe.csproj rename to docs/examples/csharp/subscribe.csproj diff --git a/docs-examples/go/.gitignore b/docs/examples/go/.gitignore similarity index 100% rename from docs-examples/go/.gitignore rename to docs/examples/go/.gitignore diff --git a/docs-examples/go/connect/afconn/main.go b/docs/examples/go/connect/afconn/main.go similarity index 100% rename from docs-examples/go/connect/afconn/main.go rename to docs/examples/go/connect/afconn/main.go diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs/examples/go/connect/cgoexample/main.go similarity index 100% rename from docs-examples/go/connect/cgoexample/main.go rename to docs/examples/go/connect/cgoexample/main.go diff --git a/docs-examples/go/connect/restexample/main.go 
b/docs/examples/go/connect/restexample/main.go similarity index 100% rename from docs-examples/go/connect/restexample/main.go rename to docs/examples/go/connect/restexample/main.go diff --git a/docs-examples/go/connect/wrapper/main.go b/docs/examples/go/connect/wrapper/main.go similarity index 100% rename from docs-examples/go/connect/wrapper/main.go rename to docs/examples/go/connect/wrapper/main.go diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..b0f6fbcd50d54c232c322ec23621d389f8a1f46d --- /dev/null +++ b/docs/examples/go/go.mod @@ -0,0 +1,5 @@ +module goexample + +go 1.17 + +require github.com/taosdata/driver-go/v2 latest diff --git a/docs-examples/go/insert/json/main.go b/docs/examples/go/insert/json/main.go similarity index 100% rename from docs-examples/go/insert/json/main.go rename to docs/examples/go/insert/json/main.go diff --git a/docs-examples/go/insert/line/main.go b/docs/examples/go/insert/line/main.go similarity index 100% rename from docs-examples/go/insert/line/main.go rename to docs/examples/go/insert/line/main.go diff --git a/docs-examples/go/insert/sql/main.go b/docs/examples/go/insert/sql/main.go similarity index 100% rename from docs-examples/go/insert/sql/main.go rename to docs/examples/go/insert/sql/main.go diff --git a/docs-examples/go/insert/stmt/main.go b/docs/examples/go/insert/stmt/main.go similarity index 100% rename from docs-examples/go/insert/stmt/main.go rename to docs/examples/go/insert/stmt/main.go diff --git a/docs-examples/go/insert/telnet/main.go b/docs/examples/go/insert/telnet/main.go similarity index 100% rename from docs-examples/go/insert/telnet/main.go rename to docs/examples/go/insert/telnet/main.go diff --git a/docs-examples/go/query/async/main.go b/docs/examples/go/query/async/main.go similarity index 100% rename from docs-examples/go/query/async/main.go rename to docs/examples/go/query/async/main.go diff --git a/docs-examples/go/query/sync/main.go b/docs/examples/go/query/sync/main.go similarity index 100% rename from docs-examples/go/query/sync/main.go rename to docs/examples/go/query/sync/main.go diff --git a/docs-examples/go/rest/opentsdbjson/main.go b/docs/examples/go/rest/opentsdbjson/main.go similarity index 100% rename from docs-examples/go/rest/opentsdbjson/main.go rename to docs/examples/go/rest/opentsdbjson/main.go diff --git a/docs-examples/go/sub/main.go b/docs/examples/go/sub/main.go similarity index 100% rename from docs-examples/go/sub/main.go rename to docs/examples/go/sub/main.go diff --git a/docs-examples/java/.gitignore b/docs/examples/java/.gitignore similarity index 100% rename from docs-examples/java/.gitignore rename to docs/examples/java/.gitignore diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml new file mode 100644 index 0000000000000000000000000000000000000000..77c6a3ad60135a023ee1e72c2220e904c1f6313f --- /dev/null +++ b/docs/examples/java/pom.xml @@ -0,0 +1,76 @@ + + + + 4.0.0 + + com.taos + javaexample + 1.0 + + JavaExample + + + UTF-8 + 1.8 + 1.8 + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.38 + + + + org.slf4j + slf4j-api + 1.7.36 + + + ch.qos.logback + logback-classic + 1.2.11 + + + junit + junit + 4.13.1 + test + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.5 + + true + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/lib + false + false + true + + + + + + + + diff --git 
a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java rename to docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java diff --git a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java b/docs/examples/java/src/main/java/com/taos/example/JSONProtocolExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java rename to docs/examples/java/src/main/java/com/taos/example/JSONProtocolExample.java diff --git a/docs/examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs/examples/java/src/main/java/com/taos/example/LineProtocolExample.java new file mode 100644 index 0000000000000000000000000000000000000000..09c3078e222c1da083c6ed0da19274b7772bba16 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/LineProtocolExample.java @@ -0,0 +1,42 @@ +package com.taos.example; + +import com.taosdata.jdbc.SchemalessWriter; +import com.taosdata.jdbc.enums.SchemalessProtocolType; +import com.taosdata.jdbc.enums.SchemalessTimestampType; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; + +public class LineProtocolExample { + // format: measurement,tag_set field_set timestamp + private static String[] lines = { + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro + // seconds + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", + }; + + private static Connection getConnection() throws SQLException { + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + return DriverManager.getConnection(jdbcUrl); + } + + private static void createDatabase(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + // the default precision is ms (millisecond), but we use us(microsecond) here. 
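+ // (The sample lines above carry 16-digit microsecond timestamps, matching the
+ // SchemalessTimestampType.MICRO_SECONDS used when writing them in main() below.)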
+ stmt.execute("CREATE DATABASE IF NOT EXISTS test PRECISION 'us'"); + stmt.execute("USE test"); + } + } + + public static void main(String[] args) throws SQLException { + try (Connection conn = getConnection()) { + createDatabase(conn); + SchemalessWriter writer = new SchemalessWriter(conn); + writer.write(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.MICRO_SECONDS); + } + } +} diff --git a/docs-examples/java/src/main/java/com/taos/example/RESTConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/RESTConnectExample.java rename to docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java diff --git a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java rename to docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java diff --git a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java b/docs/examples/java/src/main/java/com/taos/example/RestQueryExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java rename to docs/examples/java/src/main/java/com/taos/example/RestQueryExample.java diff --git a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/StmtInsertExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java rename to docs/examples/java/src/main/java/com/taos/example/StmtInsertExample.java diff --git a/docs-examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/SubscribeDemo.java rename to docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java diff --git a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java rename to docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java diff --git a/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java b/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java new file mode 100644 index 0000000000000000000000000000000000000000..89fa8eaed5f7fa90bb56e21c7427a9f12fb8fa4e --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java @@ -0,0 +1,26 @@ +package com.taos.example; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; + +public class TestTableNotExits { + private static Connection getConnection() throws SQLException { + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + return DriverManager.getConnection(jdbcUrl); + } + public static void main(String[] args) throws SQLException { + try(Connection conn = getConnection()) { + try(Statement stmt = conn.createStatement()) { + try { + stmt.executeUpdate("insert into test.t1 values(1, 2) test.t2 values(3, 4)"); + } catch (SQLException e) { + 
System.out.println(e.getErrorCode()); + System.out.println(Integer.toHexString(e.getErrorCode())); + System.out.println(e); + } + } + } + } +} diff --git a/docs-examples/java/src/main/java/com/taos/example/WSConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java similarity index 100% rename from docs-examples/java/src/main/java/com/taos/example/WSConnectExample.java rename to docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -0,0 +1,63 @@ +package com.taos.example.highvolume; + +import java.sql.*; + +/** + * Prepare target database. + * Count total records in database periodically so that we can estimate the writing speed. + */ +public class DataBaseMonitor { + private Connection conn; + private Statement stmt; + + public DataBaseMonitor init() throws SQLException { + if (conn == null) { + String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + conn = DriverManager.getConnection(jdbcURL); + stmt = conn.createStatement(); + } + return this; + } + + public void close() { + try { + stmt.close(); + } catch (SQLException e) { + } + try { + conn.close(); + } catch (SQLException e) { + } + } + + public void prepareDatabase() throws SQLException { + stmt.execute("DROP DATABASE IF EXISTS test"); + stmt.execute("CREATE DATABASE test"); + stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); + } + + public Long count() throws SQLException { + if (!stmt.isClosed()) { + ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters"); + result.next(); + return result.getLong(1); + } + return null; + } + + /** + * show test.stables; + * + * name | created_time | columns | tags | tables | + * ============================================================================================ + * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 | + */ + public Long getTableCount() throws SQLException { + if (!stmt.isClosed()) { + ResultSet result = stmt.executeQuery("show test.stables"); + result.next(); + return result.getLong(5); + } + return null; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java new file mode 100644 index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java @@ -0,0 +1,70 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + + +public class FastWriteExample { + final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class); + + final static int taskQueueCapacity = 1000000; + final static List> taskQueues = new ArrayList<>(); + final static List readTasks = new ArrayList<>(); + final static List writeTasks = new ArrayList<>(); + final 
static DataBaseMonitor databaseMonitor = new DataBaseMonitor(); + + public static void stopAll() { + logger.info("shutting down"); + readTasks.forEach(task -> task.stop()); + writeTasks.forEach(task -> task.stop()); + databaseMonitor.close(); + } + + public static void main(String[] args) throws InterruptedException, SQLException { + int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1; + int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3; + int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000; + int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000; + + logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}", + readTaskCount, writeTaskCount, tableCount, maxBatchSize); + + databaseMonitor.init().prepareDatabase(); + + // Create task queues, whiting tasks and start writing threads. + for (int i = 0; i < writeTaskCount; ++i) { + BlockingQueue queue = new ArrayBlockingQueue<>(taskQueueCapacity); + taskQueues.add(queue); + WriteTask task = new WriteTask(queue, maxBatchSize); + Thread t = new Thread(task); + t.setName("WriteThread-" + i); + t.start(); + } + + // create reading tasks and start reading threads + int tableCountPerTask = tableCount / readTaskCount; + for (int i = 0; i < readTaskCount; ++i) { + ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask); + Thread t = new Thread(task); + t.setName("ReadThread-" + i); + t.start(); + } + + Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll)); + + long lastCount = 0; + while (true) { + Thread.sleep(10000); + long numberOfTable = databaseMonitor.getTableCount(); + long count = databaseMonitor.count(); + logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10); + lastCount = count; + } + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java new file mode 100644 index 0000000000000000000000000000000000000000..6fe83f002ebcb9d82e026e9a32886fd22bfefbe9 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java @@ -0,0 +1,53 @@ +package com.taos.example.highvolume; + +import java.util.Iterator; + +/** + * Generate test data + */ +class MockDataSource implements Iterator { + private String tbNamePrefix; + private int tableCount; + private long maxRowsPerTable = 1000000000L; + + // 100 milliseconds between two neighbouring rows. + long startMs = System.currentTimeMillis() - maxRowsPerTable * 100; + private int currentRow = 0; + private int currentTbId = -1; + + // mock values + String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"}; + float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f}; + int[] voltage = {119, 116, 111, 113, 118}; + float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f}; + + public MockDataSource(String tbNamePrefix, int tableCount) { + this.tbNamePrefix = tbNamePrefix; + this.tableCount = tableCount; + } + + @Override + public boolean hasNext() { + currentTbId += 1; + if (currentTbId == tableCount) { + currentTbId = 0; + currentRow += 1; + } + return currentRow < maxRowsPerTable; + } + + @Override + public String next() { + long ts = startMs + 100 * currentRow; + int groupId = currentTbId % 5 == 0 ? 
currentTbId / 5 : currentTbId / 5 + 1; + StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName + sb.append(ts).append(','); // ts + sb.append(current[currentRow % 5]).append(','); // current + sb.append(voltage[currentRow % 5]).append(','); // voltage + sb.append(phase[currentRow % 5]).append(','); // phase + sb.append(location[currentRow % 5]).append(','); // location + sb.append(groupId); // groupID + + return sb.toString(); + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java new file mode 100644 index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java @@ -0,0 +1,58 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.BlockingQueue; + +class ReadTask implements Runnable { + private final static Logger logger = LoggerFactory.getLogger(ReadTask.class); + private final int taskId; + private final List> taskQueues; + private final int queueCount; + private final int tableCount; + private boolean active = true; + + public ReadTask(int readTaskId, List> queues, int tableCount) { + this.taskId = readTaskId; + this.taskQueues = queues; + this.queueCount = queues.size(); + this.tableCount = tableCount; + } + + /** + * Assign data received to different queues. + * Here we use the suffix number in table name. + * You are expected to define your own rule in practice. + * + * @param line record received + * @return which queue to use + */ + public int getQueueId(String line) { + String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101 + String suffixNumber = tbName.split("_")[1]; + return Integer.parseInt(suffixNumber) % this.queueCount; + } + + @Override + public void run() { + logger.info("started"); + Iterator it = new MockDataSource("tb" + this.taskId, tableCount); + try { + while (it.hasNext() && active) { + String line = it.next(); + int queueId = getQueueId(line); + taskQueues.get(queueId).put(line); + } + } catch (Exception e) { + logger.error("Read Task Error", e); + } + } + + public void stop() { + logger.info("stop"); + this.active = false; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java new file mode 100644 index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java @@ -0,0 +1,205 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.*; +import java.util.HashMap; +import java.util.Map; + +/** + * A helper class encapsulate the logic of writing using SQL. + *

+ * The main interfaces are two methods:
+ * 1. {@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table name.
+ * 2. {@link SQLWriter#flush}, which assembles the INSERT statement and executes it.
+ *
+ * A technique worth mentioning: instead of creating every table automatically with the
+ * "INSERT INTO tb USING stb" syntax, we create tables on demand when a "table does not exist"
+ * error occurs. This ensures that checking table existence is a one-time-only operation.
+ */ +public class SQLWriter { + final static Logger logger = LoggerFactory.getLogger(SQLWriter.class); + + private Connection conn; + private Statement stmt; + + /** + * current number of buffered records + */ + private int bufferedCount = 0; + /** + * Maximum number of buffered records. + * Flush action will be triggered if bufferedCount reached this value, + */ + private int maxBatchSize; + + + /** + * Maximum SQL length. + */ + private int maxSQLLength; + + /** + * Map from table name to column values. For example: + * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)" + */ + private Map tbValues = new HashMap<>(); + + /** + * Map from table name to tag values in the same order as creating stable. + * Used for creating table. + */ + private Map tbTags = new HashMap<>(); + + public SQLWriter(int maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } + + + /** + * Get Database Connection + * + * @return Connection + * @throws SQLException + */ + private static Connection getConnection() throws SQLException { + String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + return DriverManager.getConnection(jdbcURL); + } + + /** + * Create Connection and Statement + * + * @throws SQLException + */ + public void init() throws SQLException { + conn = getConnection(); + stmt = conn.createStatement(); + stmt.execute("use test"); + ResultSet rs = stmt.executeQuery("show variables"); + while (rs.next()) { + String configName = rs.getString(1); + if ("maxSQLLength".equals(configName)) { + maxSQLLength = Integer.parseInt(rs.getString(2)); + logger.info("maxSQLLength={}", maxSQLLength); + } + } + } + + /** + * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap. + * Trigger writing when number of buffered records reached maxBachSize. + * + * @param line raw data get from task queue in format: tbName,ts,current,voltage,phase,location,groupId + */ + public void processLine(String line) throws SQLException { + bufferedCount += 1; + int firstComma = line.indexOf(','); + String tbName = line.substring(0, firstComma); + int lastComma = line.lastIndexOf(','); + int secondLastComma = line.lastIndexOf(',', lastComma - 1); + String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") "; + if (tbValues.containsKey(tbName)) { + tbValues.put(tbName, tbValues.get(tbName) + value); + } else { + tbValues.put(tbName, value); + } + if (!tbTags.containsKey(tbName)) { + String location = line.substring(secondLastComma + 1, lastComma); + String groupId = line.substring(lastComma + 1); + String tagValues = "('" + location + "'," + groupId + ')'; + tbTags.put(tbName, tagValues); + } + if (bufferedCount == maxBatchSize) { + flush(); + } + } + + + /** + * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it. + * In case of "Table does not exit" exception, create all tables in the sql and retry the sql. 
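+ * When the statement being assembled would exceed maxSQLLength, the SQL built so far is executed first and assembly restarts with a fresh "INSERT INTO".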
+ */ + public void flush() throws SQLException { + StringBuilder sb = new StringBuilder("INSERT INTO "); + for (Map.Entry entry : tbValues.entrySet()) { + String tableName = entry.getKey(); + String values = entry.getValue(); + String q = tableName + " values " + values + " "; + if (sb.length() + q.length() > maxSQLLength) { + executeSQL(sb.toString()); + logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance"); + sb = new StringBuilder("INSERT INTO "); + } + sb.append(q); + } + executeSQL(sb.toString()); + tbValues.clear(); + bufferedCount = 0; + } + + private void executeSQL(String sql) throws SQLException { + try { + stmt.executeUpdate(sql); + } catch (SQLException e) { + // convert to error code defined in taoserror.h + int errorCode = e.getErrorCode() & 0xffff; + if (errorCode == 0x362 || errorCode == 0x218) { + // Table does not exist + createTables(); + executeSQL(sql); + } else { + logger.error("Execute SQL: {}", sql); + throw e; + } + } catch (Throwable throwable) { + logger.error("Execute SQL: {}", sql); + throw throwable; + } + } + + /** + * Create tables in batch using syntax: + *

+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ *
+ */ + private void createTables() throws SQLException { + StringBuilder sb = new StringBuilder("CREATE TABLE "); + for (String tbName : tbValues.keySet()) { + String tagValues = tbTags.get(tbName); + sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" "); + } + String sql = sb.toString(); + try { + stmt.executeUpdate(sql); + } catch (Throwable throwable) { + logger.error("Execute SQL: {}", sql); + throw throwable; + } + } + + public boolean hasBufferedValues() { + return bufferedCount > 0; + } + + public int getBufferedCount() { + return bufferedCount; + } + + public void close() { + try { + stmt.close(); + } catch (SQLException e) { + } + try { + conn.close(); + } catch (SQLException e) { + } + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java new file mode 100644 index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java @@ -0,0 +1,4 @@ +package com.taos.example.highvolume; + +public class StmtWriter { +} diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java new file mode 100644 index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java @@ -0,0 +1,58 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.BlockingQueue; + +class WriteTask implements Runnable { + private final static Logger logger = LoggerFactory.getLogger(WriteTask.class); + private final int maxBatchSize; + + // the queue from which this writing task get raw data. + private final BlockingQueue queue; + + // A flag indicate whether to continue. + private boolean active = true; + + public WriteTask(BlockingQueue taskQueue, int maxBatchSize) { + this.queue = taskQueue; + this.maxBatchSize = maxBatchSize; + } + + @Override + public void run() { + logger.info("started"); + String line = null; // data getting from the queue just now. + SQLWriter writer = new SQLWriter(maxBatchSize); + try { + writer.init(); + while (active) { + line = queue.poll(); + if (line != null) { + // parse raw data and buffer the data. + writer.processLine(line); + } else if (writer.hasBufferedValues()) { + // write data immediately if no more data in the queue + writer.flush(); + } else { + // sleep a while to avoid high CPU usage if no more data in the queue and no buffered records, . 
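+ // (100 ms is a compromise: a shorter sleep polls the queue more aggressively, a longer one delays the final flush)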
+ Thread.sleep(100); + } + } + if (writer.hasBufferedValues()) { + writer.flush(); + } + } catch (Exception e) { + String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount()); + logger.error(msg, e); + } finally { + writer.close(); + } + } + + public void stop() { + logger.info("stop"); + this.active = false; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/resources/highvolume.drawio b/docs/examples/java/src/main/resources/highvolume.drawio new file mode 100644 index 0000000000000000000000000000000000000000..410216061813d307b9e8cc289fe58df05c01e390 --- /dev/null +++ b/docs/examples/java/src/main/resources/highvolume.drawio @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/examples/java/src/main/resources/highvolume2.drawio b/docs/examples/java/src/main/resources/highvolume2.drawio new file mode 100644 index 0000000000000000000000000000000000000000..8c9ae090071d93574e98305d3c8e458539a6b50d --- /dev/null +++ b/docs/examples/java/src/main/resources/highvolume2.drawio @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/examples/java/src/main/resources/logback.xml b/docs/examples/java/src/main/resources/logback.xml new file mode 100644 index 0000000000000000000000000000000000000000..15c6d77de733f650f3f91cb2a3163a563dbcf90f --- /dev/null +++ b/docs/examples/java/src/main/resources/logback.xml @@ -0,0 +1,22 @@ + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + demo.log + true + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + \ No newline at end of file diff --git a/docs-examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java similarity index 100% rename from docs-examples/java/src/test/java/com/taos/test/TestAll.java rename to docs/examples/java/src/test/java/com/taos/test/TestAll.java diff --git a/docs-examples/node/.gitignore b/docs/examples/node/.gitignore similarity index 100% rename from docs-examples/node/.gitignore rename to docs/examples/node/.gitignore diff --git a/docs-examples/node/nativeexample/async_query_example.js b/docs/examples/node/nativeexample/async_query_example.js similarity index 100% rename from docs-examples/node/nativeexample/async_query_example.js rename to docs/examples/node/nativeexample/async_query_example.js diff --git a/docs-examples/node/nativeexample/connect.js b/docs/examples/node/nativeexample/connect.js similarity index 100% rename from docs-examples/node/nativeexample/connect.js rename to docs/examples/node/nativeexample/connect.js diff --git a/docs-examples/node/nativeexample/influxdb_line_example.js b/docs/examples/node/nativeexample/influxdb_line_example.js similarity index 100% rename from docs-examples/node/nativeexample/influxdb_line_example.js rename to docs/examples/node/nativeexample/influxdb_line_example.js diff --git a/docs-examples/node/nativeexample/insert_example.js b/docs/examples/node/nativeexample/insert_example.js similarity index 100% rename from docs-examples/node/nativeexample/insert_example.js rename to docs/examples/node/nativeexample/insert_example.js diff --git a/docs-examples/node/nativeexample/multi_bind_example.js 
b/docs/examples/node/nativeexample/multi_bind_example.js similarity index 100% rename from docs-examples/node/nativeexample/multi_bind_example.js rename to docs/examples/node/nativeexample/multi_bind_example.js diff --git a/docs-examples/node/nativeexample/opentsdb_json_example.js b/docs/examples/node/nativeexample/opentsdb_json_example.js similarity index 100% rename from docs-examples/node/nativeexample/opentsdb_json_example.js rename to docs/examples/node/nativeexample/opentsdb_json_example.js diff --git a/docs-examples/node/nativeexample/opentsdb_telnet_example.js b/docs/examples/node/nativeexample/opentsdb_telnet_example.js similarity index 100% rename from docs-examples/node/nativeexample/opentsdb_telnet_example.js rename to docs/examples/node/nativeexample/opentsdb_telnet_example.js diff --git a/docs-examples/node/nativeexample/param_bind_example.js b/docs/examples/node/nativeexample/param_bind_example.js similarity index 100% rename from docs-examples/node/nativeexample/param_bind_example.js rename to docs/examples/node/nativeexample/param_bind_example.js diff --git a/docs-examples/node/nativeexample/query_example.js b/docs/examples/node/nativeexample/query_example.js similarity index 100% rename from docs-examples/node/nativeexample/query_example.js rename to docs/examples/node/nativeexample/query_example.js diff --git a/docs-examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js similarity index 100% rename from docs-examples/node/nativeexample/subscribe_demo.js rename to docs/examples/node/nativeexample/subscribe_demo.js diff --git a/docs-examples/node/package.json b/docs/examples/node/package.json similarity index 100% rename from docs-examples/node/package.json rename to docs/examples/node/package.json diff --git a/docs-examples/node/restexample/connect.js b/docs/examples/node/restexample/connect.js similarity index 100% rename from docs-examples/node/restexample/connect.js rename to docs/examples/node/restexample/connect.js diff --git a/docs-examples/other/mock.js b/docs/examples/other/mock.js similarity index 100% rename from docs-examples/other/mock.js rename to docs/examples/other/mock.js diff --git a/docs-examples/php/connect.php b/docs/examples/php/connect.php similarity index 100% rename from docs-examples/php/connect.php rename to docs/examples/php/connect.php diff --git a/docs-examples/php/insert.php b/docs/examples/php/insert.php similarity index 100% rename from docs-examples/php/insert.php rename to docs/examples/php/insert.php diff --git a/docs-examples/php/insert_stmt.php b/docs/examples/php/insert_stmt.php similarity index 100% rename from docs-examples/php/insert_stmt.php rename to docs/examples/php/insert_stmt.php diff --git a/docs-examples/php/query.php b/docs/examples/php/query.php similarity index 100% rename from docs-examples/php/query.php rename to docs/examples/php/query.php diff --git a/docs-examples/python/.gitignore b/docs/examples/python/.gitignore similarity index 100% rename from docs-examples/python/.gitignore rename to docs/examples/python/.gitignore diff --git a/docs-examples/python/.gitkeep b/docs/examples/python/.gitkeep similarity index 100% rename from docs-examples/python/.gitkeep rename to docs/examples/python/.gitkeep diff --git a/docs-examples/python/async_query_example.py b/docs/examples/python/async_query_example.py similarity index 100% rename from docs-examples/python/async_query_example.py rename to docs/examples/python/async_query_example.py diff --git 
a/docs-examples/python/bind_param_example.py b/docs/examples/python/bind_param_example.py similarity index 100% rename from docs-examples/python/bind_param_example.py rename to docs/examples/python/bind_param_example.py diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5555cf6d328b37dbc92ba561222122917fe7e8 --- /dev/null +++ b/docs/examples/python/conn_native_pandas.py @@ -0,0 +1,19 @@ +import taos +import pandas + +conn = taos.connect() +df: pandas.DataFrame = pandas.read_sql("SELECT * FROM meters", conn) + +# print index +print(df.index) +# print data type of element in ts column +print(type(df.ts[0])) +print(df.head(3)) + +# output: +# RangeIndex(start=0, stop=8, step=1) +# +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs/examples/python/conn_native_sqlalchemy.py b/docs/examples/python/conn_native_sqlalchemy.py new file mode 100644 index 0000000000000000000000000000000000000000..40c68bda557920182b9e91767487cac0adefd095 --- /dev/null +++ b/docs/examples/python/conn_native_sqlalchemy.py @@ -0,0 +1,19 @@ +import pandas +from sqlalchemy import create_engine + +engine = create_engine("taos://root:taosdata@localhost:6030/power") +df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine) + +# print index +print(df.index) +# print data type of element in ts column +print(type(df.ts[0])) +print(df.head(3)) + +# output: +# RangeIndex(start=0, stop=8, step=1) +# +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..356f01c5c580db72cb160ec1d57ca329cb161ad5 --- /dev/null +++ b/docs/examples/python/conn_rest_pandas.py @@ -0,0 +1,19 @@ +import taosrest +import pandas + +conn = taosrest.connect() +df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", conn) + +# print index +print(df.index) +# print data type of element in ts column +print(type(df.ts[0])) +print(df.head(3)) + +# output: +# RangeIndex(start=0, stop=8, step=1) +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... 
california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_sqlalchemy.py similarity index 100% rename from docs-examples/python/conn_rest_pandas.py rename to docs/examples/python/conn_rest_sqlalchemy.py diff --git a/docs-examples/python/connect_example.py b/docs/examples/python/connect_example.py similarity index 100% rename from docs-examples/python/connect_example.py rename to docs/examples/python/connect_example.py diff --git a/docs-examples/python/connect_native_reference.py b/docs/examples/python/connect_native_reference.py similarity index 100% rename from docs-examples/python/connect_native_reference.py rename to docs/examples/python/connect_native_reference.py diff --git a/docs-examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py similarity index 100% rename from docs-examples/python/connect_rest_examples.py rename to docs/examples/python/connect_rest_examples.py diff --git a/docs-examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py similarity index 100% rename from docs-examples/python/connection_usage_native_reference.py rename to docs/examples/python/connection_usage_native_reference.py diff --git a/docs-examples/python/cursor_usage_native_reference.py b/docs/examples/python/cursor_usage_native_reference.py similarity index 100% rename from docs-examples/python/cursor_usage_native_reference.py rename to docs/examples/python/cursor_usage_native_reference.py diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035 --- /dev/null +++ b/docs/examples/python/fast_write_example.py @@ -0,0 +1,180 @@ +# install dependencies: +# recommend python >= 3.8 +# pip3 install faster-fifo +# + +import logging +import math +import sys +import time +import os +from multiprocessing import Process +from faster_fifo import Queue +from mockdatasource import MockDataSource +from queue import Empty +from typing import List + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s") + +READ_TASK_COUNT = 1 +WRITE_TASK_COUNT = 1 +TABLE_COUNT = 1000 +QUEUE_SIZE = 1000000 +MAX_BATCH_SIZE = 3000 + +read_processes = [] +write_processes = [] + + +def get_connection(): + """ + If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used. 
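+ TDENGINE_FIRST_EP is expected in "host:port" form, e.g. localhost:6030.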
+ You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD + """ + import taos + firstEP = os.environ.get("TDENGINE_FIRST_EP") + if firstEP: + host, port = firstEP.split(":") + else: + host, port = None, 0 + user = os.environ.get("TDENGINE_USER", "root") + password = os.environ.get("TDENGINE_PASSWORD", "taosdata") + return taos.connect(host=host, port=int(port), user=user, password=password) + + +# ANCHOR: read + +def run_read_task(task_id: int, task_queues: List[Queue]): + table_count_per_task = TABLE_COUNT // READ_TASK_COUNT + data_source = MockDataSource(f"tb{task_id}", table_count_per_task) + try: + for batch in data_source: + for table_id, rows in batch: + # hash data to different queue + i = table_id % len(task_queues) + # block putting forever when the queue is full + task_queues[i].put_many(rows, block=True, timeout=-1) + except KeyboardInterrupt: + pass + + +# ANCHOR_END: read + +# ANCHOR: write +def run_write_task(task_id: int, queue: Queue): + from sql_writer import SQLWriter + log = logging.getLogger(f"WriteTask-{task_id}") + writer = SQLWriter(get_connection) + lines = None + try: + while True: + try: + # get as many as possible + lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE) + writer.process_lines(lines) + except Empty: + time.sleep(0.01) + except KeyboardInterrupt: + pass + except BaseException as e: + log.debug(f"lines={lines}") + raise e + + +# ANCHOR_END: write + +def set_global_config(): + argc = len(sys.argv) + if argc > 1: + global READ_TASK_COUNT + READ_TASK_COUNT = int(sys.argv[1]) + if argc > 2: + global WRITE_TASK_COUNT + WRITE_TASK_COUNT = int(sys.argv[2]) + if argc > 3: + global TABLE_COUNT + TABLE_COUNT = int(sys.argv[3]) + if argc > 4: + global QUEUE_SIZE + QUEUE_SIZE = int(sys.argv[4]) + if argc > 5: + global MAX_BATCH_SIZE + MAX_BATCH_SIZE = int(sys.argv[5]) + + +# ANCHOR: monitor +def run_monitor_process(): + log = logging.getLogger("DataBaseMonitor") + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE test") + conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = res.fetch_all() + return rows[0][0] if rows else 0 + + last_count = 0 + while True: + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + + +# ANCHOR_END: monitor +# ANCHOR: main +def main(): + set_global_config() + logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " + f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") + + monitor_process = Process(target=run_monitor_process) + monitor_process.start() + time.sleep(3) # waiting for database ready. 
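+ # Note: faster_fifo.Queue is bounded by total bytes (max_size_bytes), not by message count.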
+ + task_queues: List[Queue] = [] + # create task queues + for i in range(WRITE_TASK_COUNT): + queue = Queue(max_size_bytes=QUEUE_SIZE) + task_queues.append(queue) + + # create write processes + for i in range(WRITE_TASK_COUNT): + p = Process(target=run_write_task, args=(i, task_queues[i])) + p.start() + logging.debug(f"WriteTask-{i} started with pid {p.pid}") + write_processes.append(p) + + # create read processes + for i in range(READ_TASK_COUNT): + queues = assign_queues(i, task_queues) + p = Process(target=run_read_task, args=(i, queues)) + p.start() + logging.debug(f"ReadTask-{i} started with pid {p.pid}") + read_processes.append(p) + + try: + monitor_process.join() + except KeyboardInterrupt: + monitor_process.terminate() + [p.terminate() for p in read_processes] + [p.terminate() for p in write_processes] + [q.close() for q in task_queues] + + +def assign_queues(read_task_id, task_queues): + """ + Compute target queues for a specific read task. + """ + ratio = WRITE_TASK_COUNT / READ_TASK_COUNT + from_index = math.floor(read_task_id * ratio) + end_index = math.ceil((read_task_id + 1) * ratio) + return task_queues[from_index:end_index] + + +if __name__ == '__main__': + main() +# ANCHOR_END: main diff --git a/docs-examples/python/handle_exception.py b/docs/examples/python/handle_exception.py similarity index 100% rename from docs-examples/python/handle_exception.py rename to docs/examples/python/handle_exception.py diff --git a/docs/examples/python/highvolume_faster_queue.py b/docs/examples/python/highvolume_faster_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..14aebc67eee5a0701081f2f5da605184568c3a89 --- /dev/null +++ b/docs/examples/python/highvolume_faster_queue.py @@ -0,0 +1,205 @@ +# install dependencies: +# recommend python >= 3.8 +# pip3 install faster-fifo +# + +import logging +import sys +import time +import os +from multiprocessing import Process +from faster_fifo import Queue +from queue import Empty +from typing import List + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s") + +READ_TASK_COUNT = 1 +WRITE_TASK_COUNT = 1 +TABLE_COUNT = 1000 +QUEUE_SIZE = 1000000 +MAX_BATCH_SIZE = 3000 + +read_processes = [] +write_processes = [] + + +def get_connection(): + """ + If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used. 
+ You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD + """ + import taos + firstEP = os.environ.get("TDENGINE_FIRST_EP") + if firstEP: + host, port = firstEP.split(":") + else: + host, port = None, 0 + user = os.environ.get("TDENGINE_USER", "root") + password = os.environ.get("TDENGINE_PASSWORD", "taosdata") + return taos.connect(host=host, port=int(port), user=user, password=password) + + +# ANCHOR: MockDataSource +class MockDataSource: + location = ["LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"] + current = [8.8, 10.7, 9.9, 8.9, 9.4] + voltage = [119, 116, 111, 113, 118] + phase = [0.32, 0.34, 0.33, 0.329, 0.141] + max_rows_per_table = 10 ** 9 + + def __init__(self, tb_name_prefix, table_count): + self.table_name_prefix = tb_name_prefix + self.table_count = table_count + self.start_ms = round(time.time() * 1000) - self.max_rows_per_table * 100 + + def __iter__(self): + self.row = 0 + self.table_id = -1 + return self + + def __next__(self): + """ + next 100 rows of current table + """ + self.table_id += 1 + if self.table_id == self.table_count: + self.table_id = 0 + if self.row >= self.max_rows_per_table: + raise StopIteration + rows = [] + + while len(rows) < 100: + self.row += 1 + ts = self.start_ms + 100 * self.row + group_id = self.table_id % 5 if self.table_id % 5 == 0 else self.table_id % 5 + 1 + tb_name = self.table_name_prefix + '_' + str(self.table_id) + ri = self.row % 5 + rows.append(f"{tb_name},{ts},{self.current[ri]},{self.voltage[ri]},{self.phase[ri]},{self.location[ri]},{group_id}") + return self.table_id, rows + + +# ANCHOR_END: MockDataSource + +# ANCHOR: read +def run_read_task(task_id: int, task_queues: List[Queue]): + table_count_per_task = TABLE_COUNT // READ_TASK_COUNT + data_source = MockDataSource(f"tb{task_id}", table_count_per_task) + try: + for table_id, rows in data_source: + # hash data to different queue + i = table_id % len(task_queues) + # block putting forever when the queue is full + task_queues[i].put_many(rows, block=True, timeout=-1) + except KeyboardInterrupt: + pass + + +# ANCHOR_END: read + +# ANCHOR: write +def run_write_task(task_id: int, queue: Queue): + from sql_writer import SQLWriter + log = logging.getLogger(f"WriteTask-{task_id}") + writer = SQLWriter(get_connection) + lines = None + try: + while True: + try: + # get as many as possible + lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE) + writer.process_lines(lines) + except Empty: + time.sleep(0.01) + except KeyboardInterrupt: + pass + except BaseException as e: + log.debug(f"lines={lines}") + raise e + + +# ANCHOR_END: write + +def set_global_config(): + argc = len(sys.argv) + if argc > 1: + global READ_TASK_COUNT + READ_TASK_COUNT = int(sys.argv[1]) + if argc > 2: + global WRITE_TASK_COUNT + WRITE_TASK_COUNT = int(sys.argv[2]) + if argc > 3: + global TABLE_COUNT + TABLE_COUNT = int(sys.argv[3]) + if argc > 4: + global QUEUE_SIZE + QUEUE_SIZE = int(sys.argv[4]) + if argc > 5: + global MAX_BATCH_SIZE + MAX_BATCH_SIZE = int(sys.argv[5]) + + +# ANCHOR: monitor +def run_monitor_process(): + import taos + log = logging.getLogger("DataBaseMonitor") + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE test") + conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = 
res.fetch_all() + return rows[0][0] if rows else 0 + + last_count = 0 + while True: + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + + +# ANCHOR_END: monitor +# ANCHOR: main +def main(): + set_global_config() + logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " + f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") + + monitor_process = Process(target=run_monitor_process) + monitor_process.start() + time.sleep(3) # waiting for database ready. + + task_queues: List[Queue] = [] + # create task queues + for i in range(WRITE_TASK_COUNT): + queue = Queue(max_size_bytes=QUEUE_SIZE) + task_queues.append(queue) + # create write processes + for i in range(WRITE_TASK_COUNT): + p = Process(target=run_write_task, args=(i, task_queues[i])) + p.start() + logging.debug(f"WriteTask-{i} started with pid {p.pid}") + write_processes.append(p) + # create read processes + for i in range(READ_TASK_COUNT): + p = Process(target=run_read_task, args=(i, task_queues)) + p.start() + logging.debug(f"ReadTask-{i} started with pid {p.pid}") + read_processes.append(p) + + try: + monitor_process.join() + except KeyboardInterrupt: + monitor_process.terminate() + [p.terminate() for p in read_processes] + [p.terminate() for p in write_processes] + [q.close() for q in task_queues] + + +if __name__ == '__main__': + main() +# ANCHOR_END: main diff --git a/docs-examples/python/json_protocol_example.py b/docs/examples/python/json_protocol_example.py similarity index 100% rename from docs-examples/python/json_protocol_example.py rename to docs/examples/python/json_protocol_example.py diff --git a/docs-examples/python/line_protocol_example.py b/docs/examples/python/line_protocol_example.py similarity index 100% rename from docs-examples/python/line_protocol_example.py rename to docs/examples/python/line_protocol_example.py diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py new file mode 100644 index 0000000000000000000000000000000000000000..852860aec0adc8f9b043c9dcd5deb0bf00239201 --- /dev/null +++ b/docs/examples/python/mockdatasource.py @@ -0,0 +1,49 @@ +import time + + +class MockDataSource: + samples = [ + "8.8,119,0.32,LosAngeles,0", + "10.7,116,0.34,SanDiego,1", + "9.9,111,0.33,Hollywood,2", + "8.9,113,0.329,Compton,3", + "9.4,118,0.141,San Francisco,4" + ] + + def __init__(self, tb_name_prefix, table_count): + self.table_name_prefix = tb_name_prefix + "_" + self.table_count = table_count + self.max_rows = 10000000 + self.current_ts = round(time.time() * 1000) - self.max_rows * 100 + # [(tableId, tableName, values),] + self.data = self._init_data() + + def _init_data(self): + lines = self.samples * (self.table_count // 5 + 1) + data = [] + for i in range(self.table_count): + table_name = self.table_name_prefix + str(i) + data.append((i, table_name, lines[i])) # tableId, row + return data + + def __iter__(self): + self.row = 0 + return self + + def __next__(self): + """ + next 1000 rows for each table. 
+        :return: a list of (tableId, rows) tuples
+        """
+        # generate 1000 timestamps
+        ts = []
+        for _ in range(1000):
+            self.current_ts += 100
+            ts.append(str(self.current_ts))
+        # add a timestamp to each row
+        # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
+        result = []
+        for table_id, table_name, values in self.data:
+            rows = [table_name + ',' + t + ',' + values for t in ts]
+            result.append((table_id, rows))
+        return result
diff --git a/docs-examples/python/multi_bind_example.py b/docs/examples/python/multi_bind_example.py similarity index 100% rename from docs-examples/python/multi_bind_example.py rename to docs/examples/python/multi_bind_example.py
diff --git a/docs-examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py similarity index 100% rename from docs-examples/python/native_insert_example.py rename to docs/examples/python/native_insert_example.py
diff --git a/docs-examples/python/query_example.py b/docs/examples/python/query_example.py similarity index 100% rename from docs-examples/python/query_example.py rename to docs/examples/python/query_example.py
diff --git a/docs-examples/python/rest_client_example.py b/docs/examples/python/rest_client_example.py similarity index 100% rename from docs-examples/python/rest_client_example.py rename to docs/examples/python/rest_client_example.py
diff --git a/docs-examples/python/result_set_examples.py b/docs/examples/python/result_set_examples.py similarity index 100% rename from docs-examples/python/result_set_examples.py rename to docs/examples/python/result_set_examples.py
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..cb04f85c239af7c4801e2a5ef0483a88b21245ef --- /dev/null +++ b/docs/examples/python/sql_writer.py @@ -0,0 +1,90 @@
+import logging
+from typing import List
+
+import taos
+
+
+class SQLWriter:
+    log = logging.getLogger("SQLWriter")
+
+    def __init__(self, get_connection_func):
+        self._tb_values = {}
+        self._tb_tags = {}
+        self._conn = get_connection_func()
+        self._max_sql_length = self.get_max_sql_length()
+        self._conn.execute("USE test")
+
+    def get_max_sql_length(self):
+        rows = self._conn.query("SHOW variables").fetch_all()
+        for r in rows:
+            name = r[0]
+            if name == "maxSQLLength":
+                return int(r[1])
+        return 1024 * 1024
+
+    def process_lines(self, lines: List[str]):
+        """
+        :param lines: a list of "tbName,ts,current,voltage,phase,location,groupId" strings
+        """
+        for line in lines:
+            ps = line.split(",")
+            table_name = ps[0]
+            value = '(' + ",".join(ps[1:-2]) + ') '
+            if table_name in self._tb_values:
+                self._tb_values[table_name] += value
+            else:
+                self._tb_values[table_name] = value
+
+            if table_name not in self._tb_tags:
+                location = ps[-2]
+                group_id = ps[-1]
+                tag_value = f"('{location}',{group_id})"
+                self._tb_tags[table_name] = tag_value
+        self.flush()
+
+    def flush(self):
+        """
+        Assemble an INSERT statement and execute it.
+        When the SQL length grows close to MAX_SQL_LENGTH, the SQL is executed immediately and a new INSERT statement is started.
+        In case of a "Table does not exist" error, the tables in the SQL are created and the SQL is executed again.
+ """ + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + for tb_name, values in self._tb_values.items(): + q = tb_name + " VALUES " + values + if sql_len + len(q) >= self._max_sql_length: + sql += " ".join(buf) + self.execute_sql(sql) + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + buf.append(q) + sql_len += len(q) + sql += " ".join(buf) + self.execute_sql(sql) + self._tb_values.clear() + + def execute_sql(self, sql): + try: + self._conn.execute(sql) + except taos.Error as e: + error_code = e.errno & 0xffff + # Table does not exit + if error_code == 0x362 or error_code == 0x218: + self.create_tables() + else: + self.log.error("Execute SQL: %s", sql) + raise e + except BaseException as baseException: + self.log.error("Execute SQL: %s", sql) + raise baseException + + def create_tables(self): + sql = "CREATE TABLE " + for tb in self._tb_values.keys(): + tag_values = self._tb_tags[tb] + sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " " + try: + self._conn.execute(sql) + except BaseException as e: + self.log.error("Execute SQL: %s", sql) + raise e diff --git a/docs/examples/python/stmt_writer.py b/docs/examples/python/stmt_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..60846b5a6491491655905008b58e6411818720fb --- /dev/null +++ b/docs/examples/python/stmt_writer.py @@ -0,0 +1,2 @@ +class StmtWriter: + pass diff --git a/docs-examples/python/subscribe_demo.py b/docs/examples/python/subscribe_demo.py similarity index 100% rename from docs-examples/python/subscribe_demo.py rename to docs/examples/python/subscribe_demo.py diff --git a/docs-examples/python/telnet_line_protocol_example.py b/docs/examples/python/telnet_line_protocol_example.py similarity index 100% rename from docs-examples/python/telnet_line_protocol_example.py rename to docs/examples/python/telnet_line_protocol_example.py diff --git a/docs-examples/rust/Cargo.toml b/docs/examples/rust/Cargo.toml similarity index 100% rename from docs-examples/rust/Cargo.toml rename to docs/examples/rust/Cargo.toml diff --git a/docs-examples/rust/nativeexample/Cargo.toml b/docs/examples/rust/nativeexample/Cargo.toml similarity index 100% rename from docs-examples/rust/nativeexample/Cargo.toml rename to docs/examples/rust/nativeexample/Cargo.toml diff --git a/docs-examples/rust/nativeexample/examples/connect.rs b/docs/examples/rust/nativeexample/examples/connect.rs similarity index 100% rename from docs-examples/rust/nativeexample/examples/connect.rs rename to docs/examples/rust/nativeexample/examples/connect.rs diff --git a/docs-examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs similarity index 100% rename from docs-examples/rust/nativeexample/examples/stmt_example.rs rename to docs/examples/rust/nativeexample/examples/stmt_example.rs diff --git a/docs-examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs similarity index 100% rename from docs-examples/rust/nativeexample/examples/subscribe_demo.rs rename to docs/examples/rust/nativeexample/examples/subscribe_demo.rs diff --git a/docs-examples/rust/nativeexample/src/main.rs b/docs/examples/rust/nativeexample/src/main.rs similarity index 100% rename from docs-examples/rust/nativeexample/src/main.rs rename to docs/examples/rust/nativeexample/src/main.rs diff --git a/docs-examples/rust/restexample/Cargo.toml b/docs/examples/rust/restexample/Cargo.toml similarity index 100% rename from 
docs-examples/rust/restexample/Cargo.toml rename to docs/examples/rust/restexample/Cargo.toml diff --git a/docs-examples/rust/restexample/examples/connect.rs b/docs/examples/rust/restexample/examples/connect.rs similarity index 100% rename from docs-examples/rust/restexample/examples/connect.rs rename to docs/examples/rust/restexample/examples/connect.rs diff --git a/docs-examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs similarity index 100% rename from docs-examples/rust/restexample/examples/insert_example.rs rename to docs/examples/rust/restexample/examples/insert_example.rs diff --git a/docs-examples/rust/restexample/examples/query_example.rs b/docs/examples/rust/restexample/examples/query_example.rs similarity index 100% rename from docs-examples/rust/restexample/examples/query_example.rs rename to docs/examples/rust/restexample/examples/query_example.rs diff --git a/docs-examples/rust/restexample/src/main.rs b/docs/examples/rust/restexample/src/main.rs similarity index 100% rename from docs-examples/rust/restexample/src/main.rs rename to docs/examples/rust/restexample/src/main.rs diff --git a/docs-examples/rust/schemalessexample/Cargo.toml b/docs/examples/rust/schemalessexample/Cargo.toml similarity index 100% rename from docs-examples/rust/schemalessexample/Cargo.toml rename to docs/examples/rust/schemalessexample/Cargo.toml diff --git a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs similarity index 100% rename from docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs rename to docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs similarity index 100% rename from docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs rename to docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs similarity index 100% rename from docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs rename to docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs diff --git a/docs-examples/rust/schemalessexample/src/main.rs b/docs/examples/rust/schemalessexample/src/main.rs similarity index 100% rename from docs-examples/rust/schemalessexample/src/main.rs rename to docs/examples/rust/schemalessexample/src/main.rs diff --git a/docs-cn/01-index.md b/docs/zh/01-index.md similarity index 100% rename from docs-cn/01-index.md rename to docs/zh/01-index.md diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md new file mode 100644 index 0000000000000000000000000000000000000000..607cef719572f25d7200c7eb480b0a31257094f8 --- /dev/null +++ b/docs/zh/02-intro.md @@ -0,0 +1,124 @@ +--- +title: 产品简介 +toc_max_heading_level: 2 +--- + +TDengine 是一款高性能、分布式、支持 SQL 的时序数据库 (Database),其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库 (Database) 功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/subscribe)、[流式计算](../develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。 + +本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。 + +## 主要功能 + +TDengine的主要功能如下: + +1. 
高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line)、[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet)、[OpenTSDB JSON](../develop/insert-data/opentsdb-json) 等协议写入;
+2. 第三方数据采集工具 [Telegraf](../third-party/telegraf)、[Prometheus](../third-party/prometheus)、[StatsD](../third-party/statsd)、[collectd](../third-party/collectd)、[icinga2](../third-party/icinga2)、[TCollector](../third-party/tcollector)、[EMQ](../third-party/emq-broker)、[HiveMQ](../third-party/hive-mq-broker) 等都可以在简单配置后,不用任何代码,即可将数据写入;
+3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
+4. 支持[用户自定义函数](../develop/udf)
+5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
+6. 支持[连续查询](../develop/continuous-query)(Continuous Query)
+7. 支持[数据订阅](../develop/subscribe),而且可以指定过滤条件
+8. 支持[集群](../cluster/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
+9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
+11. 支持对 [TDengine 集群本身的监控](/operation/monitor)
+12. 提供各种语言的[连接器](../reference/connector): C/C++, Java, Go, Python, Rust, Node.JS, C#
+13. 支持 [REST 接口](../reference/rest-api/)
+14. 支持与 [Grafana 无缝集成](../third-party/grafana)
+15. 支持与 Google Data Studio 无缝集成
+
+更多细分功能,请阅读整个文档。
+
+## 竞争优势
+
+由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
+
+- **[高性能](https://www.taosdata.com/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,而且存储空间也大为节省。
+
+- **[分布式](https://www.taosdata.com/scalable)**:通过原生分布式的设计,TDengine 提供了水平扩展的能力,只需要增加节点就能获得更强的数据处理能力,同时通过多副本机制保证了系统的高可用。
+
+- **[支持 SQL](https://www.taosdata.com/sql-support)**:TDengine 采用 SQL 作为数据查询语言,减少学习和迁移成本,同时提供 SQL 扩展来处理时序数据特有的分析,而且支持方便灵活的 schemaless 数据写入。
+
+- **All in One**:将数据库、消息队列、缓存、流式计算等功能融合一起,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低应用开发和维护成本。
+
+- **零管理**:安装、集群几秒搞定,无任何依赖,不用分库分表,系统运行状态监测能与 Grafana 或其他运维工具无缝集成。
+
+- **零学习成本**:采用 SQL 查询语言,支持 C/C++、Python、Java、Go、Rust、Node.js、C#、Lua(社区贡献)、PHP(社区贡献)等多种编程语言,与 MySQL 相似,零学习成本。
+
+- **无缝集成**:不用一行代码,即可与 Telegraf、Grafana、Prometheus、EMQX、HiveMQ、StatsD、collectd、icinga、TCollector、Matlab、R 等第三方工具无缝集成。
+
+- **互动 Console**:通过命令行 console,不用编程,执行 SQL 语句就能做即席查询、各种数据库的操作、管理以及集群的维护。
+
+采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
+
+1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+2. 因为采用 SQL 接口,能与众多第三方软件无缝集成,学习迁移成本大幅下降
+3. 因为其 All In One 的特性,系统复杂度降低,能降低研发成本
+4. 因为运维简单,运营维护成本能大幅降低
+
+## 技术生态
+
+在整个时序大数据平台中,TDengine 扮演的角色如下:
+
+![TDengine Database 技术生态图](eco_system.webp)
+
+图 1. TDengine 技术生态图
+
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf,也包括 Kafka,它们的数据将被源源不断地写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
+
+## 总体适用场景
+
+作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+
+### 数据源特点和需求
+
+从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
+
+| 数据源特点和需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------------- | ------ | -------- | -------- | ------------------------------------------------------------ |
+| 总体数据量巨大 | | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。 |
+| 数据输入速度偶尔或者持续巨大 | | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。 |
+| 数据源数目巨大 | | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。 |
+
+### 系统架构要求
+
+| 系统架构要求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------ |
+| 要求简单可靠的系统架构 | | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。 |
+| 要求容错和高可靠 | | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。 |
+| 标准化规范 | | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。 |
+
+### 系统功能需求
+
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------ |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要在应用层面处理。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+
+### 系统性能需求
+
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------ |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+
+### 系统维护需求
+
+| 系统维护需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------ |
+| 要求系统可靠运行 | | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。 |
+| 要求运维学习成本可控 | | | √ | 同上。 |
+| 要求市场有大量人才储备 | √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。 |
+
+## 与其他数据库的对比测试
+
+- [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
+- [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html)
+- [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html)
+- [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html)
+- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
+- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
diff --git a/docs-cn/04-concept/_category_.yml b/docs/zh/04-concept/_category_.yml similarity index 100% rename from docs-cn/04-concept/_category_.yml rename to docs/zh/04-concept/_category_.yml
diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md new file mode 100644 index 
0000000000000000000000000000000000000000..0a0e4a3a2f251a5316f95c5dbb071215d0af35db --- /dev/null +++ b/docs/zh/04-concept/index.md @@ -0,0 +1,173 @@
+---
+title: 数据模型和基本概念
+---
+
+为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 两个静态属性。其采集的数据类似如下的表格:
+
+| Device ID | Time Stamp | current | voltage | phase | location | groupId |
+| :-------: | :-----------: | :-----: | :-----: | :---: | :---------------------: | :-----: |
+| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
+| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | California.SanFrancisco | 3 |
+| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
+| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
+| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
+| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
+| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
+| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
+
+表 1:智能电表数据示例(其中 current、voltage、phase 为采集的物理量,location、groupId 为标签)
+ +每一条记录都有设备 ID,时间戳,采集的物理量以及每个设备相关的静态标签。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。 + +## 采集量 (Metric) + +采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。 + +## 标签 (Label/Tag) + +标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。 + +## 数据采集点 (Data Collection Point) + +数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。 + +## 表 (Table) + +因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。 + +为充分利用其数据的时序性和其他数据特点,TDengine 采取**一个数据采集点一张表**的策略,要求对每个数据采集点单独建表(比如有一千万个智能电表,就需创建一千万张表,上述表格中的 d1001,d1002,d1003,d1004 都需单独建表),用来存储这个数据采集点所采集的时序数据。这种设计有几大优点: + +1. 由于不同数据采集点产生数据的过程完全独立,每个数据采集点的数据源是唯一的,一张表也就只有一个写入者,这样就可采用无锁方式来写,写入速度就能大幅提升。 +2. 对于一个数据采集点而言,其产生的数据是按照时间排序的,因此写的操作可用追加的方式实现,进一步大幅提高数据写入速度。 +3. 一个数据采集点的数据是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。 +4. 一个数据块内部,采用列式存储,对于不同数据类型,采用不同压缩算法,而且由于一个数据采集点的采集量的变化是缓慢的,压缩率更高。 + +如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。** + +TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 current,voltage,phase),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。 + +对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。 + +## 超级表 (STable) + +由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。 + +超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。 + +在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。 + +## 子表 (Subtable) + +当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于: + +1. 子表就是表,因此所有正常表的SQL操作都可以在子表上执行。 +2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。 +3. 子表一定属于一张超级表,但普通表不属于任何超级表 +4. 普通表无法转为子表,子表也无法转为普通表。 + +超级表与基于超级表建立的子表之间的关系表现在: + +1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。 +2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。 +3. 
超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。 + +查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。 + +TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。 + +## 库 (database) + +库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。 + +一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。 + +## FQDN & End Point + +FQDN (fully qualified domain name, 完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。 + +TDengine 集群的每个节点是由 End Point 来唯一标识的,End Point 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。 + +TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 diff --git a/docs-cn/05-get-started/_apt_get_install.mdx b/docs/zh/05-get-started/_apt_get_install.mdx similarity index 100% rename from docs-cn/05-get-started/_apt_get_install.mdx rename to docs/zh/05-get-started/_apt_get_install.mdx diff --git a/docs-cn/05-get-started/_category_.yml b/docs/zh/05-get-started/_category_.yml similarity index 100% rename from docs-cn/05-get-started/_category_.yml rename to docs/zh/05-get-started/_category_.yml diff --git a/docs/zh/05-get-started/_pkg_install.mdx b/docs/zh/05-get-started/_pkg_install.mdx new file mode 100644 index 0000000000000000000000000000000000000000..85d6d7b6ede9b3bc126c6f3b3680ad76b5cd1241 --- /dev/null +++ b/docs/zh/05-get-started/_pkg_install.mdx @@ -0,0 +1,17 @@ +import PkgList from "/components/PkgList"; + +TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。 + +为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 + +在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。 + +发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载: + + + +具体的安装方法,请参见[安装包的安装和卸载](../13-operation/01-pkg-install.md)。 + +下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads)。 + +查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases)。 diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md new file mode 100644 index 0000000000000000000000000000000000000000..272a231ed714305ce27525229333f8049e1ff450 --- /dev/null +++ b/docs/zh/05-get-started/index.md @@ -0,0 +1,173 @@ +--- +title: 立即开始 +description: '从 Docker,安装包或使用 apt-get 快速安装 TDengine, 通过命令行程序TDengine CLI和工具 taosdemo 快速体验 TDengine 功能' +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import PkgInstall from "./\_pkg_install.mdx"; +import AptGetInstall from "./\_apt_get_install.mdx"; + +## 安装 + +TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件,目前 2.X 版服务端 taosd 和 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。应用驱动 taosc 与 TDengine CLI 可以在 Windows 或 Linux 上安装和运行。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../reference/rest-api)。但在 2.4 之前的版本中没有 taosAdapter,RESTful 接口是由 taosd 内置的 HTTP 服务提供的。 + +TDengine 支持 X64/ARM64/MIPS64/Alpha64 硬件平台,后续将支持 ARM32、RISC-V 等 
CPU 架构。
+
+
+
+如果已经安装了 Docker,只需执行下面的命令:
+
+```shell
+docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
+```
+
+确定该容器已经启动并且在正常运行:
+
+```shell
+docker ps
+```
+
+进入该容器并执行 bash(`<container name>` 请替换为实际的容器名或容器 ID):
+
+```shell
+docker exec -it <container name> bash
+```
+
+然后就可以执行相关的 Linux 命令操作和访问 TDengine。
+
+详细操作方法请参照 [通过 Docker 快速体验 TDengine](../train-faq/docker)。
+
+:::info
+从 2.4.0.10 开始,除 taosd 以外,Docker 镜像还包含:taos、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码。启动 Docker 容器时,将同时启动 taosAdapter 和 taosd,实现对 RESTful 的支持。
+
+:::
+
+
+
+
+
+
+
+
+
+如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装。
+
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/cn/all-downloads/)。
+
+
+
+
+## 启动
+
+安装后,请使用 `systemctl` 命令来启动 TDengine 的服务进程。
+
+```bash
+systemctl start taosd
+```
+
+检查服务是否正常工作:
+
+```bash
+systemctl status taosd
+```
+
+如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
+
+:::info
+
+- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo。
+- 为更好地获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting,将其设为 0,就可将其关闭。
+- TDengine 采用 FQDN(一般就是 hostname)作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 FQDN,在 TDengine CLI 或应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。
+- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
+
+TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 Linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
+
+```bash
+which systemctl
+```
+
+如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
+
+:::
+
+## TDengine 命令行 (CLI)
+
+为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供了命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。
+
+```bash
+taos
+```
+
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
+
+```cmd
+taos>
+```
+
+在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)的插入、查询操作。在终端中运行的 SQL 语句需要以分号结束。示例:
+
+```sql
+create database demo;
+use demo;
+create table t (ts timestamp, speed int);
+insert into t values ('2019-07-15 00:00:00', 10);
+insert into t values ('2019-07-15 01:00:00', 20);
+select * from t;
+           ts            |  speed  |
+========================================
+ 2019-07-15 00:00:00.000 |      10 |
+ 2019-07-15 01:00:00.000 |      20 |
+Query OK, 2 row(s) in set (0.003128s)
+```
+
+除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../reference/taos-shell/)。
+
+## 使用 taosBenchmark 体验写入速度
+
+启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
+
+```bash
+taosBenchmark
+```
+
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10,location 被设置为 "San Francisco" 或者 "Los Angeles" 等城市名称。
+
+这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器上往往也仅需十几秒。
+
+taosBenchmark 命令本身带有很多选项,可配置表的数目、记录条数等,您可以设置不同参数进行体验,完整的参数列表可执行 `taosBenchmark --help` 查看。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
+
+## 使用 TDengine CLI 体验查询速度
+
+使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。
+
+查询超级表下记录总条数:
+
+```sql
+taos> select count(*) from test.meters;
+```
+
+查询 1 亿条记录的平均值、最大值、最小值等:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.meters;
+```
+
+查询 location="San Francisco" 的记录总条数:
+
+```sql +taos> select count(*) from test.meters where location="San Francisco"; +``` + +查询 groupId=10 的所有记录的平均值、最大值、最小值等: + +```sql +taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; +``` + +对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: + +```sql +taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); +``` diff --git a/docs-cn/07-develop/01-connect/_category_.yml b/docs/zh/07-develop/01-connect/_category_.yml similarity index 100% rename from docs-cn/07-develop/01-connect/_category_.yml rename to docs/zh/07-develop/01-connect/_category_.yml diff --git a/docs/zh/07-develop/01-connect/_connect_c.mdx b/docs/zh/07-develop/01-connect/_connect_c.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1b145538dc9ec77a91d3a20786521a8922cafc66 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_c.mdx @@ -0,0 +1,3 @@ +```c title="原生连接" +{{#include docs/examples/c/connect_example.c}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_cs.mdx b/docs/zh/07-develop/01-connect/_connect_cs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..13b8a5dff250e6143fbed3090ba1f35e74adb9a0 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_cs.mdx @@ -0,0 +1,8 @@ +```csharp title="原生连接" +{{#include docs/examples/csharp/ConnectExample.cs}} +``` + +:::info +C# 连接器目前只支持原生连接。 + +::: diff --git a/docs/zh/07-develop/01-connect/_connect_go.mdx b/docs/zh/07-develop/01-connect/_connect_go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d69720496df86436153a7b969c0125235b2d93b0 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_go.mdx @@ -0,0 +1,17 @@ +#### 使用数据库访问统一接口 + +```go title="原生连接" +{{#include docs/examples/go/connect/cgoexample/main.go}} +``` + +```go title="REST 连接" +{{#include docs/examples/go/connect/restexample/main.go}} +``` + +#### 使用高级封装 + +也可以使用 driver-go 的 af 包建立连接。这个模块封装了 TDengine 的高级功能, 如:参数绑定、订阅等。 + +```go title="使用 af 包建立原生连接" +{{#include docs/examples/go/connect/afconn/main.go}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_java.mdx b/docs/zh/07-develop/01-connect/_connect_java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..124b61182c98807012333b8df60ed64fef783b42 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_java.mdx @@ -0,0 +1,15 @@ +```java title="原生连接" +{{#include docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java}} +``` + +```java title="REST 连接" +{{#include docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java:main}} +``` + +使用 REST 连接时,如果查询数据量比较大,还可开启批量拉取功能。 + +```java title="开启批量拉取功能" {4} +{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}} +``` + +更多连接参数配置,参考[Java 连接器](../../reference/connector/java) diff --git a/docs/zh/07-develop/01-connect/_connect_node.mdx b/docs/zh/07-develop/01-connect/_connect_node.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3a856d40229513c8fc890aab4abe36dfdf15382e --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_node.mdx @@ -0,0 +1,7 @@ +```js title="原生连接" +{{#include docs/examples/node/nativeexample/connect.js}} +``` + +```js title="REST 连接" +{{#include docs/examples/node/restexample/connect.js}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_php.mdx b/docs/zh/07-develop/01-connect/_connect_php.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dbad72bc1988bd5336f1da132dd9e6ba9b8020e6 --- /dev/null +++ 
b/docs/zh/07-develop/01-connect/_connect_php.mdx @@ -0,0 +1,3 @@ +```php title="原生连接" +{{#include docs/examples/php/connect.php}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_python.mdx b/docs/zh/07-develop/01-connect/_connect_python.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b331f4648c60fdcf354ea6e4440c9d716040a609 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_python.mdx @@ -0,0 +1,3 @@ +```python title="原生连接" +{{#include docs/examples/python/connect_example.py}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_r.mdx b/docs/zh/07-develop/01-connect/_connect_r.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ba72dc848c5393340f1041c83f2fe3581db5a5d3 --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_r.mdx @@ -0,0 +1,3 @@ +```r title="原生连接" +{{#include docs/examples/R/connect_native.r:demo}} +``` diff --git a/docs/zh/07-develop/01-connect/_connect_rust.mdx b/docs/zh/07-develop/01-connect/_connect_rust.mdx new file mode 100644 index 0000000000000000000000000000000000000000..25f178a285f63230ad4e0af4c5de1bae638e77da --- /dev/null +++ b/docs/zh/07-develop/01-connect/_connect_rust.mdx @@ -0,0 +1,8 @@ +```rust title="原生连接/REST 连接" +{{#include docs/examples/rust/nativeexample/examples/connect.rs}} +``` + +:::note +对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "rest" 特性,那么只有 RESTful 的实现会被编译进来。 + +::: diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md new file mode 100644 index 0000000000000000000000000000000000000000..5448e6ba1a5f2ae3c233adcec5c03f6320b69b85 --- /dev/null +++ b/docs/zh/07-develop/01-connect/index.md @@ -0,0 +1,280 @@ +--- +title: 建立连接 +description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。" +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import ConnJava from "./_connect_java.mdx"; +import ConnGo from "./_connect_go.mdx"; +import ConnRust from "./_connect_rust.mdx"; +import ConnNode from "./_connect_node.mdx"; +import ConnPythonNative from "./_connect_python.mdx"; +import ConnCSNative from "./_connect_cs.mdx"; +import ConnC from "./_connect_c.mdx"; +import ConnR from "./_connect_r.mdx"; +import ConnPHP from "./_connect_php.mdx"; +import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx"; +import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx"; +import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx"; +import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx"; + +TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C#、Rust、Lua(社区贡献)和 PHP (社区贡献)的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 + +## 连接器建立连接的方式 + +连接器建立连接的方式,TDengine 提供两种: + +1. 通过 taosAdapter 组件提供的 REST API 建立与 taosd 的连接,这种连接方式下文中简称“REST 连接” +2. 通过客户端驱动程序 taosc 直接与服务端程序 taosd 建立连接,这种连接方式下文中简称“原生连接”。 + +无论使用何种方式建立连接,连接器都提供了相同或相似的 API 操作数据库,都可以执行 SQL 语句,只是初始化连接的方式稍有不同,用户在使用上不会感到什么差别。 + +关键不同点在于: + +1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。 +2. 
使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../reference/connector/cpp#参数绑定-api)、[订阅](../../reference/connector/cpp#订阅和消费-api)等等。 + +## 安装客户端驱动 taosc + +如果选择原生连接,而且应用程序不在 TDengine 同一台服务器上运行,你需要先安装客户端驱动,否则可以跳过此一步。为避免客户端驱动和服务端不兼容,请使用一致的版本。 + +### 安装步骤 + + + + + + + + + + +### 安装验证 + +以上安装和配置完成后,并确认 TDengine 服务已经正常启动运行,此时可以执行安装包里带有的 TDengine 命令行程序 taos 进行登录。 + + + + + + + + + + +## 安装连接器 + + + + +如果使用 maven 管理项目,只需在 pom.xml 中加入以下依赖。 + +```xml + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.38 + +``` + + + + +使用 `pip` 从 PyPI 安装: + +``` +pip install taospy +``` + +从 Git URL 安装: + +``` +pip install git+https://github.com/taosdata/taos-connector-python.git +``` + + + + +编辑 `go.mod` 添加 `driver-go` 依赖即可。 + +```go-mod title=go.mod +{{#include docs/examples/go/go.mod}} +``` + +:::note +driver-go 使用 cgo 封装了 taosc 的 API。cgo 需要使用 gcc 编译 C 的源码。因此需要确保你的系统上有 gcc。 + +::: + + + + +编辑 `Cargo.toml` 添加 `libtaos` 依赖即可。 + +```toml title=Cargo.toml +[dependencies] +libtaos = { version = "0.4.2"} +``` + +:::info +Rust 连接器通过不同的特性区分不同的连接方式。如果要建立 REST 连接,需要开启 `rest` 特性: + +```toml +libtaos = { version = "*", features = ["rest"] } +``` + +::: + + + + +Node.js 连接器通过不同的包提供不同的连接方式。 + +1. 安装 Node.js 原生连接器 + + ``` + npm i td2.0-connector + ``` + +:::note +推荐 Node 版本大于等于 `node-v12.8.0` 小于 `node-v13.0.0` +::: + +2. 安装 Node.js REST 连接器 + + ``` + npm i td2.0-rest-connector + ``` + + + + +编辑项目配置文件中添加 [TDengine.Connector](https://www.nuget.org/packages/TDengine.Connector/) 的引用即可: + +```xml title=csharp.csproj {12} + + + + Exe + net6.0 + enable + enable + TDengineExample.AsyncQueryExample + + + + + + + +``` + +也可通过 dotnet 命令添加: + +``` +dotnet add package TDengine.Connector +``` + +:::note +以下示例代码,均基于 dotnet6.0,如果使用其它版本,可能需要做适当调整。 + +::: + + + + +1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/)。 +2. 安装 R 的依赖包`RJDBC`: + +```R +install.packages("RJDBC") +``` + + + + +如果已经安装了 TDengine 服务端软件或 TDengine 客户端驱动 taosc, 那么已经安装了 C 连接器,无需额外操作。 +
+ +
+ + +**下载代码并解压:** + +```shell +curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ +&& mkdir php-tdengine \ +&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 +``` + +> 版本 `v1.0.2` 只是示例,可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases) 中查看可用版本。 + +**非 Swoole 环境:** + +```shell +phpize && ./configure && make -j && make install +``` + +**手动指定 TDengine 目录:** + +```shell +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +``` + +> `--with-tdengine-dir=` 后跟上 TDengine 目录。 +> 适用于默认找不到的情况,或者 macOS 系统用户。 + +**Swoole 环境:** + +```shell +phpize && ./configure --enable-swoole && make -j && make install +``` + +**启用扩展:** + +方法一:在 `php.ini` 中加入 `extension=tdengine` + +方法二:运行带参数 `php -d extension=tdengine test.php` + + +
+ +## 建立连接 + +在执行这一步之前,请确保有一个正在运行的,且可以访问到的 TDengine,而且服务端的 FQDN 配置正确。以下示例代码,都假设 TDengine 安装在本机,且 FQDN(默认 localhost) 和 serverPort(默认 6030) 都使用默认配置。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +:::tip +如果建立连接失败,大部分情况下是 FQDN 或防火墙的配置不正确,详细的排查方法请看[《常见问题及反馈》](https://docs.taosdata.com/train-faq/faq)中的“遇到错误 Unable to establish connection, 我怎么办?” + +::: diff --git a/docs-cn/07-develop/02-model/_category_.yml b/docs/zh/07-develop/02-model/_category_.yml similarity index 100% rename from docs-cn/07-develop/02-model/_category_.yml rename to docs/zh/07-develop/02-model/_category_.yml diff --git a/docs-cn/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx similarity index 100% rename from docs-cn/07-develop/02-model/index.mdx rename to docs/zh/07-develop/02-model/index.mdx diff --git a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx similarity index 100% rename from docs-cn/07-develop/03-insert-data/01-sql-writing.mdx rename to docs/zh/07-develop/03-insert-data/01-sql-writing.mdx diff --git a/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx similarity index 100% rename from docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx rename to docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx diff --git a/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx similarity index 100% rename from docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx rename to docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx diff --git a/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx similarity index 100% rename from docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx rename to docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md new file mode 100644 index 0000000000000000000000000000000000000000..b8647b6ad71b2c40d307061b369dd9565dfdf471 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md @@ -0,0 +1,440 @@ +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# 高效写入 + +本节介绍如何高效地向 TDengine 写入数据。 + +## 高效写入原理 {#principle} + +### 客户端程序的角度 {#application-view} + +从客户端程序的角度来说,高效写入数据要考虑以下几个因素: + +1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。可通过配置客户端参数 maxSQLLength(默认值为 65480)进行修改。 +2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。 +3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效; +4. 写入方式。一般来讲: + - 参数绑定写入比 SQL 写入更高效。因参数绑定方式避免了 SQL 解析。(但增加了 C 接口的调用次数,对于连接器也有性能损耗)。 + - SQL 写入不自动建表比自动建表更高效。因自动建表要频繁检查表是否存在 + - SQL 写入比无模式写入更高效。因无模式写入会自动建表且支持动态更改表结构 + +客户端程序要充分且恰当地利用以上几个因素。在单次写入中尽量只向同一张表(或子表)写入数据,每批次写入的数据量经过测试和调优设定为一个最适合当前系统处理能力的数值,并发写入的连接数同样经过测试和调优后设定为一个最适合当前系统处理能力的数值,以实现在当前系统中的最佳写入速度。 + +### 数据源的角度 {#datasource-view} + +客户端程序通常需要从数据源读数据再写入 TDengine。从数据源角度来说,以下几种情况需要在读线程和写线程之间增加队列: + +1. 有多个数据源,单个数据源生成数据的速度远小于单线程写入的速度,但数据量整体比较大。此时队列的作用是把多个数据源的数据汇聚到一起,增加单次写入的数据量。 +2. 单个数据源生成数据的速度远大于单线程写入的速度。此时队列的作用是增加写入的并发度。 +3. 单张表的数据分散在多个数据源。此时队列的作用是将同一张表的数据提前汇聚到一起,提高写入时数据的相邻性。 + +如果写应用的数据源是 Kafka, 写应用本身即 Kafka 的消费者,则可利用 Kafka 的特性实现高效写入。比如: + +1. 将同一张表的数据写到同一个 Topic 的同一个 Partition,增加数据的相邻性 +2. 通过订阅多个 Topic 实现数据汇聚 +3. 通过增加 Consumer 线程数增加写入的并发度 +4. 
通过增加每次 fetch 的最大数据量来增加单次写入的最大数据量
+
+### 服务器配置的角度 {#setting-view}
+
+从服务器配置的角度来说,也有很多优化写入性能的方法。
+
+如果总表数不多(远小于核数乘以 1000),且无论怎么调节客户端程序,taosd 进程的 CPU 使用率都很低,那么很可能是因为表在各个 vgroup 分布不均。比如:数据库总表数是 1000 且 minTablesPerVnode 设置的也是 1000,那么所有的表都会分布在 1 个 vgroup 上。此时如果将 minTablesPerVnode 和 tableIncStepPerVnode 都设置成 100,则可将表分布至 10 个 vgroup(假设 maxVgroupsPerDb 大于等于 10)。
+
+如果总表数比较大(比如大于 500 万),适当增加 maxVgroupsPerDb 也能显著提高建表的速度。maxVgroupsPerDb 默认值为 0,自动配置为 CPU 的核数。如果表的数量巨大,也建议调节 maxTablesPerVnode 参数,以免超过单个 vnode 建表的上限。
+
+更多调优参数,请参考[性能优化](../../../operation/optimize)和[配置参考](../../../reference/config)部分。
+
+## 高效写入示例 {#sample-code}
+
+### 场景设计 {#scenario}
+
+下面的示例程序展示了如何高效写入数据,场景设计如下:
+
+- TDengine 客户端程序从其它数据源不断读入数据,在示例程序中采用生成模拟数据的方式来模拟读取数据源
+- 单个连接向 TDengine 写入的速度无法与读数据的速度相匹配,因此客户端程序启动多个线程,每个线程都建立了与 TDengine 的连接,每个线程都有一个独占的固定大小的消息队列
+- 客户端程序将接收到的数据根据所属的表名(或子表名)HASH 到不同的线程,即写入该线程所对应的消息队列,以此确保属于某个表(或子表)的数据一定会被一个固定的线程处理
+- 各个子线程在将所关联的消息队列中的数据读空后,或者读取数据量达到一个预定的阈值后,将该批数据写入 TDengine,并继续处理后面接收到的数据
+
+![TDengine 高效写入示例场景的线程模型](highvolume.webp)
+
+### 示例代码 {#code}
+
+这一部分是针对以上场景的示例代码。对于其它场景,高效写入的原理相同,不过代码需要适当修改。
+
+本示例代码假设源数据属于同一张超级表(meters)的不同子表。程序在开始写入数据之前已经在 test 库创建了这个超级表。对于子表,将根据收到的数据,由应用程序自动创建。如果实际场景是多个超级表,只需修改写任务自动建表的代码。
+
+
+
+**程序清单**
+
+| 类名 | 功能说明 |
+| ---------------- | --------------------------------------------------------------------------- |
+| FastWriteExample | 主程序 |
+| ReadTask | 从模拟源中读取数据,将表名经过 hash 后得到 Queue 的 index,写入对应的 Queue |
+| WriteTask | 从 Queue 中获取数据,组成一个 Batch,写入 TDengine |
+| MockDataSource | 模拟生成一定数量 meters 子表的数据 |
+| SQLWriter | WriteTask 依赖这个类完成 SQL 拼接、自动建表、SQL 写入、SQL 长度检查 |
+| StmtWriter | 实现参数绑定方式批量写入(暂未完成) |
+| DataBaseMonitor | 统计写入速度,并每隔 10 秒把当前写入速度打印到控制台 |
+
+以下是各类的完整代码和更详细的功能说明。
+
+FastWriteExample
+主程序负责:
+
+1. 创建消息队列
+2. 启动写线程
+3. 启动读线程
+4. 每隔 10 秒统计一次写入速度
+
+主程序默认暴露了 4 个参数,每次启动程序都可调节,用于测试和调优:
+
+1. 读线程个数。默认为 1。
+2. 写线程个数。默认为 3。
+3. 模拟生成的总表数。默认为 1000。将会平分给各个读线程。如果总表数较大,建表需要花费较长时间,开始统计的写入速度可能较慢。
+4. 每批最多写入记录数量。默认为 3000。
+
+队列容量(taskQueueCapacity)也是与性能有关的参数,可通过修改程序调节。一般来讲,队列容量越大,入队被阻塞的概率越小,队列的吞吐量越大,但是内存占用也会越大。示例程序默认值已经设置得足够大。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+ +
+ReadTask
+
+读任务负责从数据源读数据。每个读任务都关联了一个模拟数据源。每个模拟数据源可生成一定数量表的数据。不同的模拟数据源生成不同表的数据。
+
+读任务采用阻塞的方式写消息队列。也就是说,一旦队列满了,写操作就会阻塞。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+ +
+WriteTask + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}} +``` + +
+ +
+ +MockDataSource + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}} +``` + +
+ +
+
+SQLWriter
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。注意,所有的表都没有提前创建,而是在 catch 到表不存在异常的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它异常,这里简单地记录当时执行的 SQL 语句到日志中,你也可以记录更多线索到日志,以便排查错误和故障恢复。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+ +
+ +DataBaseMonitor + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}} +``` + +
+ +**执行步骤** + +
+执行 Java 示例程序
+
+执行程序前需配置环境变量 `TDENGINE_JDBC_URL`。如果 TDengine Server 部署在本机,且用户名、密码和端口都是默认值,那么可配置:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**本地集成开发环境执行示例程序**
+
+1. clone TDengine 仓库
+   ```
+   git clone git@github.com:taosdata/TDengine.git --depth 1
+   ```
+2. 用集成开发环境打开 `docs/examples/java` 目录。
+3. 在开发环境中配置环境变量 `TDENGINE_JDBC_URL`。如果已配置了全局的环境变量 `TDENGINE_JDBC_URL` 可跳过这一步。
+4. 运行类 `com.taos.example.highvolume.FastWriteExample`。
+
+**远程服务器上执行示例程序**
+
+若要在服务器上执行示例程序,可按照下面的步骤操作:
+
+1. 打包示例代码。在目录 TDengine/docs/examples/java 下执行:
+   ```
+   mvn package
+   ```
+2. 远程服务器上创建 examples 目录:
+   ```
+   mkdir -p examples/java
+   ```
+3. 复制依赖到服务器指定目录(`<user>@<host>` 请替换为实际的登录用户与服务器地址):
+   - 复制依赖包,只用复制一次
+     ```
+     scp -r .\target\lib <user>@<host>:~/examples/java
+     ```
+   - 复制本程序的 jar 包,每次更新代码都需要复制
+     ```
+     scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+     ```
+4. 配置环境变量。
+   编辑 `~/.bash_profile` 或 `~/.bashrc`,添加如下内容(示例):
+
+   ```
+   export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+   ```
+
+   以上使用的是本地部署 TDengine Server 时默认的 JDBC URL。你需要根据自己的实际情况更改。
+
+5. 用 java 命令启动示例程序,命令模板(尖括号中为可选的启动参数):
+
+   ```
+   java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample <read_thread_count> <write_thread_count> <total_table_count> <max_batch_size>
+   ```
+
+6. 结束测试程序。测试程序不会自动结束,在获取到当前配置下稳定的写入速度后,按 CTRL + C 结束程序。
+   下面是一次实际运行的日志输出,机器配置 16 核 + 64G + 固态硬盘。
+
+   ```
+   root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+   18:56:35.896 [main] INFO  c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+   18:56:36.011 [WriteThread-0] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.015 [WriteThread-0] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.021 [WriteThread-1] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.022 [WriteThread-1] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.031 [WriteThread-2] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.032 [WriteThread-2] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.041 [WriteThread-3] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.042 [WriteThread-3] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.093 [WriteThread-4] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.094 [WriteThread-4] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.099 [WriteThread-5] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.100 [WriteThread-5] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.100 [WriteThread-6] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.101 [WriteThread-6] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.103 [WriteThread-7] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.104 [WriteThread-7] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.105 [WriteThread-8] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.107 [WriteThread-8] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.108 [WriteThread-9] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.109 [WriteThread-9] INFO  c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+   18:56:36.156 [WriteThread-10] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.157 [WriteThread-11] INFO  c.taos.example.highvolume.WriteTask - started
+   18:56:36.158 [WriteThread-10] INFO  c.taos.example.highvolume.SQLWriter - 
maxSQLLength=1048576 + 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444 + 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521 + 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394 + 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933 + 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696 + 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729 + 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521 + 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788 + 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950 + ``` + +
+ +
+ + +**程序清单** + +Python 示例程序中采用了多进程的架构,并使用了跨进程的消息队列。 + +| 函数或类 | 功能说明 | +| ------------------------ | -------------------------------------------------------------------- | +| main 函数 | 程序入口, 创建各个子进程和消息队列 | +| run_monitor_process 函数 | 创建数据库,超级表,统计写入速度并定时打印到控制台 | +| run_read_task 函数 | 读进程主要逻辑,负责从其它数据系统读数据,并分发数据到为之分配的队列 | +| MockDataSource 类 | 模拟数据源, 实现迭代器接口,每次批量返回每张表的接下来 1000 条数据 | +| run_write_task 函数 | 写进程主要逻辑。每次从队列中取出尽量多的数据,并批量写入 | +| SQLWriter类 | SQL 写入和自动建表 | +| StmtWriter 类 | 实现参数绑定方式批量写入(暂未完成) | + + +
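+
+示例中跨进程的消息队列使用的是 faster-fifo 库(安装方式见下文“执行步骤”)。在阅读各函数的实现之前,可以先通过下面这个最小示意程序了解它的批量接口:`put_many` 在队列满时可以一直阻塞,`get_many` 一次尽量多地取出消息,两者正是 run_read_task 与 run_write_task 所依赖的用法(示意中的数据行仅为举例):
+
+```python
+from multiprocessing import Process
+from queue import Empty
+
+from faster_fifo import Queue
+
+
+def consumer(q: Queue):
+    try:
+        # 与 run_write_task 相同的取数方式:一次尽量多地取出消息
+        lines = q.get_many(block=True, timeout=5, max_messages_to_get=3000)
+        print(f"consumed {len(lines)} lines")
+    except Empty:
+        print("queue is empty")
+
+
+if __name__ == "__main__":
+    q = Queue(max_size_bytes=1000 * 1000)
+    p = Process(target=consumer, args=(q,))
+    p.start()
+    # 与 run_read_task 相同的写入方式:队列满时一直阻塞
+    q.put_many(["tb_0,1648432611249,10.3,219,0.31,LosAngeles,0",
+                "tb_1,1648432611249,12.6,218,0.33,SanDiego,1"], block=True, timeout=-1)
+    p.join()
+```
+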
+main 函数 + +main 函数负责创建消息队列和启动子进程,子进程有 3 类: + +1. 1 个监控进程,负责数据库初始化和统计写入速度 +2. n 个读进程,负责从其它数据系统读数据 +3. m 个写进程,负责写数据库 + +main 函数可以接收 5 个启动参数,依次是: + +1. 读任务(进程)数, 默认为 1 +2. 写任务(进程)数, 默认为 1 +3. 模拟生成的总表数,默认为 1000 +4. 队列大小(单位字节),默认为 1000000 +5. 每批最多写入记录数量, 默认为 3000 + +```python +{{#include docs/examples/python/fast_write_example.py:main}} +``` + +
+ +
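+
+主程序的 5 个参数按上述顺序在命令行给出。例如(示意,参数取值可按需调整):
+
+```
+python3 fast_write_example.py 2 2 1000 1000000 3000
+```
+
+即 2 个读进程、2 个写进程、1000 张表、队列大小 1000000 字节、每批最多 3000 条记录。
+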
+run_monitor_process + +监控进程负责初始化数据库,并监控当前的写入速度。 + +```python +{{#include docs/examples/python/fast_write_example.py:monitor}} +``` + +
+ +
+ +run_read_task 函数 + +读进程,负责从其它数据系统读数据,并分发数据到为之分配的队列。 + +```python +{{#include docs/examples/python/fast_write_example.py:read}} +``` + +
+ +
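+
+run_read_task 中 `i = table_id % len(task_queues)` 的取模分发,保证同一张表的数据总是进入同一个队列,进而由同一个写进程处理。下面的小例子(示意,用普通 list 代替跨进程队列)演示了这一不变式:
+
+```python
+# 示意:取模分发保证同一张表的数据总是落在同一个“队列”里
+task_queues = [[] for _ in range(3)]
+
+for table_id in [0, 1, 2, 3, 4, 0, 3]:
+    i = table_id % len(task_queues)
+    task_queues[i].append(f"tb_{table_id}")
+
+print(task_queues)
+# [['tb_0', 'tb_3', 'tb_0', 'tb_3'], ['tb_1', 'tb_4'], ['tb_2']]
+```
+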
+ +MockDataSource + +以下是模拟数据源的实现,我们假设数据源生成的每一条数据都带有目标表名信息。实际中你可能需要一定的规则确定目标表名。 + +```python +{{#include docs/examples/python/mockdatasource.py}} +``` + +
+ +
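+
+如果实际的数据源产生的消息不带表名,可以参考下面的思路先确定目标子表名,再拼出示例程序所需的行格式(示意代码,其中的字段名和命名规则均为假设):
+
+```python
+def to_line(message: dict) -> str:
+    """示意:把一条原始消息转换为 "tbName,ts,current,voltage,phase,location,groupId" 格式的行。
+    这里假设消息带有 device_id 字段并用它生成目标子表名,实际中请替换为自己的规则。"""
+    table_name = f"t_{message['device_id']}"  # 假设的命名规则
+    fields = [table_name, message["ts"], message["current"], message["voltage"],
+              message["phase"], message["location"], message["group_id"]]
+    return ",".join(str(f) for f in fields)
+
+
+print(to_line({"device_id": 1001, "ts": 1648432611249, "current": 10.3,
+               "voltage": 219, "phase": 0.31, "location": "LosAngeles", "group_id": 0}))
+# 输出:t_1001,1648432611249,10.3,219,0.31,LosAngeles,0
+```
+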
+run_write_task 函数 + +写进程每次从队列中取出尽量多的数据,并批量写入。 + +```python +{{#include docs/examples/python/fast_write_example.py:write}} +``` + +
+ +
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL,以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,如果接近 SQL 最大长度限制(maxSQLLength),将会立即执行 SQL。为了减少 SQL 的执行次数,建议将 maxSQLLength 适当调大。
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
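+
+SQLWriter 的典型用法如下(示意;假设 test 库和超级表 meters 已由监控进程创建,`get_connection` 即上文 fast_write_example.py 中的连接函数):
+
+```python
+from sql_writer import SQLWriter
+from fast_write_example import get_connection
+
+writer = SQLWriter(get_connection)
+# 行格式:tbName,ts,current,voltage,phase,location,groupId
+writer.process_lines([
+    "tb_0,1648432611249,10.3,219,0.31,LosAngeles,0",
+    "tb_1,1648432611250,12.6,218,0.33,SanDiego,1",
+])
+```
+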
+ +**执行步骤** + +
+ +执行 Python 示例程序 + +1. 前提条件 + + - 已安装 TDengine 客户端驱动 + - 已安装 Python3, 推荐版本 >= 3.8 + - 已安装 taospy + +2. 安装 faster-fifo 代替 python 内置的 multiprocessing.Queue + + ``` + pip3 install faster-fifo + ``` + +3. 点击上面的“查看源码”链接复制 `fast_write_example.py` 、 `sql_writer.py` 和 `mockdatasource.py` 三个文件。 + +4. 执行示例程序 + + ``` + python3 fast_write_example.py + ``` + + 下面是一次实际运行的输出, 机器配置 16核 + 64G + 固态硬盘。 + + ``` + root@vm85$ python3 fast_write_example.py 8 8 + 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000 + 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347 + 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348 + 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349 + 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350 + 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351 + 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352 + 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353 + 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354 + 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355 + 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356 + 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357 + 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358 + 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359 + 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361 + 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364 + 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365 + 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0 + 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0 + 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0 + 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0 + 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0 + 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0 + 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0 + 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0 + 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0 + 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0 + 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0 + 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0 + 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0 + 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0 + 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0 + ``` + +
+ +:::note +使用 Python 连接器多进程连接 TDengine 的时候,有一个限制:不能在父进程中建立连接,所有连接只能在子进程中创建。 +如果在父进程中创建连接,子进程再创建连接就会一直阻塞。这是个已知问题。 + +::: + +
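+
+一种规避方式(示意)是:父进程只负责派生子进程,所有 taos 连接都在子进程内部创建,例如:
+
+```python
+from multiprocessing import Process
+
+import taos
+
+
+def worker():
+    # 连接在子进程内部创建,父进程从未调用过 taos.connect
+    conn = taos.connect(host="localhost", user="root", password="taosdata")
+    print(conn.query("SELECT SERVER_VERSION()").fetch_all())
+    conn.close()
+
+
+if __name__ == "__main__":
+    processes = [Process(target=worker) for _ in range(2)]
+    for p in processes:
+        p.start()
+    for p in processes:
+        p.join()
+```
+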
+
+ + diff --git a/docs/zh/07-develop/03-insert-data/_c_line.mdx b/docs/zh/07-develop/03-insert-data/_c_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7f2f0d5dd8198d52dda1da34256e54a1bbb4c967 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_c_line.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/line_example.c:main}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_c_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_c_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..34b1d8ab3c1e299c2ab2a1ad6d47f81dfaa364cc --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_c_opts_json.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/json_protocol_example.c:main}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_c_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_c_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6bda068d12fd0b379a5af96438029c9ae476a753 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_c_opts_telnet.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/telnet_line_example.c:main}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_c_sql.mdx b/docs/zh/07-develop/03-insert-data/_c_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4e55c3387ee1c6fe860f312afdbdad65142bf7fb --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_c_sql.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/insert_example.c}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_c_stmt.mdx b/docs/zh/07-develop/03-insert-data/_c_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..78f2d20dfb95859448e998bc41dc815efc4d9bd0 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_c_stmt.mdx @@ -0,0 +1,6 @@ +```c title=一次绑定一行 +{{#include docs/examples/c/stmt_example.c}} +``` +```c title=一次绑定多行 72:117 +{{#include docs/examples/c/multi_bind_example.c}} +``` \ No newline at end of file diff --git a/docs-cn/07-develop/03-insert-data/_category_.yml b/docs/zh/07-develop/03-insert-data/_category_.yml similarity index 100% rename from docs-cn/07-develop/03-insert-data/_category_.yml rename to docs/zh/07-develop/03-insert-data/_category_.yml diff --git a/docs/zh/07-develop/03-insert-data/_cs_line.mdx b/docs/zh/07-develop/03-insert-data/_cs_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..71f46c62be3dfe7d771a35b2298e476bed353aba --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_cs_line.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/InfluxDBLineExample.cs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..8d80d042c984c513df5ca91813c0cd0a17b58eb5 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/OptsJsonExample.cs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cff32abf1feaf703971111542749fbe40152bc33 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/OptsTelnetExample.cs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_cs_sql.mdx 
b/docs/zh/07-develop/03-insert-data/_cs_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1dc7bb3d1366aa3000212786756506eb5eb280e6 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_cs_sql.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/SQLInsertExample.cs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx b/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..229c874ab9f515e7eae66890a3dfe2e59c129e86 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/StmtInsertExample.cs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_go_line.mdx b/docs/zh/07-develop/03-insert-data/_go_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..df2afc0e8720ca14e42e0e4bd7e50276cecace43 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_go_line.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/line/main.go}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_go_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_go_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..362ce430515c70a3ac502e646630025d7f950612 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_go_opts_json.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/json/main.go}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_go_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_go_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..518ea4c8164ab148afff9e21b03d892cbc1bfaf8 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_go_opts_telnet.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/telnet/main.go}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_go_sql.mdx b/docs/zh/07-develop/03-insert-data/_go_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..02f4d4e2ba21bc14dd67cb0443a1631b06750923 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_go_sql.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/insert/sql/main.go}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_go_stmt.mdx b/docs/zh/07-develop/03-insert-data/_go_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d65a96549fbef3cd14b47853f765d557447dde1d --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_go_stmt.mdx @@ -0,0 +1,8 @@ +```go +{{#include docs/examples/go/insert/stmt/main.go}} +``` + +:::tip +driver-go 的模块 `github.com/taosdata/driver-go/v2/wrapper` 是 C 接口的底层封装。使用这个模块也可以实现参数绑定写入。 + +::: diff --git a/docs/zh/07-develop/03-insert-data/_java_line.mdx b/docs/zh/07-develop/03-insert-data/_java_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..17f759d30fdb76744dc032be60ee91b6dd9f1540 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_java_line.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/LineProtocolExample.java}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_java_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_java_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1fc0adc202f26c73e64da09456e7e42bdc6367f6 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_java_opts_json.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/JSONProtocolExample.java}} +``` diff --git 
a/docs/zh/07-develop/03-insert-data/_java_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_java_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b68f54b4e872a57f34ae6d5c3651a70812b71154 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_java_opts_telnet.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_java_sql.mdx b/docs/zh/07-develop/03-insert-data/_java_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..636c7e00eb8846704678ef3cdd8394a99a4528f8 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_java_sql.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java:insert}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_java_stmt.mdx b/docs/zh/07-develop/03-insert-data/_java_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2f6a33769044ef5052e633e28a9b60fdab130e88 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_java_stmt.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/StmtInsertExample.java}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_js_line.mdx b/docs/zh/07-develop/03-insert-data/_js_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cc138a76bde76e779eaa1fe554ecc82c1f564e24 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_js_line.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/influxdb_line_example.js}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_js_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_js_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cb3c275ce8140ed58d668bf03972a1f960bb6564 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_js_opts_json.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/opentsdb_json_example.js}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_js_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_js_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..db96742f31440342516134636db998af987af9fb --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_js_opts_telnet.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/opentsdb_telnet_example.js}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_js_sql.mdx b/docs/zh/07-develop/03-insert-data/_js_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a9a12f5d2cfb31bcaefba25a82846b455dbc8671 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_js_sql.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/insert_example.js}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_js_stmt.mdx b/docs/zh/07-develop/03-insert-data/_js_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b94ae121c50387c899336fc6f09348f48483bf58 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_js_stmt.mdx @@ -0,0 +1,12 @@ +```js title=一次绑定一行 +{{#include docs/examples/node/nativeexample/param_bind_example.js}} +``` + +```js title=一次绑定多行 +{{#include docs/examples/node/nativeexample/multi_bind_example.js:insertData}} +``` + +:::info +一次绑定一行效率不如一次绑定多行,但支持非 INSERT 语句。一次绑定多行效率更高,但仅支持 INSERT 语句。 + +::: diff --git a/docs/zh/07-develop/03-insert-data/_php_sql.mdx b/docs/zh/07-develop/03-insert-data/_php_sql.mdx new file mode 
100644 index 0000000000000000000000000000000000000000..78cd663ec219dabc2eeb81c7e67426eda41d7762 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_php_sql.mdx @@ -0,0 +1,3 @@ +```php +{{#include docs/examples/php/insert.php}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_php_stmt.mdx b/docs/zh/07-develop/03-insert-data/_php_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3bb7b2f8da9887c1063822e69bfdff599aa50b7b --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_php_stmt.mdx @@ -0,0 +1,3 @@ +```php +{{#include docs/examples/php/insert_stmt.php}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_py_line.mdx b/docs/zh/07-develop/03-insert-data/_py_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..85f7e32e6681c6d428a2332220194c169c421f2f --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_py_line.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/line_protocol_example.py}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_py_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_py_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..195c7090c02e03131c4261c57f1414a5ab1ba6b6 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_py_opts_json.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/json_protocol_example.py}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_py_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_py_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3bae1ea57bcffe50be5b4e96a7ae8f83faed2087 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_py_opts_telnet.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/telnet_line_protocol_example.py}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_py_sql.mdx b/docs/zh/07-develop/03-insert-data/_py_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..1557e3994b04e64c596918ee67c63e7765ebaa07 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_py_sql.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/native_insert_example.py}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_py_stmt.mdx b/docs/zh/07-develop/03-insert-data/_py_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e244288401ae99228701fc3b965389f4c3a362b4 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_py_stmt.mdx @@ -0,0 +1,12 @@ +```py title=一次绑定一行 +{{#include docs/examples/python/bind_param_example.py}} +``` + +```py title=一次绑定多行 +{{#include docs/examples/python/multi_bind_example.py:bind_batch}} +``` + +:::info +一次绑定一行效率不如一次绑定多行,但支持非 INSERT 语句。一次绑定多行效率更高,但仅支持 INSERT 语句。 + +::: \ No newline at end of file diff --git a/docs/zh/07-develop/03-insert-data/_rust_line.mdx b/docs/zh/07-develop/03-insert-data/_rust_line.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dbb35d76bc3517463902b642ce4a3861ae42b2f8 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_line.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cc2055510bce006491ed277a8e884b9958a5a993 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs}} +``` 
diff --git a/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx new file mode 100644 index 0000000000000000000000000000000000000000..109c0c5d019e250b87e12c535e4f55c69924b4af --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_rust_sql.mdx b/docs/zh/07-develop/03-insert-data/_rust_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fb59a4826510e666457ac592328cc5ba17412c79 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_sql.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/restexample/examples/insert_example.rs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/_rust_stmt.mdx b/docs/zh/07-develop/03-insert-data/_rust_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a889b56745601158489037a590b6cf5bd80da543 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_stmt.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/nativeexample/examples/stmt_example.rs}} +``` diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp new file mode 100644 index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ diff --git a/docs-cn/07-develop/03-insert-data/index.md b/docs/zh/07-develop/03-insert-data/index.md similarity index 100% rename from docs-cn/07-develop/03-insert-data/index.md rename to docs/zh/07-develop/03-insert-data/index.md diff --git a/docs/zh/07-develop/04-query-data/_c.mdx b/docs/zh/07-develop/04-query-data/_c.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c51557ef2918dd9152e329c6e1937109d286b11c --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_c.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/query_example.c}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/04-query-data/_c_async.mdx b/docs/zh/07-develop/04-query-data/_c_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..641a53e82ddb252e1b3255799bd922158a08f229 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_c_async.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/async_query_example.c:demo}} +``` \ No newline at end of file diff --git a/docs-cn/07-develop/04-query-data/_category_.yml b/docs/zh/07-develop/04-query-data/_category_.yml similarity index 100% rename from docs-cn/07-develop/04-query-data/_category_.yml rename to docs/zh/07-develop/04-query-data/_category_.yml diff --git a/docs/zh/07-develop/04-query-data/_cs.mdx b/docs/zh/07-develop/04-query-data/_cs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4bb582ecbfaeceac679af975e7752d1caeacb018 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_cs.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/QueryExample.cs}} +``` diff --git a/docs/zh/07-develop/04-query-data/_cs_async.mdx b/docs/zh/07-develop/04-query-data/_cs_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3ecf635fd39db402d1db68de6d7336b7b2d9d8e8 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_cs_async.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/AsyncQueryExample.cs}} +``` diff --git a/docs/zh/07-develop/04-query-data/_go.mdx 
b/docs/zh/07-develop/04-query-data/_go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b43894a1ebe8aa0a261cce5f2469f2b3f8449fc4 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_go.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/query/sync/main.go}} +``` diff --git a/docs/zh/07-develop/04-query-data/_go_async.mdx b/docs/zh/07-develop/04-query-data/_go_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3fbc6f5b6dac9d3987678e64d7268eed200ce513 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_go_async.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/query/async/main.go}} +``` diff --git a/docs/zh/07-develop/04-query-data/_java.mdx b/docs/zh/07-develop/04-query-data/_java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..74de32658c658fb81c29349a1997e32ed512db1b --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_java.mdx @@ -0,0 +1,3 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/RestQueryExample.java}} +``` diff --git a/docs/zh/07-develop/04-query-data/_js.mdx b/docs/zh/07-develop/04-query-data/_js.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5883d378e7c7acab033bffb2018f00f1ab5a48d5 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_js.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/query_example.js}} +``` diff --git a/docs/zh/07-develop/04-query-data/_js_async.mdx b/docs/zh/07-develop/04-query-data/_js_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4b0f54a0342e62da1e5050d49546ca605ae1d729 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_js_async.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/async_query_example.js}} +``` diff --git a/docs/zh/07-develop/04-query-data/_php.mdx b/docs/zh/07-develop/04-query-data/_php.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bcafd1cfbcb1bbb55b03f6fe198e6fa1b5251b19 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_php.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/php/query.php}} +``` diff --git a/docs/zh/07-develop/04-query-data/_py.mdx b/docs/zh/07-develop/04-query-data/_py.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7184c66b8ea35a72309246aefc737d430434bb54 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_py.mdx @@ -0,0 +1,11 @@ +通过迭代逐行获取查询结果。 + +```py +{{#include docs/examples/python/query_example.py:iter}} +``` + +一次获取所有查询结果,并把每一行转化为一个字典返回。 + +```py +{{#include docs/examples/python/query_example.py:fetch_all}} +``` diff --git a/docs/zh/07-develop/04-query-data/_py_async.mdx b/docs/zh/07-develop/04-query-data/_py_async.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dd323ef364d67fc5d37c6c094433e0c9d14d5b08 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_py_async.mdx @@ -0,0 +1,8 @@ +```py +{{#include docs/examples/python/async_query_example.py}} +``` + +:::note +这个示例程序,目前在 Windows 系统上还无法运行 + +::: diff --git a/docs/zh/07-develop/04-query-data/_rust.mdx b/docs/zh/07-develop/04-query-data/_rust.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cab1b403fbba0cb432ecb9cb280a0fa7582c5be1 --- /dev/null +++ b/docs/zh/07-develop/04-query-data/_rust.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/restexample/examples/query_example.rs}} +``` diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx new file mode 100644 index 
0000000000000000000000000000000000000000..58b6738f43909b100f59829d74f0e62cdb7ae44a
--- /dev/null
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -0,0 +1,181 @@
+---
+title: 查询数据
+description: "主要查询功能,通过连接器执行同步查询和异步查询"
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import JavaQuery from "./_java.mdx";
+import PyQuery from "./_py.mdx";
+import GoQuery from "./_go.mdx";
+import RustQuery from "./_rust.mdx";
+import NodeQuery from "./_js.mdx";
+import CsQuery from "./_cs.mdx";
+import CQuery from "./_c.mdx";
+import PhpQuery from "./_php.mdx";
+import PyAsync from "./_py_async.mdx";
+import NodeAsync from "./_js_async.mdx";
+import CsAsync from "./_cs_async.mdx";
+import CAsync from "./_c_async.mdx";
+
+## 主要查询功能
+
+TDengine 采用 SQL 作为查询语言。应用程序可以通过 REST API 或连接器发送 SQL 语句,用户还可以通过 TDengine 命令行工具 taos 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能:
+
+- 单列、多列数据查询
+- 标签和数值的多种过滤条件:>, <, =, <\>, like 等
+- 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset)
+- 数值列及聚合结果的四则运算
+- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
+- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff 等
+
+例如:在命令行工具 taos 中,从表 d1001 中查询出 voltage > 215 的记录,按时间降序排列,仅输出 2 条。
+
+```sql
+taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
+ ts | current | voltage | phase |
+======================================================================================
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
+Query OK, 2 row(s) in set (0.001100s)
+```
+
+为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均)、spread(最大值与最小值的差)、last_row(最后一条记录)等,更多与物联网场景相关的函数将陆续添加进来。TDengine 还支持连续查询。
+
+具体的查询语法请看 [TAOS SQL 的数据查询](/taos-sql/select) 章节。
+
+## 多表聚合查询
+
+物联网场景中,往往同一个类型的数据采集点有多个。TDengine 采用超级表(STable)的概念来描述某一个类型的数据采集点,用一张普通的表来描述一个具体的数据采集点。同时 TDengine 使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine 提供了一种高效的方法,将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。
+
+### 示例一
+
+在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
+
+```
+taos> SELECT AVG(voltage) FROM meters GROUP BY location;
+ avg(voltage) | location |
+=============================================================
+ 222.000000000 | California.LosAngeles |
+ 219.200000000 | California.SanFrancisco |
+Query OK, 2 row(s) in set (0.002136s)
+```
+
+### 示例二
+
+在 TAOS Shell,查找 groupId 为 2 的所有智能电表过去 24 小时的记录条数,以及电流的最大值。
+
+```
+taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
+ count(*) | max(current) |
+==================================
+ 5 | 13.4 |
+Query OK, 1 row(s) in set (0.002136s)
+```
+
+TDengine 仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+
+## 降采样查询、插值
+
+物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval,让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每 10 秒钟求和:
+
+```
+taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
+ ts | sum(current) |
+======================================================
+ 2018-10-03 14:38:00.000 | 10.300000191 |
+ 2018-10-03 14:38:10.000 | 24.900000572 |
+Query OK, 2 row(s) in set (0.000883s)
+```
+
+降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和:
+
+```
+taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
+ ts | sum(current) |
+======================================================
+ 2018-10-03 14:38:04.000 | 10.199999809 |
+ 2018-10-03 14:38:05.000 | 32.900000572 |
+ 2018-10-03 14:38:06.000 | 11.500000000 |
+ 2018-10-03 
14:38:15.000 | 12.600000381 | + 2018-10-03 14:38:16.000 | 36.000000000 | +Query OK, 5 row(s) in set (0.001538s) +``` + +降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始 + +``` +taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:04.500 | 11.189999809 | + 2018-10-03 14:38:05.500 | 31.900000572 | + 2018-10-03 14:38:06.500 | 11.600000000 | + 2018-10-03 14:38:15.500 | 12.300000381 | + 2018-10-03 14:38:16.500 | 35.000000000 | +Query OK, 5 row(s) in set (0.001521s) +``` + +物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如 FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用 TDengine 的降采样操作就轻松解决。 + +如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。 + +语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/interval) 章节。 + +## 示例代码 + +### 查询数据 + +在 [SQL 写入](/develop/insert-data/sql-writing) 一章,我们创建了 power 数据库,并向 meters 表写入了一些数据,以下示例代码展示如何查询这个表的数据。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + +:::note + +1. 无论是使用 REST 连接还是原生连接的连接器,以上示例代码都能正常工作。 +2. 唯一需要注意的是:由于 REST 接口无状态, 不能使用 `use db` 语句来切换数据库。 + +::: + +### 异步查询 + +除同步查询 API 之外,TDengine 还提供性能更高的异步调用 API 处理数据插入、查询操作。在软硬件环境相同的情况下,异步 API 处理数据插入的速度比同步 API 快 2-4 倍。异步 API 采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步 API 在网络延迟严重的情况下,优点尤为突出。 + +需要注意的是,只有使用原生连接的连接器,才能使用异步查询功能。 + + + + + + + + + + + + diff --git a/docs-cn/07-develop/06-continuous-query.mdx b/docs/zh/07-develop/06-continuous-query.mdx similarity index 100% rename from docs-cn/07-develop/06-continuous-query.mdx rename to docs/zh/07-develop/06-continuous-query.mdx diff --git a/docs/zh/07-develop/07-subscribe.mdx b/docs/zh/07-develop/07-subscribe.mdx new file mode 100644 index 0000000000000000000000000000000000000000..eb1103963a04252881de7ad4dd3d03e598982226 --- /dev/null +++ b/docs/zh/07-develop/07-subscribe.mdx @@ -0,0 +1,251 @@ +--- +sidebar_label: 数据订阅 +description: "轻量级的数据订阅与推送服务。连续写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" +title: 数据订阅 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import Java from "./_sub_java.mdx"; +import Python from "./_sub_python.mdx"; +import Go from "./_sub_go.mdx"; +import Rust from "./_sub_rust.mdx"; +import Node from "./_sub_node.mdx"; +import CSharp from "./_sub_cs.mdx"; +import CDemo from "./_sub_c.mdx"; + +基于数据天然的时间序列特性,TDengine 的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine 在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine 中每一张表均可视为一个标准的消息队列。 + +TDengine 内嵌支持轻量级的消息订阅与推送服务。使用系统提供的 API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 + +TDengine 的订阅与推送服务的状态是由客户端维持,TDengine 服务端并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 + +TDengine 的 API 中,与订阅相关的主要有以下三个: + +```c +taos_subscribe +taos_consume +taos_unsubscribe +``` + +这些 API 的文档请见 [C/C++ Connector](../../reference/connector/cpp),下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”),完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c) 找到。 + +如果我们希望当某个电表的电流超过一定限制(比如 10A)后能得到通知并进行一些处理, 有两种方法:一是分别对每张子表进行查询,每次查询后记录最后一条数据的时间戳,后续只查询这个时间戳之后的数据: + +```sql +select * from D1001 where ts > {last_timestamp1} and current > 10; +select * from D1002 where ts > {last_timestamp2} and current > 10; +... 
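+-- 依此类推:每张子表都需要一条这样的查询,并且要各自维护一份 last_timestamp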
+``` + +这确实可行,但随着电表数量的增加,查询数量也会增加,客户端和服务端的性能都会受到影响,当电表数增长到一定的程度,系统就无法承受了。 + +另一种方法是对超级表进行查询。这样,无论有多少电表,都只需一次查询: + +```sql +select * from meters where ts > {last_timestamp} and current > 10; +``` + +但是,如何选择 `last_timestamp` 就成了一个新的问题。因为,一方面数据的产生时间(也就是数据时间戳)和数据入库的时间一般并不相同,有时偏差还很大;另一方面,不同电表的数据到达 TDengine 的时间也会有差异。所以,如果我们在查询中使用最慢的那台电表的数据的时间戳作为 `last_timestamp`,就可能重复读入其它电表的数据;如果使用最快的电表的时间戳,其它电表的数据就可能被漏掉。 + +TDengine 的订阅功能为上面这个问题提供了一个彻底的解决方案。 + +首先是使用 `taos_subscribe` 创建订阅: + +```c +TAOS_SUB* tsub = NULL; +if (async) { +  // create an asynchronized subscription, the callback function will be called every 1s +  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); +} else { +  // create an synchronized subscription, need to call 'taos_consume' manually +  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); +} +``` + +TDengine 中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数 `async` 的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用 `taos_consume` 来拉取数据,而异步则由 API 在内部的另一个线程中调用 `taos_consume`,然后把拉取到的数据交给回调函数 `subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。) + +参数 `taos` 是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在 API 的内部线程中被调用,而 TDengine 的部分 API 不是线程安全的。 + +参数 `sql` 是查询语句,可以在其中使用 where 子句指定过滤条件。在我们的例子中,如果只想订阅电流超过 10A 时的数据,可以这样写: + +```sql +select * from meters where current > 10; +``` + +注意,这里没有指定起始时间,所以会读到所有时间的数据。如果只想从一天前的数据开始订阅,而不需要更早的历史数据,可以再加上一个时间条件: + +```sql +select * from meters where ts > now - 1d and current > 10; +``` + +订阅的 `topic` 实际上是它的名字,因为订阅功能是在客户端 API 中实现的,所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。 + +如果名为 `topic` 的订阅不存在,参数 `restart` 没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个 `topic` 时,`restart` 就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果 `restart` 是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且 `restart` 是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。 + +`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用 `taos_consume` 的时间间隔小于此时间,`taos_consume` 会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。 + +`taos_subscribe` 的倒数第二个参数用于用户程序向回调函数传递附加参数,订阅 API 不对其做任何处理,只原样传递给回调函数。此参数在同步模式下无意义。 + +订阅创建以后,就可以消费其数据了,同步模式下,示例代码是下面的 else 部分: + +```c +if (async) { +  getchar(); +} else while(1) { +  TAOS_RES* res = taos_consume(tsub); +  if (res == NULL) { +    printf("failed to consume data."); +    break; +  } else { +    print_result(res, blockFetch); +    getchar(); +  } +} +``` + +这里是一个 **while** 循环,用户每按一次回车键就调用一次 `taos_consume`,而 `taos_consume` 的返回值是查询到的结果集,与 `taos_use_result` 完全相同,例子中使用这个结果集的代码是函数 `print_result`: + +```c +void print_result(TAOS_RES* res, int blockFetch) { +  TAOS_ROW row = NULL; +  int num_fields = taos_num_fields(res); +  TAOS_FIELD* fields = taos_fetch_fields(res); +  int nRows = 0; +  if (blockFetch) { +    nRows = taos_fetch_block(res, &row); +    for (int i = 0; i < nRows; i++) { +      char temp[256]; +      taos_print_row(temp, row + i, fields, num_fields); +      puts(temp); +    } +  } else { +    while ((row = taos_fetch_row(res))) { +      char temp[256]; +      taos_print_row(temp, row, fields, num_fields); +      puts(temp); +      nRows++; +    } +  } +  printf("%d rows consumed.\n", nRows); +} +``` + +其中的 `taos_print_row` 用于处理订阅到数据,在我们的例子中,它会打印出所有符合条件的记录。而异步模式下,消费订阅到的数据则显得更为简单: + +```c +void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { +  print_result(res, *(int*)param); +} +``` + +当要结束一次数据订阅时,需要调用 `taos_unsubscribe`: + +```c +taos_unsubscribe(tsub, keep); +``` + +其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 
_{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录"),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 + +代码介绍完毕,我们来看一下实际的运行效果。假设: + +- 示例代码已经下载到本地 +- TDengine 也已经在同一台机器上安装好 +- 示例所需的数据库、超级表、子表已经全部创建好 + +则可以在示例代码所在目录执行以下命令来编译并启动示例程序: + +```bash +make +./subscribe -sql='select * from meters where current > 10;' +``` + +示例程序启动后,打开另一个终端窗口,启动 TDengine CLI 向 **D1001** 插入一条电流为 12A 的数据: + +```sql +$ taos +> use test; +> insert into D1001 values(now, 12, 220, 1); +``` + +这时,因为电流超过了 10A,您应该可以看到示例程序将它输出到了屏幕上。您可以继续插入一些数据观察示例程序的输出。 + +## 示例程序 + +下面的示例程序展示是如何使用连接器订阅所有电流超过 10A 的记录。 + +### 准备数据 + +``` +# create database "power" +taos> create database power; +# use "power" as the database in following operations +taos> use power; +# create super table "meters" +taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); +# create tabes using the schema defined by super table "meters" +taos> create table d1001 using meters tags ("California.SanFrancisco", 2); +taos> create table d1002 using meters tags ("California.LosAngeles", 2); +# insert some rows +taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); +taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); +# filter out the rows in which current is bigger than 10A +taos> select * from meters where current > 10; + ts | current | voltage | phase | location | groupid | +=========================================================================================================== + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | +Query OK, 5 row(s) in set (0.004896s) +``` + +### 示例代码 + + + + + + + + + {/* + + */} + {/* + + + + + */} + + + + + +### 运行示例程序 + +示例程序会先消费符合查询条件的所有历史数据: + +```bash +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 +``` + +接着,使用 TDengine CLI 向表中新增一条数据: + +``` +# taos +taos> use power; +taos> insert into d1001 values(now, 12.4, 220, 1); +``` + +因为这条数据的电流大于 10A,示例程序会将其消费: + +``` +ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 +``` diff --git a/docs-cn/07-develop/08-cache.md b/docs/zh/07-develop/08-cache.md similarity index 100% rename from docs-cn/07-develop/08-cache.md rename to docs/zh/07-develop/08-cache.md diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md new file mode 100644 index 0000000000000000000000000000000000000000..998260fe2101bc80b39768d45edc97e8ab215f2e --- /dev/null +++ b/docs/zh/07-develop/09-udf.md @@ -0,0 
+1,208 @@
+---
+sidebar_label: 用户定义函数
+title: UDF(用户定义函数)
+description: "支持用户编码的聚合函数和标量函数,在查询中嵌入并使用用户定义函数,拓展查询的能力和功能。"
+---
+
+在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。UDF 通常以数据表中的一列数据作为输入,同时支持以嵌套子查询的结果作为输入。
+
+从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。
+
+用户可以通过 UDF 实现两类函数:标量函数和聚合函数。
+
+## 用 C/C++ 语言来定义 UDF
+
+### 标量函数
+
+用户可以按照下列函数模板定义自己的标量计算函数:
+
+`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`
+
+其中 udfNormalFunc 是函数名的占位符。以上述模板实现的函数对行数据块进行标量计算,其参数项是固定的,用于按照约束完成与引擎之间的数据交换。
+
+- udfNormalFunc 中各参数的具体含义是:
+  - data:输入数据。
+  - itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](../../reference/rest-api/)。例如 4 用于表示 INT 型。
+  - ibytes:输入数据中每个值会占用的字节数。
+  - numOfRows:输入数据的总行数。
+  - ts:主键时间戳在输入中的列数据(只读)。
+  - dataOutput:输出数据的缓冲区,缓冲区大小为用户指定的输出类型大小 \* numOfRows。
+  - interBuf:中间计算结果的缓冲区,大小为用户在创建 UDF 时指定的 BUFSIZE 大小。通常用于计算中间结果与最终结果不一致时使用,由引擎负责分配与释放。
+  - tsOutput:主键时间戳在输出时的列数据,如果非空可用于输出结果对应的时间戳。
+  - numOfOutput:输出结果的个数(行数)。
+  - otype:输出数据的类型。取值含义与 itype 参数一致。
+  - obytes:输出数据中每个值占用的字节数。
+  - buf:用于在 UDF 与引擎间的状态控制信息传递块。
+
+[add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现,也即上面定义的 udfNormalFunc 函数的一个具体实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。
+
+### 聚合函数
+
+用户可以按照如下函数模板定义自己的聚合函数:
+
+`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+
+其中 udfMergeFunc 是函数名的占位符,以上述模板实现的函数用于对计算中间结果进行聚合,只有针对超级表的聚合查询才需要调用该函数。其中各参数的具体含义是:
+
+- data:udfNormalFunc 的输出数据数组,如果使用了 interBuf 那么 data 就是 interBuf 的数组。
+- numOfRows:data 中数据的行数。
+- dataOutput:输出数据的缓冲区,大小等于一条最终结果的大小。如果此时输出还不是最终结果,可以选择输出到 interBuf 中即 data 中。
+- numOfOutput:输出结果的个数(行数)。
+- buf:用于在 UDF 与引擎间的状态控制信息传递块。
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。
+
+其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`)来生成每个子表的中间结果,再将子表的中间结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成超级表的最终聚合结果或中间结果。聚合查询最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把超级表的中间结果处理为最终结果,最终结果只能含 0 或 1 条结果数据。
+
+其他典型场景,如协方差的计算,也可通过定义聚合 UDF 的方式实现。
+
+### 最终计算
+
+用户可以按下面的函数模板实现自己的函数对计算结果进行最终计算,通常用于使用了 interBuf 的场景:
+
+`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
+
+其中 udfFinalizeFunc 是函数名的占位符,各参数的具体含义是:
+
+- dataOutput:输出数据的缓冲区。
+- interBuf:中间计算结果缓冲区,可作为输入。
+- numOfOutput:输出数据的个数,对聚合函数来说只能是 0 或者 1。
+- buf:用于在 UDF 与引擎间的状态控制信息传递块。
+
+## UDF 实现方式的规则总结
+
+三类 UDF 函数:udfNormalFunc、udfMergeFunc、udfFinalizeFunc,其函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名,也即 udfNormalFunc 函数不需要在实际函数名后添加后缀;而 udfMergeFunc 的函数名要加上后缀 `_merge`、udfFinalizeFunc 的函数名要加上后缀 `_finalize`,这是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。
+
+根据 UDF 函数类型的不同,用户所要实现的功能函数也不同:
+
+- 标量函数:UDF 中需实现 udfNormalFunc。
+- 聚合函数:UDF 中需实现 udfNormalFunc、udfMergeFunc(对超级表查询)、udfFinalizeFunc。
+
+:::note
+如果对应的函数不需要具体的功能,也需要实现一个空函数。
+
+:::
+
+## 编译 UDF
+
+用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为动态链接库,之后才能载入 TDengine 系统。
+
+例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,以 Linux 为例可以执行如下指令编译得到动态链接库文件:
+
+```bash
+gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
+```
+
+这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。
+
+## 在系统中管理和使用 UDF
+
+### 创建 UDF
+
+用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 
RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 + +在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外, UDF 支持输入与输出类型不一致,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 + +- 创建标量函数 +```sql +CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +``` + + - ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; + - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 + + 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: + + ```sql + CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; + ``` + +- 创建聚合函数: +```sql +CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +``` + + - ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; + - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 + + 关于中间计算结果的使用,可以参考示例程序[demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) + + 例如,如下语句可以把 demo.so 创建为系统中可用的 UDF: + + ```sql + CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; + ``` + +### 管理 UDF + +- 删除指定名称的用户定义函数: +``` +DROP FUNCTION ids(X); +``` + +- ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 +```sql +DROP FUNCTION add_one; +``` +- 显示系统中当前可用的所有 UDF: +```sql +SHOW FUNCTIONS; +``` + +### 调用 UDF + +在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: +```sql +SELECT X(c) FROM table/stable; +``` + +表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 + +## UDF 的一些使用限制 + +在当前版本下,使用 UDF 存在如下这些限制: + +1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统; +2. UDF 不能与系统内建的 SQL 函数混合使用,暂不支持在一条 SQL 语句中使用多个不同名的 UDF ; +3. UDF 只支持以单个数据列作为输入; +4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中; +5. 无法通过 RESTful 接口来创建 UDF; +6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。 + +## 示例代码 + +### 标量函数示例 [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) + +
+<details>
+<summary>add_one.c</summary>
+
+```c
+{{#include tests/script/sh/add_one.c}}
+```
+
+</details>
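+
+为便于理解上面的模板约定,下面再给出一个最小的标量 UDF 骨架。这只是按照前文 udfNormalFunc 模板写的示意代码,并非官方示例:函数名 `times_two` 为假设,SUdfInit 的真实定义以引擎随附的头文件为准,这里仅声明一个占位结构以便编译。
+
+```c
+// 占位定义:仅为示意,字段以引擎实际提供的 SUdfInit 定义为准
+typedef struct SUdfInit {
+  int maybe_null;
+} SUdfInit;
+
+// 按照上文 udfNormalFunc 模板实现的最小标量函数:
+// 假设输入列为 INT(itype == 4),对每一行输出 2 倍值;未演示 NULL 值与 interBuf 的处理
+void times_two(char* data, short itype, short ibytes, int numOfRows,
+               long long* ts, char* dataOutput, char* interBuf,
+               char* tsOutput, int* numOfOutput, short otype, short obytes,
+               SUdfInit* buf) {
+  for (int i = 0; i < numOfRows; ++i) {
+    *((int*)dataOutput + i) = *((int*)data + i) * 2;
+  }
+  *numOfOutput = numOfRows;  // 标量函数:输出行数与输入行数一致
+}
+```
+
+按照前文的编译与创建规则,该文件可用 `gcc -g -O0 -fPIC -shared times_two.c -o times_two.so` 编译,再通过 `CREATE FUNCTION times_two AS "/full/path/times_two.so" OUTPUTTYPE INT;` 注册后在 SELECT 中调用(路径与函数名均为假设)。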
+
+### 聚合函数示例 [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c)
+
+<details>
+<summary>abs_max.c</summary>
+
+```c
+{{#include tests/script/sh/abs_max.c}}
+```
+
+</details>
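+
+作为对照,下面是一个按照上文 udfMergeFunc 模板写的最小归并骨架,同样只是示意而非官方示例(函数名 `absmax_merge` 为假设,按命名规则其对应的 udfNormalFunc 实际函数名即为 `absmax`):假设 udfNormalFunc 阶段为每个数据块输出一个 double 型的绝对值最大值,归并阶段再对这些中间结果取最大。
+
+```c
+#include <stdint.h>
+
+// 占位定义:仅为示意,字段以引擎实际提供的 SUdfInit 定义为准
+typedef struct SUdfInit {
+  int maybe_null;
+} SUdfInit;
+
+// data 中是各数据块的中间结果(假设每块一个 double),取其中最大者作为输出
+void absmax_merge(char* data, int32_t numOfRows, char* dataOutput,
+                  int32_t* numOfOutput, SUdfInit* buf) {
+  if (numOfRows <= 0) {
+    *numOfOutput = 0;  // 没有中间结果时不产生输出
+    return;
+  }
+  double max = *((double*)data);
+  for (int32_t i = 1; i < numOfRows; ++i) {
+    double v = *((double*)data + i);
+    if (v > max) max = v;
+  }
+  *((double*)dataOutput) = max;
+  *numOfOutput = 1;  // 归并后只剩一条(中间)结果
+}
+```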
+
+### 使用中间计算结果示例 [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c)
+
+<details>
+<summary>demo.c</summary>
+
+```c
+{{#include tests/script/sh/demo.c}}
+```
+
+</details>
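+
+同样地,下面给出一个按照上文 udfFinalizeFunc 模板写的最小骨架,仅作示意、非官方示例(函数名 `absmax_finalize` 为假设):把 interBuf 中累积的中间结果(这里假设是一个 double,对应创建 UDF 时 BUFSIZE 为 8 的场景)拷贝为最终输出。
+
+```c
+// 占位定义:仅为示意,字段以引擎实际提供的 SUdfInit 定义为准
+typedef struct SUdfInit {
+  int maybe_null;
+} SUdfInit;
+
+// 将 interBuf 中的中间结果作为最终结果写入 dataOutput;聚合最终结果最多一行
+void absmax_finalize(char* dataOutput, char* interBuf, int* numOfOutput,
+                     SUdfInit* buf) {
+  *((double*)dataOutput) = *((double*)interBuf);
+  *numOfOutput = 1;
+}
+```
+
+对应的注册语句按前文规则大致为 `CREATE AGGREGATE FUNCTION absmax AS "/full/path/absmax.so" OUTPUTTYPE DOUBLE bufsize 8;`(路径与函数名均为假设)。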
diff --git a/docs-cn/07-develop/_category_.yml b/docs/zh/07-develop/_category_.yml similarity index 100% rename from docs-cn/07-develop/_category_.yml rename to docs/zh/07-develop/_category_.yml diff --git a/docs/zh/07-develop/_sub_c.mdx b/docs/zh/07-develop/_sub_c.mdx new file mode 100644 index 0000000000000000000000000000000000000000..da492a0269f064d8cdf9dfb80969894131d94015 --- /dev/null +++ b/docs/zh/07-develop/_sub_c.mdx @@ -0,0 +1,3 @@ +```c +{{#include docs/examples/c/subscribe_demo.c}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_cs.mdx b/docs/zh/07-develop/_sub_cs.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a435ea0273c94cbe75eaf7431e1a9c39d49d92e3 --- /dev/null +++ b/docs/zh/07-develop/_sub_cs.mdx @@ -0,0 +1,3 @@ +```csharp +{{#include docs/examples/csharp/SubscribeDemo.cs}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_go.mdx b/docs/zh/07-develop/_sub_go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..34b2aefd92c5eef75b59fbbba96b83da091722a7 --- /dev/null +++ b/docs/zh/07-develop/_sub_go.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/go/sub/main.go}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..52df23f7dd0dbdc9810b1e53d66c4fcfd610759e --- /dev/null +++ b/docs/zh/07-develop/_sub_java.mdx @@ -0,0 +1,7 @@ +```java +{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} +``` +:::note +目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 + +::: \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_node.mdx b/docs/zh/07-develop/_sub_node.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3eeff0922a31a478dd34a77c6cb6471f51a57a8c --- /dev/null +++ b/docs/zh/07-develop/_sub_node.mdx @@ -0,0 +1,3 @@ +```js +{{#include docs/examples/node/nativeexample/subscribe_demo.js}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_python.mdx b/docs/zh/07-develop/_sub_python.mdx new file mode 100644 index 0000000000000000000000000000000000000000..490b76fca6deb61e61dc59c2096b30742a7d25f7 --- /dev/null +++ b/docs/zh/07-develop/_sub_python.mdx @@ -0,0 +1,3 @@ +```py +{{#include docs/examples/python/subscribe_demo.py}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_rust.mdx b/docs/zh/07-develop/_sub_rust.mdx new file mode 100644 index 0000000000000000000000000000000000000000..afb8d79daa3bbd72d72795cb4425f12277d710fc --- /dev/null +++ b/docs/zh/07-develop/_sub_rust.mdx @@ -0,0 +1,3 @@ +```rs +{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md new file mode 100644 index 0000000000000000000000000000000000000000..041ac090b5f117a940912021797c075a14a197ff --- /dev/null +++ b/docs/zh/07-develop/index.md @@ -0,0 +1,24 @@ +--- +title: 开发指南 +--- + +开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做: +1. 确定应用到TDengine的链接方式。无论你使用何种编程语言,你总可以使用REST接口, 但也可以使用每种编程语言独有的连接器方便的进行链接。 +2. 根据自己的应用场景,确定数据模型。根据数据特征,决定建立一个还是多个库;分清静态标签、采集量,建立正确的超级表,建立子表。 +3. 决定插入数据的方式。TDengine支持使用标准的SQL写入,但同时也支持schemaless模式写入,这样不用手工建表,可以将数据直接写入。 +4. 根据业务要求,看需要撰写哪些SQL查询语句。 +5. 如果你要基于时序数据做实时的统计分析,包括各种监测看板,那么建议你采用TDengine的连续查询功能,而不用上线Spark, Flink等复杂的流式计算系统。 +6. 如果你的应用有模块需要消费插入的数据,希望有新的数据插入时,就能获取通知,那么建议你采用TDengine提供的数据订阅功能,而无需专门部署Kafka或其他消息队列软件。 +7. 
在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。 +8. 如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。 + +本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。 + +如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。 + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` diff --git a/docs-cn/10-cluster/01-deploy.md b/docs/zh/10-cluster/01-deploy.md similarity index 100% rename from docs-cn/10-cluster/01-deploy.md rename to docs/zh/10-cluster/01-deploy.md diff --git a/docs-cn/10-cluster/02-cluster-mgmt.md b/docs/zh/10-cluster/02-cluster-mgmt.md similarity index 100% rename from docs-cn/10-cluster/02-cluster-mgmt.md rename to docs/zh/10-cluster/02-cluster-mgmt.md diff --git a/docs-cn/10-cluster/03-ha-and-lb.md b/docs/zh/10-cluster/03-ha-and-lb.md similarity index 100% rename from docs-cn/10-cluster/03-ha-and-lb.md rename to docs/zh/10-cluster/03-ha-and-lb.md diff --git a/docs-cn/10-cluster/_category_.yml b/docs/zh/10-cluster/_category_.yml similarity index 100% rename from docs-cn/10-cluster/_category_.yml rename to docs/zh/10-cluster/_category_.yml diff --git a/docs-cn/10-cluster/index.md b/docs/zh/10-cluster/index.md similarity index 100% rename from docs-cn/10-cluster/index.md rename to docs/zh/10-cluster/index.md diff --git a/docs-cn/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md similarity index 100% rename from docs-cn/12-taos-sql/01-data-type.md rename to docs/zh/12-taos-sql/01-data-type.md diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md new file mode 100644 index 0000000000000000000000000000000000000000..e3a0aa7c87461fd1621a38093871a1542e3dbf98 --- /dev/null +++ b/docs/zh/12-taos-sql/02-database.md @@ -0,0 +1,127 @@ +--- +sidebar_label: 数据库管理 +title: 数据库管理 +description: "创建、删除数据库,查看、修改数据库参数" +--- + +## 创建数据库 + +``` +CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; +``` + +:::info +1. KEEP 是该数据库的数据保留多长天数,缺省是 3650 天(10 年),数据库会自动删除超过时限的数据; +2. UPDATE 标志数据库支持更新相同时间戳数据;(从 2.1.7.0 版本开始此参数支持设为 2,表示允许部分列更新,也即更新数据行时未被设置的列会保留原值。)(从 2.0.8.0 版本开始支持此参数。注意此参数不能通过 `ALTER DATABASE` 指令进行修改。) + 1. UPDATE 设为 0 时,表示不允许更新数据,后发送的相同时间戳的数据会被直接丢弃; + 2. UPDATE 设为 1 时,表示更新全部列数据,即如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL; + 3. UPDATE 设为 2 时,表示支持更新部分列数据,即如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值; + 4. 更多关于 UPDATE 参数的用法,请参考[FAQ](/train-faq/faq)。 +3. 数据库名最大长度为 33; +4. 一条 SQL 语句的最大长度为 65480 个字符; +5. 创建数据库时可用的参数有: + - cache: [详细说明](/reference/config/#cache) + - blocks: [详细说明](/reference/config/#blocks) + - days: [详细说明](/reference/config/#days) + - keep: [详细说明](/reference/config/#keep) + - minRows: [详细说明](/reference/config/#minrows) + - maxRows: [详细说明](/reference/config/#maxrows) + - wal: [详细说明](/reference/config/#wallevel) + - fsync: [详细说明](/reference/config/#fsync) + - update: [详细说明](/reference/config/#update) + - cacheLast: [详细说明](/reference/config/#cachelast) + - replica: [详细说明](/reference/config/#replica) + - quorum: [详细说明](/reference/config/#quorum) + - comp: [详细说明](/reference/config/#comp) + - precision: [详细说明](/reference/config/#precision) +6. 
请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。 + +::: + +### 创建数据库示例 + +创建时间精度为纳秒的数据库, 保留 1 年数据: + +```sql +CREATE DATABASE test PRECISION 'ns' KEEP 365; +``` + +## 显示系统当前参数 + +``` +SHOW VARIABLES; +``` + +## 使用数据库 + +``` +USE db_name; +``` + +使用/切换数据库(在 REST 连接方式下无效)。 + +## 删除数据库 + +``` +DROP DATABASE [IF EXISTS] db_name; +``` + +删除数据库。指定 Database 所包含的全部数据表将被删除,谨慎使用! + +## 修改数据库参数 + +``` +ALTER DATABASE db_name COMP 2; +``` + +COMP 参数是指修改数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。0 表示不压缩,1 表示一阶段压缩,2 表示两阶段压缩。 + +``` +ALTER DATABASE db_name REPLICA 2; +``` + +REPLICA 参数是指修改数据库副本数,取值范围 [1, 3]。在集群中使用,副本数必须小于或等于 DNODE 的数目。 + +``` +ALTER DATABASE db_name KEEP 365; +``` + +KEEP 参数是指修改数据文件保存的天数,缺省值为 3650,取值范围 [days, 365000],必须大于或等于 days 参数值。 + +``` +ALTER DATABASE db_name QUORUM 2; +``` + +QUORUM 参数是指数据写入成功所需要的确认数,取值范围 [1, 2]。对于异步复制,quorum 设为 1,具有 master 角色的虚拟节点自己确认即可。对于同步复制,quorum 设为 2。原则上,Quorum >= 1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 + +``` +ALTER DATABASE db_name BLOCKS 100; +``` + +BLOCKS 参数是每个 VNODE (TSDB) 中有多少 cache 大小的内存块,因此一个 VNODE 的用的内存大小粗略为(cache \* blocks)。取值范围 [3, 1000]。 + +``` +ALTER DATABASE db_name CACHELAST 0; +``` + +CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。) +说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。 + +:::tip +以上所有参数修改后都可以用 show databases 来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。 +::: + +## 显示系统所有数据库 + +``` +SHOW DATABASES; +``` + +## 显示一个数据库的创建语句 + +``` +SHOW CREATE DATABASE db_name; +``` + +常用于数据库迁移。对一个已经存在的数据库,返回其创建语句;在另一个集群中执行该语句,就能得到一个设置完全相同的 Database。 + diff --git a/docs-cn/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md similarity index 100% rename from docs-cn/12-taos-sql/03-table.md rename to docs/zh/12-taos-sql/03-table.md diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md similarity index 100% rename from docs-cn/12-taos-sql/04-stable.md rename to docs/zh/12-taos-sql/04-stable.md diff --git a/docs-cn/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md similarity index 100% rename from docs-cn/12-taos-sql/05-insert.md rename to docs/zh/12-taos-sql/05-insert.md diff --git a/docs-cn/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md similarity index 100% rename from docs-cn/12-taos-sql/06-select.md rename to docs/zh/12-taos-sql/06-select.md diff --git a/docs-cn/07-develop/05-delete-data.mdx b/docs/zh/12-taos-sql/08-delete-data.mdx similarity index 100% rename from docs-cn/07-develop/05-delete-data.mdx rename to docs/zh/12-taos-sql/08-delete-data.mdx diff --git a/docs-cn/12-taos-sql/07-function.md b/docs/zh/12-taos-sql/10-function.md similarity index 100% rename from docs-cn/12-taos-sql/07-function.md rename to docs/zh/12-taos-sql/10-function.md diff --git a/docs/zh/12-taos-sql/12-interval.md b/docs/zh/12-taos-sql/12-interval.md new file mode 100644 index 0000000000000000000000000000000000000000..ac273a6d8a85d7758ffece890120b92c40815555 --- /dev/null +++ b/docs/zh/12-taos-sql/12-interval.md @@ -0,0 +1,113 @@ +--- +sidebar_label: 按窗口切分聚合 +title: 按窗口切分聚合 +--- + + +TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。 +窗口子句用于针对查询的数据集合进行按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。 + +## 时间窗口 + +INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 
用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window)大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e]、[t1s, t1e]、[t2s, t2e] 分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围由 sliding time 标识。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。
+
+![TDengine Database 时间窗口示意图](./timewindow-1.webp)
+
+INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法:
+
+```
+SELECT * FROM temp_tb_1 INTERVAL(1m);
+```
+
+SLIDING 的向前滑动的时间不能超过一个窗口的时间范围。以下语句非法:
+
+```
+SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
+```
+
+- 聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔为 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。
+- 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u);当然,如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。
+- **注意**:用到 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。
+
+## 状态窗口
+
+使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是 [2019-04-28 14:22:07,2019-04-28 14:22:10] 和 [2019-04-28 14:22:11,2019-04-28 14:22:12] 两个。(状态窗口暂不支持对超级表使用)
+
+![TDengine Database 状态窗口示意图](./timewindow-3.webp)
+
+使用 STATE_WINDOW 来确定状态窗口划分的列。例如:
+
+```
+SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
+```
+
+## 会话窗口
+
+会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30] 和 [2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。
+
+![TDengine Database 会话窗口示意图](./timewindow-2.webp)
+
+在 tol_val 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
+
+```
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
+```
+
+这种类型的查询语法如下:
+
+```
+SELECT function_list FROM tb_name
+  [WHERE where_condition]
+  [SESSION(ts_col, tol_val)]
+  [STATE_WINDOW(col)]
+  [INTERVAL(interval [, offset]) [SLIDING sliding]]
+  [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+
+SELECT function_list FROM stb_name
+  [WHERE where_condition]
+  [INTERVAL(interval [, offset]) [SLIDING sliding]]
+  [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+  [GROUP BY tags]
+```
+
+- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。
+- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。
+- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。
+- WHERE 语句可以指定查询的起止时间和其他过滤条件。
+- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
+  1. 不进行填充:NONE(默认填充模式)。
+  2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
+  3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
+  4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
+  5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
+  6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
+
+:::info
+
+1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
+2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
+3. 
如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。 + +::: + +时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](../../develop/continuous-query)。 + +## 示例 + +智能电表的建表语句如下: + +``` +CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); +``` + +针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下: + +``` +SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters + WHERE ts>=NOW-1d and ts<=now + INTERVAL(10m) + FILL(PREV); +``` diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs/zh/12-taos-sql/14-limit.md similarity index 100% rename from docs-cn/12-taos-sql/09-limit.md rename to docs/zh/12-taos-sql/14-limit.md diff --git a/docs/zh/12-taos-sql/16-json.md b/docs/zh/12-taos-sql/16-json.md new file mode 100644 index 0000000000000000000000000000000000000000..d1f7b3300bca25e20affa023c402024de4a0efff --- /dev/null +++ b/docs/zh/12-taos-sql/16-json.md @@ -0,0 +1,100 @@ +--- +sidebar_label: JSON 类型使用说明 +title: JSON 类型使用说明 +--- + + +## 语法说明 + +1. 创建 json 类型 tag + + ``` + create stable s1 (ts timestamp, v1 int) tags (info json) + + create table s1_1 using s1 tags ('{"k1": "v1"}') + ``` + +2. json 取值操作符 -> + + ``` + select * from s1 where info->'k1' = 'v1' + + select info->'k1' from s1 + ``` + +3. json key 是否存在操作符 contains + + ``` + select * from s1 where info contains 'k2' + + select * from s1 where info contains 'k1' + ``` + +## 支持的操作 + +1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is no null,不支持 in + + ``` + select * from s1 where info->'k1' match 'v*'; + + select * from s1 where info->'k1' like 'v%' and info contains 'k2'; + + select * from s1 where info is null; + + select * from s1 where info->'k1' is not null + ``` + +2. 支持 json tag 放在 group by、order by、join 子句、union all 以及子查询中,比如 group by json->'key' + +3. 支持 distinct 操作. + + ``` + select distinct info->'k1' from s1 + ``` + +4. 标签操作 + + 支持修改 json 标签值(全量覆盖) + + + ``` + alter table s1_1 set tag info = '{"k1": "v2"}'; + ``` + + 支持修改 json 标签名 + + ``` + alter stable s1 change tag info info2 ; + ``` + + 不支持添加 json 标签、删除 json 标签、修改 json 标签列宽 + +## 其他约束条件 + +1. 只有标签列可以使用 json 类型,如果用 json 标签,标签列只能有一个。 + +2. 长度限制:json 中 key 的长度不能超过 256,并且 key 必须为可打印 ascii 字符;json 字符串总长度不超过 4096 个字节。 + +3. json 格式限制: + + 1. json 输入字符串可以为空("","\t"," "或 null)或 object,不能为非空的字符串,布尔型和数组。 + 2. object 可为{},如果 object 为{},则整个 json 串记为空。key 可为"",若 key 为"",则 json 串中忽略该 k-v 对。 + 3. value 可以为数字(int/double)或字符串或 bool 或 null,暂不可以为数组。不允许嵌套。 + 4. 若 json 字符串中出现两个相同的 key,则第一个生效。 + 5. json 字符串里暂不支持转义。 + +4. 当查询 json 中不存在的 key 时,返回 NULL + +5. 当 json tag 作为子查询结果时,不再支持上层查询继续对子查询中的 json 串做解析查询。 + + 比如暂不支持 + + ``` + select jtag->'key' from (select jtag from stable) + ``` + + 不支持 + + ``` + select jtag->'key' from (select jtag from stable) where jtag->'key'>0 + ``` diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md new file mode 100644 index 0000000000000000000000000000000000000000..1200669c40525682bba6b2c7743f5d1dcaf6c7d5 --- /dev/null +++ b/docs/zh/12-taos-sql/18-escape.md @@ -0,0 +1,30 @@ +--- +title: 转义字符说明 +--- + +## 转义字符表 + +| 字符序列 | **代表的字符** | +| :------: | -------------- | +| `\'` | 单引号' | +| `\"` | 双引号" | +| \n | 换行符 | +| \r | 回车符 | +| \t | tab 符 | +| `\\` | 斜杠\ | +| `\%` | % 规则见下 | +| `\_` | \_ 规则见下 | + +:::note +转义符的功能从 2.4.0.4 版本开始 + +::: + +## 转义字符使用规则 + +1. 标识符里有转义字符(数据库名、表名、列名) + 1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。 + 2. 
反引号``标识符: 保持原样,不转义 +2. 数据里有转义字符 + 1. 遇到上面定义的转义字符会转义(%和\_见下面说明),如果没有匹配的转义字符会忽略掉转义符\。 + 2. 对于%和\_,因为在 like 里这两个字符是通配符,所以在模式匹配 like 里用`\%`和`\_`表示字符里本身的%和\_,如果在 like 模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是%和\_。 diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md new file mode 100644 index 0000000000000000000000000000000000000000..f7a6107791ab206608982a54c8e47eae51691035 --- /dev/null +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -0,0 +1,355 @@ +--- +sidebar_label: 参数限制与保留关键字 +title: TDengine 参数限制与保留关键字 +--- + +## 名称命名规则 + +1. 合法字符:英文字符、数字和下划线 +2. 允许英文字符或下划线开头,不允许以数字开头 +3. 不区分大小写 +4. 转义后表(列)名规则: + 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。 + 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 + + 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 + 需要注意的是转义字符中的内容必须是可打印字符。 + 支持转义符的功能从 2.3.0.1 版本开始。 + +## 密码合法字符集 + +`[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]` + +去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) + +- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节 ,每行数据最大长度 48KB +- 表的列名:不能包含特殊字符,不能超过 64 个字节 +- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” +- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) +- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 字节 的存储位置) +- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节 +- 数据库副本数:不能超过 3 +- 用户名:不能超过 23 个 字节 +- 用户密码:不能超过 15 个 字节 +- 标签(Tags)数量:不能超过 128 个,可以 0 个 +- 标签的总长度:不能超过 16KB +- 记录条数:仅受存储空间限制 +- 表的个数:仅受节点个数限制 +- 库的个数:仅受节点个数限制 +- 单个库上虚拟节点个数:不能超过 64 个 +- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) + +## 保留关键字 + +目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: + +### A + +- ABORT +- ACCOUNT +- ACCOUNTS +- ADD +- AFTER +- ALL +- ALTER +- AND +- AS +- ASC +- ATTACH + +### B + +- BEFORE +- BEGIN +- BETWEEN +- BIGINT +- BINARY +- BITAND +- BITNOT +- BITOR +- BLOCKS +- BOOL +- BY + +### C + +- CACHE +- CACHELAST +- CASCADE +- CHANGE +- CLUSTER +- COLON +- COLUMN +- COMMA +- COMP +- COMPACT +- CONCAT +- CONFLICT +- CONNECTION +- CONNECTIONS +- CONNS +- COPY +- CREATE +- CTIME + +### D + +- DATABASE +- DATABASES +- DAYS +- DBS +- DEFERRED +- DELETE +- DELIMITERS +- DESC +- DESCRIBE +- DETACH +- DISTINCT +- DIVIDE +- DNODE +- DNODES +- DOT +- DOUBLE +- DROP + +### E + +- END +- EQ +- EXISTS +- EXPLAIN + +### F + +- FAIL +- FILE +- FILL +- FLOAT +- FOR +- FROM +- FSYNC + +### G + +- GE +- GLOB +- GRANTS +- GROUP +- GT + +### H + +- HAVING + +### I + +- ID +- IF +- IGNORE +- IMMEDIA +- IMPORT +- IN +- INITIAL +- INSERT +- INSTEAD +- INT +- INTEGER +- INTERVA +- INTO +- IS +- ISNULL + +### J + +- JOIN + +### K + +- KEEP +- KEY +- KILL + +### L + +- LE +- LIKE +- LIMIT +- LINEAR +- LOCAL +- LP +- LSHIFT +- LT + +### M + +- MATCH +- MAXROWS +- MINROWS +- MINUS +- MNODES +- MODIFY +- MODULES + +### N + +- NE +- NONE +- NOT +- NOTNULL +- NOW +- NULL + +### O + +- OF +- OFFSET +- OR +- ORDER + +### P + +- PARTITION +- PASS +- PLUS +- PPS +- PRECISION +- PREV +- PRIVILEGE + +### Q + +- QTIME +- QUERIE +- QUERY +- QUORUM + +### R + +- RAISE +- REM +- REPLACE +- REPLICA +- RESET +- RESTRIC +- ROW +- RP +- RSHIFT + +### S + +- SCORES +- SELECT +- SEMI +- SESSION +- SET +- SHOW +- SLASH +- SLIDING +- SLIMIT +- SMALLIN +- SOFFSET +- STable +- STableS +- STAR +- STATE +- STATEMEN +- STATE_WI +- STORAGE +- STREAM +- STREAMS +- STRING +- SYNCDB + +### T + +- TABLE +- TABLES +- TAG +- TAGS +- TBNAME 
+- TIMES +- TIMESTAMP +- TINYINT +- TOPIC +- TOPICS +- TRIGGER +- TSERIES + +### U + +- UMINUS +- UNION +- UNSIGNED +- UPDATE +- UPLUS +- USE +- USER +- USERS +- USING + +### V + +- VALUES +- VARIABLE +- VARIABLES +- VGROUPS +- VIEW +- VNODES + +### W + +- WAL +- WHERE + +### _ + +- _C0 +- _QSTART +- _QSTOP +- _QDURATION +- _WSTART +- _WSTOP +- _WDURATION + + +## 特殊说明 +### TBNAME +`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。 + +获取一个超级表所有的子表名及相关的标签信息: +```mysql +SELECT TBNAME, location FROM meters; +``` + +统计超级表下辖子表数量: +```mysql +SELECT COUNT(TBNAME) FROM meters; +``` + +以上两个查询均只支持在WHERE条件子句中添加针对标签(TAGS)的过滤条件。例如: +```mysql +taos> SELECT TBNAME, location FROM meters; + tbname | location | +================================================================== + d1004 | California.SanFrancisco | + d1003 | California.SanFrancisco | + d1002 | California.LosAngeles | + d1001 | California.LosAngeles | +Query OK, 4 row(s) in set (0.000881s) + +taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; + count(tbname) | +======================== + 2 | +Query OK, 1 row(s) in set (0.001091s) +``` +### _QSTART/_QSTOP/_QDURATION +表示查询过滤窗口的起始,结束以及持续时间 (从2.6.0.0版本开始支持) + +### _WSTART/_WSTOP/_WDURATION +窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始,结束以及持续时间(从 2.6.0.0 版本开始支持) + +### _c0 +表示表或超级表的第一列 \ No newline at end of file diff --git a/docs-cn/12-taos-sql/_category_.yml b/docs/zh/12-taos-sql/_category_.yml similarity index 100% rename from docs-cn/12-taos-sql/_category_.yml rename to docs/zh/12-taos-sql/_category_.yml diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md new file mode 100644 index 0000000000000000000000000000000000000000..36cc20bd3b6e4c668e386014f8444baa716f039d --- /dev/null +++ b/docs/zh/12-taos-sql/index.md @@ -0,0 +1,38 @@ +--- +title: TAOS SQL +description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容" +--- + +本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。 + +TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 没有提供时序数据的删除功能,因此 TAOS SQL 中也没有提供数据删除的相关功能。不过从 TDengine 企业版从 2.6 开始提供了 DELETE 语句。 + +本章节 SQL 语法遵循如下约定: + +- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身 +- \[ \] 表示内容为可选项,但不能输入 [] 本身 +- | 表示多选一,选择其中一个即可,但不能输入 | 本身 +- … 表示前面的项可重复多个 + +为更好地说明 SQL 语法的规则及其特点,本文假设存在一个数据集。以智能电表(meters)为例,假设每个智能电表采集电流、电压、相位三个量。其建模如下: + +``` +taos> DESCRIBE meters; + Field | Type | Length | Note | +================================================================================= + ts | TIMESTAMP | 8 | | + current | FLOAT | 4 | | + voltage | INT | 4 | | + phase | FLOAT | 4 | | + location | BINARY | 64 | TAG | + groupid | INT | 4 | TAG | +``` + +数据集包含 4 个智能电表的数据,按照 TDengine 的建模规则,对应 4 个子表,其名称分别是 d1001, d1002, d1003, d1004。 + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` diff --git a/docs-en/12-taos-sql/timewindow-1.webp b/docs/zh/12-taos-sql/timewindow-1.webp similarity index 100% rename from docs-en/12-taos-sql/timewindow-1.webp rename to docs/zh/12-taos-sql/timewindow-1.webp diff --git a/docs-en/12-taos-sql/timewindow-2.webp b/docs/zh/12-taos-sql/timewindow-2.webp similarity index 100% rename from docs-en/12-taos-sql/timewindow-2.webp rename to docs/zh/12-taos-sql/timewindow-2.webp diff --git a/docs-en/12-taos-sql/timewindow-3.webp b/docs/zh/12-taos-sql/timewindow-3.webp similarity index 100% rename from docs-en/12-taos-sql/timewindow-3.webp rename to 
docs/zh/12-taos-sql/timewindow-3.webp diff --git a/docs-cn/13-operation/01-pkg-install.md b/docs/zh/13-operation/01-pkg-install.md similarity index 100% rename from docs-cn/13-operation/01-pkg-install.md rename to docs/zh/13-operation/01-pkg-install.md diff --git a/docs-cn/13-operation/02-planning.mdx b/docs/zh/13-operation/02-planning.mdx similarity index 100% rename from docs-cn/13-operation/02-planning.mdx rename to docs/zh/13-operation/02-planning.mdx diff --git a/docs-cn/13-operation/03-tolerance.md b/docs/zh/13-operation/03-tolerance.md similarity index 100% rename from docs-cn/13-operation/03-tolerance.md rename to docs/zh/13-operation/03-tolerance.md diff --git a/docs-cn/13-operation/06-admin.md b/docs/zh/13-operation/06-admin.md similarity index 100% rename from docs-cn/13-operation/06-admin.md rename to docs/zh/13-operation/06-admin.md diff --git a/docs-cn/13-operation/07-import.md b/docs/zh/13-operation/07-import.md similarity index 100% rename from docs-cn/13-operation/07-import.md rename to docs/zh/13-operation/07-import.md diff --git a/docs-cn/13-operation/08-export.md b/docs/zh/13-operation/08-export.md similarity index 100% rename from docs-cn/13-operation/08-export.md rename to docs/zh/13-operation/08-export.md diff --git a/docs-cn/13-operation/09-status.md b/docs/zh/13-operation/09-status.md similarity index 100% rename from docs-cn/13-operation/09-status.md rename to docs/zh/13-operation/09-status.md diff --git a/docs-cn/13-operation/10-monitor.md b/docs/zh/13-operation/10-monitor.md similarity index 100% rename from docs-cn/13-operation/10-monitor.md rename to docs/zh/13-operation/10-monitor.md diff --git a/docs-cn/13-operation/11-optimize.md b/docs/zh/13-operation/11-optimize.md similarity index 100% rename from docs-cn/13-operation/11-optimize.md rename to docs/zh/13-operation/11-optimize.md diff --git a/docs-cn/13-operation/17-diagnose.md b/docs/zh/13-operation/17-diagnose.md similarity index 100% rename from docs-cn/13-operation/17-diagnose.md rename to docs/zh/13-operation/17-diagnose.md diff --git a/docs-cn/13-operation/_category_.yml b/docs/zh/13-operation/_category_.yml similarity index 100% rename from docs-cn/13-operation/_category_.yml rename to docs/zh/13-operation/_category_.yml diff --git a/docs-cn/13-operation/index.md b/docs/zh/13-operation/index.md similarity index 100% rename from docs-cn/13-operation/index.md rename to docs/zh/13-operation/index.md diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a8a92606e4aadf7298359023e739d568788094fd --- /dev/null +++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx @@ -0,0 +1,307 @@ +--- +title: REST API +--- + +为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 + +:::note +与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。从 2.2.0.0 版本开始,支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认由 taosAdapter 提供,要求必须在 URL 中指定 db_name。 +::: + +## 安装 + +RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。 + +## 验证 + +在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。 + +下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。 + +下面示例是列出所有的数据库,请把 
h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号: + +```html +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql +``` + +返回值结果如下表示验证通过: + +```json +{ + "status": "succ", + "head": [ + "name", + "created_time", + "ntables", + "vgroups", + "replica", + "quorum", + "days", + "keep1,keep2,keep(D)", + "cache(MB)", + "blocks", + "minrows", + "maxrows", + "wallevel", + "fsync", + "comp", + "precision", + "status" + ], + "data": [ + [ + "log", + "2020-09-02 17:23:00.039", + 4, + 1, + 1, + 1, + 10, + "30,30,30", + 1, + 3, + 100, + 4096, + 1, + 3000, + 2, + "us", + "ready" + ] + ], + "rows": 1 +} +``` + +## HTTP 请求格式 + +``` +http://:/rest/sql/[db_name] +``` + +参数说明: + +- fqnd: 集群中的任一台主机 FQDN 或 IP 地址 +- port: 配置文件中 httpPort 配置项,缺省为 6041 +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) + +例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。 + +HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 + +- 自定义身份认证信息如下所示(token 稍后介绍) + + ``` + Authorization: Taosd + ``` + +- Basic 身份认证信息如下所示 + + ``` + Authorization: Basic + ``` + +HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据表应提供数据库前缀,例如 db_name.tb_name。如果表名不带数据库前缀,又没有在 URL 中指定数据库名的话,系统会返回错误。因为 HTTP 模块只是一个简单的转发,没有当前 DB 的概念。 + +使用 `curl` 通过自定义身份认证方式来发起一个 HTTP Request,语法如下: + +```bash +curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name] +``` + +或者 + +```bash +curl -L -u username:password -d "" :/rest/sql/[db_name] +``` + +其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==` + +## HTTP 返回格式 + +返回值为 JSON 格式,如下: + +```json +{ + "status": "succ", + "head": ["ts","current", …], + "column_meta": [["ts",9,8],["current",6,4], …], + "data": [ + ["2018-10-03 14:38:05.000", 10.3, …], + ["2018-10-03 14:38:15.000", 12.6, …] + ], + "rows": 2 +} +``` + +说明: + +- status: 告知操作结果是成功还是失败。 +- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在后续版本中,有可能会从返回值中去掉 head 这一项。) +- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。 +- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。 +- rows: 表明总共多少行数据。 + +column_meta 中的列类型说明: + +- 1:BOOL +- 2:TINYINT +- 3:SMALLINT +- 4:INT +- 5:BIGINT +- 6:FLOAT +- 7:DOUBLE +- 8:BINARY +- 9:TIMESTAMP +- 10:NCHAR + +## 自定义授权码 + +HTTP 请求中需要带有授权码 ``,用于身份识别。授权码通常由管理员提供,可简单的通过发送 `HTTP GET` 请求来获取授权码,操作如下: + +```bash +curl http://:/rest/login// +``` + +其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 JSON 格式,各字段含义如下: + +- status:请求结果的标志位 + +- code:返回值代码 + +- desc:授权码 + +获取授权码示例: + +```bash +curl http://192.168.0.1:6041/rest/login/root/taosdata +``` + +返回值: + +```json +{ + "status": "succ", + "code": 0, + "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" +} +``` + +## 使用示例 + +- 在 demo 库里查询表 d1001 的所有记录: + + ```bash + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql + ``` + + 返回值: + + ```json + { + "status": "succ", + "head": ["ts", "current", "voltage", "phase"], + "column_meta": [ + ["ts", 9, 8], + ["current", 6, 4], + ["voltage", 4, 4], + ["phase", 6, 4] + ], + "data": [ + ["2018-10-03 
14:38:05.000", 10.3, 219, 0.31], + ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] + ], + "rows": 2 + } + ``` + +- 创建库 demo: + + ```bash + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql + ``` + + 返回值: + + ```json + { + "status": "succ", + "head": ["affected_rows"], + "column_meta": [["affected_rows", 4, 4]], + "data": [[1]], + "rows": 1 + } + ``` + +## 其他用法 + +### 结果集采用 Unix 时间戳 + +HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如 + +```bash +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt +``` + +返回结果: + +```json +{ + "status": "succ", + "head": ["ts", "current", "voltage", "phase"], + "column_meta": [ + ["ts", 9, 8], + ["current", 6, 4], + ["voltage", 4, 4], + ["phase", 6, 4] + ], + "data": [ + [1538548685000, 10.3, 219, 0.31], + [1538548695000, 12.6, 218, 0.33] + ], + "rows": 2 +} +``` + +### 结果集采用 UTC 时间字符串 + +HTTP 请求 URL 采用 `/rest/sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如 + +```bash + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc +``` + +返回值: + +```json +{ + "status": "succ", + "head": ["ts", "current", "voltage", "phase"], + "column_meta": [ + ["ts", 9, 8], + ["current", 6, 4], + ["voltage", 4, 4], + ["phase", 6, 4] + ], + "data": [ + ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], + ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] + ], + "rows": 2 +} +``` + +## 重要配置项 + +下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。 + +- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。 +- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。 +- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。 +- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。 +- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息。 +- httpDbNameMandatory: 是否必须在 RESTful URL 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful URL 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。 + +:::note +如果使用 taosd 提供的 REST API, 那么以上配置需要写在 taosd 的配置文件 taos.cfg 中。如果使用 taosAdapter 提供的 REST API, 那么需要参考 taosAdapter [对应的配置方法](/reference/taosadapter/)。 +::: diff --git a/docs-en/14-reference/02-rest-api/_category_.yml b/docs/zh/14-reference/02-rest-api/_category_.yml similarity index 100% rename from docs-en/14-reference/02-rest-api/_category_.yml rename to docs/zh/14-reference/02-rest-api/_category_.yml diff --git a/docs-cn/14-reference/03-connector/03-connector.mdx b/docs/zh/14-reference/03-connector/03-connector.mdx similarity index 100% rename from docs-cn/14-reference/03-connector/03-connector.mdx rename to docs/zh/14-reference/03-connector/03-connector.mdx diff --git a/docs-cn/14-reference/03-connector/_category_.yml b/docs/zh/14-reference/03-connector/_category_.yml similarity index 100% rename from docs-cn/14-reference/03-connector/_category_.yml rename to docs/zh/14-reference/03-connector/_category_.yml diff --git a/docs-cn/14-reference/03-connector/_linux_install.mdx b/docs/zh/14-reference/03-connector/_linux_install.mdx similarity index 100% rename from docs-cn/14-reference/03-connector/_linux_install.mdx rename to docs/zh/14-reference/03-connector/_linux_install.mdx diff --git a/docs-cn/14-reference/03-connector/_preparition.mdx b/docs/zh/14-reference/03-connector/_preparition.mdx similarity index 100% rename from docs-cn/14-reference/03-connector/_preparition.mdx rename to docs/zh/14-reference/03-connector/_preparition.mdx diff --git 
a/docs/zh/14-reference/03-connector/_verify_linux.mdx b/docs/zh/14-reference/03-connector/_verify_linux.mdx new file mode 100644 index 0000000000000000000000000000000000000000..fcb8aae6ae27cdcec58e000c4ab2e8a7ec6d9a5e --- /dev/null +++ b/docs/zh/14-reference/03-connector/_verify_linux.mdx @@ -0,0 +1,14 @@ +在 Linux shell 下直接执行 `taos` 连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下: + +```text +$ taos +Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 +Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. +taos> show databases; +name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | +========================================================================================================================================================================================================================= +test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | +log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | +Query OK, 2 row(s) in set (0.001198s) +taos> +``` diff --git a/docs/zh/14-reference/03-connector/_verify_windows.mdx b/docs/zh/14-reference/03-connector/_verify_windows.mdx new file mode 100644 index 0000000000000000000000000000000000000000..87c9fbd024f2c80b88434d47f35d919ed1ea77b2 --- /dev/null +++ b/docs/zh/14-reference/03-connector/_verify_windows.mdx @@ -0,0 +1,14 @@ +在 cmd 下进入到 C:\TDengine 目录下直接执行 `taos.exe`,连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下: + +```text + C:\TDengine>taos + Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 + Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
+ taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | + =================================================================================================================================================================================================================================================================== + test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | + log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | + Query OK, 2 row(s) in set (0.045000s) + taos> +``` diff --git a/docs-cn/14-reference/03-connector/_windows_install.mdx b/docs/zh/14-reference/03-connector/_windows_install.mdx similarity index 100% rename from docs-cn/14-reference/03-connector/_windows_install.mdx rename to docs/zh/14-reference/03-connector/_windows_install.mdx diff --git a/docs-en/14-reference/03-connector/connector.webp b/docs/zh/14-reference/03-connector/connector.webp similarity index 100% rename from docs-en/14-reference/03-connector/connector.webp rename to docs/zh/14-reference/03-connector/connector.webp diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/14-reference/03-connector/cpp.mdx new file mode 100644 index 0000000000000000000000000000000000000000..c74b6f76f041ca237a01ccdd619cf6ec271a3e16 --- /dev/null +++ b/docs/zh/14-reference/03-connector/cpp.mdx @@ -0,0 +1,451 @@ +--- +sidebar_position: 1 +sidebar_label: C/C++ +title: C/C++ Connector +--- + +C/C++ 开发人员可以使用 TDengine 的客户端驱动,即 C/C++连接器 (以下都用 TDengine 客户端驱动表示),开发自己的应用来连接 TDengine 集群完成数据存储、查询以及其他功能。TDengine 客户端驱动的 API 类似于 MySQL 的 C API。应用程序使用时,需要包含 TDengine 头文件 _taos.h_,里面列出了提供的 API 的函数原型;应用程序还要链接到所在平台上对应的动态库。 + +```c +#include +``` + +TDengine 服务端或客户端安装后,`taos.h` 位于: + +- Linux:`/usr/local/taos/include` +- Windows:`C:\TDengine\include` + +TDengine 客户端驱动的动态库位于: + +- Linux: `/usr/local/taos/driver/libtaos.so` +- Windows: `C:\TDengine\taos.dll` + +## 支持的平台 + +请参考[支持的平台列表](../#支持的平台) + +## 支持的版本 + +TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一对应的强对应关系,建议使用与 TDengine 服务端完全相同的客户端驱动。虽然低版本的客户端驱动在前三段版本号一致(即仅第四段版本号不同)的情况下也能够与高版本的服务端相兼容,但这并非推荐用法。强烈不建议使用高版本的客户端驱动访问低版本的服务端。 + +## 安装步骤 + +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) + +## 建立连接 + +使用客户端驱动访问 TDengine 集群的基本过程为:建立连接、查询和写入、关闭连接、清除资源。 + +下面为建立连接的示例代码,其中省略了查询和写入部分,展示了如何建立连接、关闭连接以及清除资源。 + +```c + TAOS *taos = taos_connect("localhost:6030", "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/); + exit(1); + } + + /* put your code here for read and write */ + + taos_close(taos); + taos_cleanup(); +``` + +在上面的示例代码中, `taos_connect()` 建立到客户端程序所在主机的 6030 端口的连接,`taos_close()`关闭当前连接,`taos_cleanup()`清除客户端驱动所申请和使用的资源。 + +:::note + +- 如未特别说明,当 API 的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。 +- 所有的错误码以及对应的原因描述在 `taoserror.h` 文件中。 + +::: + +## 示例程序 + +本节展示了使用客户端驱动访问 TDengine 集群的常见访问方式的示例代码。 + +### 同步查询示例 + +
+<details>
+<summary>同步查询</summary>
+
+```c
+{{#include examples/c/demo.c}}
+```
+
+</details>
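+
+上面通过 include 引用的 demo.c 即完整示例源码。为便于快速理解调用流程,下面再给出一个最小化的同步查询示意(仅为依据本文 API 说明整理的草图:其中 test.meters 为假设已存在的示例表,taos_print_row 为 taos.h 提供的行格式化辅助函数):
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <taos.h>
+
+int main() {
+  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
+  if (taos == NULL) {
+    printf("failed to connect to server\n");
+    exit(1);
+  }
+
+  TAOS_RES *res = taos_query(taos, "select * from test.meters limit 10");
+  if (taos_errno(res) != 0) {  // 不能以返回指针是否为 NULL 判断成败,需检查错误码
+    printf("query failed, reason: %s\n", taos_errstr(res));
+    taos_free_result(res);
+    taos_close(taos);
+    exit(1);
+  }
+
+  int         num_fields = taos_num_fields(res);
+  TAOS_FIELD *fields     = taos_fetch_fields(res);
+  TAOS_ROW    row;
+  char        buf[1024];
+  while ((row = taos_fetch_row(res)) != NULL) {  // 逐行遍历结果集
+    taos_print_row(buf, row, fields, num_fields);
+    printf("%s\n", buf);
+  }
+
+  taos_free_result(res);  // 查询完成后务必释放结果集
+  taos_close(taos);
+  taos_cleanup();
+  return 0;
+}
+```
+
+在 Linux 环境下通常可用 `gcc demo.c -o demo -ltaos` 编译上述代码(如头文件或动态库不在默认搜索路径,需通过 -I/-L 另行指定)。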
+
+### 异步查询示例
+
+<details>
+<summary>异步查询</summary>
+
+```c
+{{#include examples/c/asyncdemo.c}}
+```
+
+</details>
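+
+下面补充一个异步查询调用流程的最小示意(同样只是依据本文 API 说明整理的草图:为简化起见用全局标志位轮询等待结果,实际应用中建议改用信号量等同步机制;test.meters 为假设存在的示例表):
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <taos.h>
+
+static volatile int done = 0;
+
+// 取回一批记录后的回调:numOfRows 为本批行数,0 表示取完,负数表示出错
+void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
+  if (numOfRows > 0) {
+    for (int i = 0; i < numOfRows; i++) {
+      TAOS_ROW row = taos_fetch_row(res);  // 前向迭代本批内的每一行
+      (void)row;                           // 此处省略对行数据的解析
+    }
+    taos_fetch_rows_a(res, fetch_cb, param);  // 继续获取下一批记录
+  } else {
+    taos_free_result(res);  // 结果返回完成(或出错),释放结果集
+    done = 1;
+  }
+}
+
+// taos_query_a 的回调:code 为 0 表示语句执行成功
+void query_cb(void *param, TAOS_RES *res, int code) {
+  if (code != 0) {
+    printf("query failed, reason: %s\n", taos_errstr(res));
+    done = 1;
+    return;
+  }
+  taos_fetch_rows_a(res, fetch_cb, param);
+}
+
+int main() {
+  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
+  if (taos == NULL) exit(1);
+
+  taos_query_a(taos, "select * from test.meters", query_cb, NULL);
+  while (!done) usleep(1000);  // 等待回调链结束
+
+  taos_close(taos);
+  taos_cleanup();
+  return 0;
+}
+```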
+
+### 参数绑定示例
+
+<details>
+<summary>参数绑定</summary>
+
+```c
+{{#include examples/c/prepare.c}}
+```
+
+</details>
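+
+下面补充一个按后文“参数绑定 API”一节所述步骤(init → prepare → set_tbname → bind_param_batch → add_batch → execute → close)进行多行写入的最小示意(草图性质,需 2.1.1.0 及以上版本;假设库 test 及子表 d1001(ts timestamp, current float)已存在):
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <taos.h>
+
+int stmt_insert_demo(TAOS *taos) {
+  taos_select_db(taos, "test");                             // 先选定缺省数据库
+
+  TAOS_STMT *stmt = taos_stmt_init(taos);                   // 1. 创建参数绑定对象
+  taos_stmt_prepare(stmt, "insert into ? values(?,?)", 0);  // 2. 解析 INSERT 语句
+  taos_stmt_set_tbname(stmt, "d1001");                      // 3. 绑定具体表名
+
+  int64_t ts[2]      = {1626861392589LL, 1626861392590LL};
+  float   current[2] = {10.3f, 12.6f};
+  char    is_null[2] = {0, 0};
+
+  TAOS_MULTI_BIND params[2];
+  memset(params, 0, sizeof(params));
+  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
+  params[0].buffer        = ts;
+  params[0].buffer_length = sizeof(int64_t);  // 定长类型无需设置 length
+  params[0].is_null       = is_null;
+  params[0].num           = 2;                // 一次绑定两行
+  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
+  params[1].buffer        = current;
+  params[1].buffer_length = sizeof(float);
+  params[1].is_null       = is_null;
+  params[1].num           = 2;
+
+  taos_stmt_bind_param_batch(stmt, params);  // 5. 以多行方式绑定 VALUES
+  taos_stmt_add_batch(stmt);                 // 6. 加入批处理
+  if (taos_stmt_execute(stmt) != 0) {        // 8. 执行批处理
+    printf("execute failed, reason: %s\n", taos_stmt_errstr(stmt));
+    taos_stmt_close(stmt);
+    return -1;
+  }
+  return taos_stmt_close(stmt);              // 9. 释放所有资源
+}
+```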
+
+### 无模式写入示例
+
+<details>
+<summary>无模式写入</summary>
+
+```c
+{{#include examples/c/schemaless.c}}
+```
+
+</details>
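+
+下面补充一个无模式写入的最小示意(草图性质,需 2.3.0.0 及以上版本;假设库 test 已存在且连接可用,行协议内容仅为示例数据):
+
+```c
+#include <stdio.h>
+#include <taos.h>
+
+int sml_insert_demo(TAOS *taos) {
+  taos_select_db(taos, "test");  // 无模式写入前需先选定数据库
+
+  char *lines[] = {
+      "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000"
+  };
+
+  // 采用 InfluxDB 行协议,时间戳为纳秒精度
+  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1,
+                                         TSDB_SML_LINE_PROTOCOL,
+                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
+  if (taos_errno(res) != 0) {
+    printf("schemaless insert failed, reason: %s\n", taos_errstr(res));
+  }
+  taos_free_result(res);  // 返回的 TAOS_RES 需调用方负责释放
+  return 0;
+}
+```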
+
+### 订阅和消费示例
+
+<details>
+<summary>订阅和消费</summary>
+
+```c
+{{#include examples/c/subscribe.c}}
+```
+
+</details>
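+
+下面补充一个同步模式订阅的最小示意(草图性质:topic 名称与查询语句均为示例假设;异步模式则改为在 taos_subscribe 中传入回调函数,详见后文“订阅和消费 API”一节):
+
+```c
+#include <stdio.h>
+#include <taos.h>
+
+void sync_subscribe_demo(TAOS *taos) {
+  // restart 为 1 表示重新开始订阅;轮询周期设置为 1000 毫秒
+  TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo_topic",
+                                  "select * from test.meters", NULL, NULL, 1000);
+  if (tsub == NULL) {
+    printf("failed to create subscription\n");
+    return;
+  }
+
+  for (int i = 0; i < 10; i++) {           // 示意:仅轮询 10 次后退出
+    TAOS_RES *res = taos_consume(tsub);    // 距上次调用不足轮询周期时会阻塞
+    if (res == NULL) break;                // NULL 表示系统出错
+    TAOS_ROW row;
+    while ((row = taos_fetch_row(res)) != NULL) {
+      // 应尽快消费结果,避免服务端缓存堆积;此处省略对 row 的解析
+    }
+  }
+
+  taos_unsubscribe(tsub, 0);  // 0:不保留订阅进度信息
+}
+```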
+ +:::info +更多示例代码及下载请见 [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c)。 +也可以在安装目录下的 `examples/c` 路径下找到。 该目录下有 makefile,在 Linux 环境下,直接执行 make 就可以编译得到执行文件。 +**提示:**在 ARM 环境下编译时,请将 makefile 中的 `-msse4.2` 去掉,这个选项只有在 x64/x86 硬件平台上才能支持。 + +::: + +## API 参考 + +以下分别介绍 TDengine 客户端驱动的基础 API、同步 API、异步 API、订阅 API 和无模式写入 API。 + +### 基础 API + +基础 API 用于完成创建数据库连接等工作,为其它 API 的执行提供运行时环境。 + +- `void taos_init()` + + 初始化运行环境。如果没有主动调用该 API,那么调用 `taos_connect()` 时驱动将自动调用该 API,故程序一般无需手动调用。 + +- `void taos_cleanup()` + + 清理运行环境,应用退出前应调用。 + +- `int taos_options(TSDB_OPTION option, const void * arg, ...)` + + 设置客户端选项,目前支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。 + +- `char *taos_get_client_info()` + + 获取客户端版本信息。 + +- `TAOS *taos_connect(const char *host, const char *user, const char *pass, const char *db, int port)` + + 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: + + - host:TDengine 集群中任一节点的 FQDN + - user:用户名 + - pass:密码 + - db: 数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库 + - port:taosd 程序监听的端口 + + 返回值为空表示失败。应用程序需要保存返回的参数,以便后续使用。 + + :::info + 同一进程可以根据不同的 host/port 连接多个 TDengine 集群 + + ::: + +- `char *taos_get_server_info(TAOS *taos)` + + 获取服务端版本信息。 + +- `int taos_select_db(TAOS *taos, const char *db)` + + 将当前的缺省数据库设置为 `db`。 + +- `void taos_close(TAOS *taos)` + + 关闭连接,其中`taos`是 `taos_connect()` 返回的句柄。 + +### 同步查询 API + +本小节介绍 API 均属于同步接口。应用调用后,会阻塞等待响应,直到获得返回结果或错误信息。 + +- `TAOS_RES* taos_query(TAOS *taos, const char *sql)` + + 执行 SQL 语句,可以是 DQL、DML 或 DDL 语句。 其中的 `taos` 参数是通过 `taos_connect()` 获得的句柄。不能通过返回值是否是 `NULL` 来判断执行结果是否失败,而是需要用 `taos_errno()` 函数解析结果集中的错误代码来进行判断。 + +- `int taos_result_precision(TAOS_RES *res)` + + 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。 + +- `TAOS_ROW taos_fetch_row(TAOS_RES *res)` + + 按行获取查询结果集中的数据。 + +- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)` + + 批量获取查询结果集中的数据,返回值为获取到的数据的行数。 + +- `int taos_num_fields(TAOS_RES *res)` 和 `int taos_field_count(TAOS_RES *res)` + + 这两个 API 等价,用于获取查询结果集中的列数。 + +- `int* taos_fetch_lengths(TAOS_RES *res)` + + 获取结果集中每个字段的长度。返回值是一个数组,其长度为结果集的列数。 + +- `int taos_affected_rows(TAOS_RES *res)` + + 获取被所执行的 SQL 语句影响的行数。 + +- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)` + + 获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度),与 `taos_num_fileds()` 配合使用,可用来解析 `taos_fetch_row()` 返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下: + +```c +typedef struct taosField { + char name[65]; // column name + uint8_t type; // data type + int16_t bytes; // length, in bytes +} TAOS_FIELD; +``` + +- `void taos_stop_query(TAOS_RES *res)` + + 停止当前查询的执行。 + +- `void taos_free_result(TAOS_RES *res)` + + 释放查询结果集以及相关的资源。查询完成后,务必调用该 API 释放资源,否则可能导致应用内存泄露。但也需注意,释放资源后,如果再调用 `taos_consume()` 等获取查询结果的函数,将导致应用崩溃。 + +- `char *taos_errstr(TAOS_RES *res)` + + 获取最近一次 API 调用失败的原因,返回值为字符串标识的错误提示信息。 + +- `int taos_errno(TAOS_RES *res)` + + 获取最近一次 API 调用失败的原因,返回值为错误代码。 + +:::note +2.0 及以上版本 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池。而不推荐在应用中将该连接 (TAOS\*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性,但 “USE statement” 等状态量有可能在线程之间相互干扰。此外,C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 `taos_close()` 关闭连接。 + +::: + +### 异步查询 API + +TDengine 还提供性能更高的异步 API 处理数据插入、查询操作。在软硬件环境相同的情况下,异步 API 处理数据插入的速度比同步 API 快 2 ~ 4 倍。异步 API 采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步 API 在网络延迟严重的情况下,优势尤为突出。 + +异步 API 都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的 API 而定。第一个参数 param 是应用调用异步 API 时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是 SQL 操作的结果集,如果为空,比如 insert 
操作,表示没有记录返回,如果不为空,比如 select 操作,表示有记录返回。 + +异步 API 对于使用者的要求相对较高,用户可根据具体应用场景选择性使用。下面是两个重要的异步 API: + +- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);` + + 异步执行 SQL 语句。 + + - taos:调用 `taos_connect()` 返回的数据库连接 + - sql:需要执行的 SQL 语句 + - fp:用户定义的回调函数,其第三个参数 `code` 用于指示操作是否成功,`0` 表示成功,负数表示失败(调用 `taos_errstr()` 可获取失败原因)。应用在定义回调函数的时候,主要处理第二个参数 `TAOS_RES *`,该参数是查询返回的结果集 + - param:应用提供一个用于回调的参数 + +- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);` + + 批量获取异步查询的结果集,只能与 `taos_query_a()` 配合使用。其中: + + - res:`taos_query_a()` 回调时返回的结果集 + - fp:回调函数。其参数 `param` 是用户可定义的传递给回调函数的参数结构体;`numOfRows` 是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用 `taos_fetch_row()` 前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用 `taos_fetch_rows_a()` 获取下一批记录进行处理,直到返回的记录数 `numOfRows` 为零(结果返回完成)或记录数为负值(查询出错)。 + +TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。 + +### 参数绑定 API + +除了直接调用 `taos_query()` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,风格与 MySQL 类似,目前也仅支持用问号 `?` 来代表待绑定的参数。 + +从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下: + +1. 调用 `taos_stmt_init()` 创建参数绑定对象; +2. 调用 `taos_stmt_prepare()` 解析 INSERT 语句; +3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname()` 来设置表名; +4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags()` 来设置表名和 TAGS 的值; +5. 调用 `taos_stmt_bind_param_batch()` 以多行的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值; +6. 调用 `taos_stmt_add_batch()` 把当前绑定的参数加入批处理; +7. 可以重复第 3 ~ 6 步,为批处理加入更多的数据行; +8. 调用 `taos_stmt_execute()` 执行已经准备好的批处理指令; +9. 执行完毕,调用 `taos_stmt_close()` 释放所有资源。 + +说明:如果 `taos_stmt_execute()` 执行成功,假如不需要改变 SQL 语句的话,那么是可以复用 `taos_stmt_prepare()` 的解析结果,直接进行第 3 ~ 6 步绑定新数据的。但如果执行出错,那么并不建议继续在当前的环境上下文下继续工作,而是建议释放资源,然后从 `taos_stmt_init()` 步骤重新开始。 + +接口相关的具体函数如下(也可以参考 [prepare.c](https://github.com/taosdata/TDengine/blob/develop/examples/c/prepare.c) 文件中使用对应函数的方式): + +- `TAOS_STMT* taos_stmt_init(TAOS *taos)` + + 创建一个 TAOS_STMT 对象用于后续调用。 + +- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` + + 解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。 + +- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)` + + 不如 `taos_stmt_bind_param_batch()` 效率高,但可以支持非 INSERT 类型的 SQL 语句。 + 进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下: + + ```c + typedef struct TAOS_BIND { + int buffer_type; + void * buffer; + uintptr_t buffer_length; // not in use + uintptr_t * length; + int * is_null; + int is_unsigned; // not in use + int * error; // not in use + } TAOS_BIND; + ``` + +- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)` + + (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) + 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 + +- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` + + (2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) + 当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。TAGS 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。 + +- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)` + + (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) + 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下: + + ```c + typedef struct TAOS_MULTI_BIND { + int 
buffer_type; + void * buffer; + uintptr_t buffer_length; + uintptr_t * length; + char * is_null; + int num; // the number of columns + } TAOS_MULTI_BIND; + ``` + +- `int taos_stmt_add_batch(TAOS_STMT *stmt)` + + 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param()` 或 `taos_stmt_bind_param_batch()` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。 + +- `int taos_stmt_execute(TAOS_STMT *stmt)` + + 执行准备好的语句。目前,一条语句只能执行一次。 + +- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` + + 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result()` 以释放资源。 + +- `int taos_stmt_close(TAOS_STMT *stmt)` + + 执行完毕,释放所有资源。 + +- `char * taos_stmt_errstr(TAOS_STMT *stmt)` + + (2.1.3.0 版本新增) + 用于在其他 STMT API 返回错误(返回错误码或空指针)时获取错误信息。 + +### 无模式(schemaless)写入 API + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` + + **功能说明** + 该接口将行协议的文本数据写入到 TDengine 中。 + + **参数说明** + taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。 + lines:文本数据。满足解析格式要求的无模式文本字符串。 + numLines:文本数据的行数,不能为 0 。 + protocol: 行协议类型,用于标识文本数据格式。 + precision:文本数据中的时间戳精度字符串。 + + **返回值** + TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。 + 在某些情况下,返回的 TAOS_RES 为 `NULL`,此时仍然可以调用 `taos_errno()` 来安全地获得错误码信息。 + 返回的 TAOS_RES 需要调用方来负责释放,否则会出现内存泄漏。 + + **说明** + 协议类型是枚举类型,包含以下三种格式: + + - TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol) + - TSDB_SML_TELNET_PROTOCOL: OpenTSDB Telnet 文本行协议 + - TSDB_SML_JSON_PROTOCOL: OpenTSDB Json 协议格式 + + 时间戳分辨率的定义,定义在 `taos.h` 文件中,具体内容如下: + + - TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0, + - TSDB_SML_TIMESTAMP_HOURS, + - TSDB_SML_TIMESTAMP_MINUTES, + - TSDB_SML_TIMESTAMP_SECONDS, + - TSDB_SML_TIMESTAMP_MILLI_SECONDS, + - TSDB_SML_TIMESTAMP_MICRO_SECONDS, + - TSDB_SML_TIMESTAMP_NANO_SECONDS + + 需要注意的是,时间戳分辨率参数只在协议类型为 `SML_LINE_PROTOCOL` 的时候生效。 + 对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。 + + **支持版本** + 该功能接口从 2.3.0.0 版本开始支持。 + +### 订阅和消费 API + +订阅 API 目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。 + +- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` + + 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: + + - taos:已经建立好的数据库连接 + - restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + - topic:订阅的主题(即名称),此参数是订阅的唯一标识 + - sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 + - fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` + - param:调用回调函数时的附加参数,系统 API 将其原样传递到回调函数,不进行任何处理 + - interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用 `taos_consume()` 的间隔小于此周期,API 将会阻塞,直到时间间隔超过此周期。 + +- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` + + 异步模式下,回调函数的原型,其参数为: + + - tsub:订阅对象 + - res:查询结果集,注意结果集中可能没有记录 + - param:调用 `taos_subscribe()` 时客户程序提供的附加参数 + - code:错误码 + + :::note + 在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。 + + ::: + +- `TAOS_RES *taos_consume(TAOS_SUB *tsub)` + + 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用 `taos_consume()` 的间隔小于订阅的轮询周期,API 将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该 API 将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此 API。 + + :::note + 在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。 + + ::: + +- `void 
taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` + + 取消订阅。 如参数 `keepProgress` 不为 0,API 会保留订阅的进度信息,后续调用 `taos_subscribe()` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 + diff --git a/docs/zh/14-reference/03-connector/csharp.mdx b/docs/zh/14-reference/03-connector/csharp.mdx new file mode 100644 index 0000000000000000000000000000000000000000..db3c59806dd69a3592383ab9a82400abe51259bf --- /dev/null +++ b/docs/zh/14-reference/03-connector/csharp.mdx @@ -0,0 +1,189 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 7 +sidebar_label: C# +title: C# Connector +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +import Preparition from "./_preparition.mdx" +import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" +import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" +import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" +import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" +import CSQuery from "../../07-develop/04-query-data/_cs.mdx" +import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" + +`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 + +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../../rest-api/) 文档自行编写。 + +本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 + +`TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet)。 + +## 支持的平台 + +支持的平台和 TDengine 客户端驱动支持的平台一致。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 支持的功能特性 + +1. 连接管理 +2. 普通查询 +3. 连续查询 +4. 参数绑定 +5. 订阅功能 +6. Schemaless + +## 安装步骤 + +### 安装前准备 + +* 安装 [.NET SDK](https://dotnet.microsoft.com/download) +* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) +* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +### 使用 dotnet CLI 安装 + + + + +可以在当前 .NET 项目的路径下,通过 dotnet 命令引用 Nuget 中发布的 `TDengine.Connector` 到当前项目。 + +``` bash +dotnet add package TDengine.Connector +``` + + + + +可以下载 TDengine 的源码,直接引用最新版本的 TDengine.Connector 库 + +```bash +git clone https://github.com/taosdata/TDengine.git +cd TDengine/src/connector/C#/src/ +cp -r TDengineDriver/ myProject + +cd myProject +dotnet add TDengineDriver/TDengineDriver.csproj +``` + + + +## 建立连接 + +``` C# +using TDengineDriver; + +namespace TDengineExample +{ + + internal class EstablishConnection + { + static void Main(String[] args) + { + string host = "localhost"; + short port = 6030; + string username = "root"; + string password = "taosdata"; + string dbname = ""; + + var conn = TDengine.Connect(host, username, password, dbname, port); + if (conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + TDengine.Close(conn); + TDengine.Cleanup(); + } + } +} + +``` + +## 使用示例 + +### 写入数据 + +#### SQL 写入 + + + +#### InfluxDB 行协议写入 + + + +#### OpenTSDB Telnet 行协议写入 + + + +#### OpenTSDB JSON 行协议写入 + + + +### 查询数据 + +#### 同步查询 + + + +#### 异步查询 + + + +### 更多示例程序 + +|示例程序 | 示例程序描述 | +|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------| +| [C#checker](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/C%23checker) | 使用 TDengine.Connector 可以通过 help 命令中提供的参数,测试C# Driver的同步写入和查询 | +| 
[TDengineTest](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/TDengineTest) | 使用 TDengine.Connector 实现的简单写入和查询的示例 | +| [insertCn](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/insertCn) | 使用 TDengine.Connector 实现的写入和查询中文字符的示例 | +| [jsonTag](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/jsonTag) | 使用 TDengine.Connector 实现的写入和查询 json tag 类型数据的示例 | +| [stmt](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/stmt) | 使用 TDengine.Connector 实现的参数绑定的示例 | +| [schemaless](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 | +| [benchmark](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/taosdemo) | 使用 TDengine.Connector 实现的简易 Benchmark | +| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/QueryAsyncSample.cs) | 使用 TDengine.Connector 实现的异步查询的示例 | +| [subscribe](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/SubscribeSample.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 | + +## 重要更新记录 + +| TDengine.Connector | 说明 | +|--------------------|--------------------------------| +| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 | +| 1.0.5 | 修复 Windows 同步查询中文报错 bug。 | +| 1.0.4 | 新增异步查询,订阅等功能。修复绑定参数 bug。 | +| 1.0.3 | 新增参数绑定、schemaless、 json tag等功能。 | +| 1.0.2 | 新增连接管理、同步查询、错误信息等功能。 | + +## 其他说明 + +### 第三方驱动 + +`Maikebing.Data.Taos` 是一个 TDengine 的 ADO.NET 连接器,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考: + +* 接口下载: +* 用法说明: + +## 常见问题 + +1. "Unable to establish connection","Unable to resolve FQDN" + + 一般是因为 FQDN 配置不正确。可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html)解决。 + +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: 找不到指定的模块。 + + 一般是因为程序没有找到依赖的客户端驱动。解决方法为:Windows 下可以将 `C:\TDengine\driver\taos.dll` 拷贝到 `C:\Windows\System32\ ` 目录下,Linux 下建立如下软链接 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + +## API 参考 + +[API 参考](https://docs.taosdata.com/api/connector-csharp/html/860d2ac1-dd52-39c9-e460-0829c4e5a40b.htm) diff --git a/docs/zh/14-reference/03-connector/go.mdx b/docs/zh/14-reference/03-connector/go.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b566c36624e0efe861e08f708b36ec9c1e0900dd --- /dev/null +++ b/docs/zh/14-reference/03-connector/go.mdx @@ -0,0 +1,409 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 4 +sidebar_label: Go +title: TDengine Go Connector +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import Preparition from "./_preparition.mdx"; +import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"; +import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"; +import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"; +import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx"; +import GoQuery from "../../07-develop/04-query-data/_go.mdx"; + +`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 + +`driver-go` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。另外一种是 **REST 连接**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 运行实例。REST 连接实现的功能特性集合和原生连接有少量不同。 + +本文介绍如何安装 `driver-go`,并通过 `driver-go` 连接 TDengine 集群、进行数据查询、数据写入等基本操作。 + +`driver-go` 的源码托管在 [GitHub](https://github.com/taosdata/driver-go)。 + +## 支持的平台 
+ +原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 +REST 连接支持所有能运行 Go 的平台。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 支持的功能特性 + +### 原生连接 + +“原生连接”指连接器通过 TDengine 客户端驱动(taosc)直接与 TDengine 运行实例建立的连接。支持的功能特性有: + +- 普通查询 +- 连续查询 +- 订阅 +- schemaless 接口 +- 参数绑定接口 + +### REST 连接 + +"REST 连接"指连接器通过 taosAdapter 组件提供的 REST API 与 TDengine 运行实例建立的连接。支持的功能特性有: + +- 普通查询 +- 连续查询 + +## 安装步骤 + +### 安装前准备 + +- 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) +- 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +配置好环境变量,检查命令: + +- `go env` +- `gcc -v` + +### 使用 go get 安装 + +`go get -u github.com/taosdata/driver-go/v2@latest` + +### 使用 go mod 管理 + +1. 使用 `go mod` 命令初始化项目: + +```text +go mod init taos-demo +``` + +2. 引入 taosSql : + +```go +import ( + "database/sql" + _ "github.com/taosdata/driver-go/v2/taosSql" +) +``` + +3. 使用 `go mod tidy` 更新依赖包: + +```text +go mod tidy +``` + +4. 使用 `go run taos-demo` 运行程序或使用 `go build` 命令编译出二进制文件。 + +```text +go run taos-demo +go build +``` + +## 建立连接 + +### 数据源名称(DSN) + +数据源名称具有通用格式,例如 [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php),但没有类型前缀(方括号表示可选): + +```text +[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN] +``` + +完整形式的 DSN: + +```text +username:password@protocol(address)/dbname?param=value +``` + +### 使用连接器进行连接 + + + + +_taosSql_ 通过 cgo 实现了 Go 的 `database/sql/driver` 接口。只需要引入驱动就可以使用 [`database/sql`](https://golang.org/pkg/database/sql/) 的接口。 + +使用 `taosSql` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`,DSN 支持的参数: + +- configPath 指定 taos.cfg 目录 + +示例: + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/taosdata/driver-go/v2/taosSql" +) + +func main() { + var taosUri = "root:taosdata@tcp(localhost:6030)/" + taos, err := sql.Open("taosSql", taosUri) + if err != nil { + fmt.Println("failed to connect TDengine, err:", err) + return + } +} +``` + + + + +_taosRestful_ 通过 `http client` 实现了 Go 的 `database/sql/driver` 接口。只需要引入驱动就可以使用[`database/sql`](https://golang.org/pkg/database/sql/)的接口。 + +使用 `taosRestful` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`,DSN 支持的参数: + +- `disableCompression` 是否接受压缩数据,默认为 true 不接受压缩数据,如果传输数据使用 gzip 压缩设置为 false。 +- `readBufferSize` 读取数据的缓存区大小默认为 4K(4096),当查询结果数据量多时可以适当调大该值。 + +示例: + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/taosdata/driver-go/v2/taosRestful" +) + +func main() { + var taosUri = "root:taosdata@http(localhost:6041)/" + taos, err := sql.Open("taosRestful", taosUri) + if err != nil { + fmt.Println("failed to connect TDengine, err:", err) + return + } +} +``` + + + + +## 使用示例 + +### 写入数据 + +#### SQL 写入 + + + +#### InfluxDB 行协议写入 + + + +#### OpenTSDB Telnet 行协议写入 + + + +#### OpenTSDB JSON 行协议写入 + + + +### 查询数据 + + + +### 更多示例程序 + +- [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go) +- [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 + +## 使用限制 + +由于 REST 接口无状态所以 `use db` 语法不会生效,需要将 db 名称放到 SQL 语句中,如:`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`否则将报错`[0x217] Database not specified or available`。 + +也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`,此方法在 TDengine 2.4.0.5 版本的 taosAdapter 开始支持。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 + +完整示例如下: + +```go +package main + +import ( + "database/sql" + "fmt" + "time" + + _ "github.com/taosdata/driver-go/v2/taosRestful" +) + +func main() { + var taosDSN = 
"root:taosdata@http(localhost:6041)/test" + taos, err := sql.Open("taosRestful", taosDSN) + if err != nil { + fmt.Println("failed to connect TDengine, err:", err) + return + } + defer taos.Close() + taos.Exec("create database if not exists test") + taos.Exec("create table if not exists tb1 (ts timestamp, a int)") + _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + if err != nil { + fmt.Println("failed to insert, err:", err) + return + } + rows, err := taos.Query("select * from tb1") + if err != nil { + fmt.Println("failed to select from table, err:", err) + return + } + + defer rows.Close() + for rows.Next() { + var r struct { + ts time.Time + a int + } + err := rows.Scan(&r.ts, &r.a) + if err != nil { + fmt.Println("scan error:\n", err) + return + } + fmt.Println(r.ts, r.a) + } +} +``` + +## 常见问题 + +1. database/sql 中 stmt(参数绑定)相关接口崩溃 + + REST 不支持参数绑定相关接口,建议使用`db.Exec`和`db.Query`。 + +2. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` + + 在 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 + +3. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` + + 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 + +4. 升级 `github.com/taosdata/driver-go/v2/taosRestful` + + 将 `go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 + +5. `readBufferSize` 参数调大后无明显效果 + + `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 + +6. `disableCompression` 参数设置为 `false` 时查询效率降低 + + 当 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 + +7. `go get` 命令无法获取包,或者获取包超时 + + 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`。 + +## 常用 API + +### database/sql API + +- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` + + 该 API 用来打开 DB,返回一个类型为 \*DB 的对象。 + + :::info + 该 API 成功创建的时候,并没有做权限等检查,只有在真正执行 Query 或者 Exec 的时候才能真正的去创建连接,并同时检查 user/password/host/port 是不是合法。 + ::: + +- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` + + `sql.Open` 内置的方法,用来执行非查询相关 SQL。 + +- `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` + + `sql.Open` 内置的方法,用来执行查询语句。 + +### 高级功能(af)API + +`af` 包封装了连接管理、订阅、schemaless、参数绑定等 TDengine 高级功能。 + +#### 连接管理 + +- `af.Open(host, user, pass, db string, port int) (*Connector, error)` + + 该 API 通过 cgo 创建与 taosd 的连接。 + +- `func (conn *Connector) Close() error` + + 关闭与 taosd 的连接。 + +#### 订阅 + +- `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` + + 订阅数据。 + +- `func (s *taosSubscriber) Consume() (driver.Rows, error)` + + 消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。 + +- `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` + + 取消订阅数据。 + +#### schemaless + +- `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` + + 写入 influxDB 行协议。 + +- `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` + + 写入 OpenTDSB telnet 协议数据。 + +- `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` + + 写入 OpenTSDB JSON 协议数据。 + +#### 参数绑定 + +- `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` + + 参数绑定单行插入。 + +- `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` + + 参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。 + +- `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` + + 初始化参数。 + +- `func 
(stmt *InsertStmt) Prepare(sql string) error` + + 参数绑定预处理 SQL 语句。 + +- `func (stmt *InsertStmt) SetTableName(name string) error` + + 参数绑定设置表名。 + +- `func (stmt *InsertStmt) SetSubTableName(name string) error` + + 参数绑定设置子表名。 + +- `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` + + 参数绑定多行数据。 + +- `func (stmt *InsertStmt) AddBatch() error` + + 添加到参数绑定批处理。 + +- `func (stmt *InsertStmt) Execute() error` + + 执行参数绑定。 + +- `func (stmt *InsertStmt) GetAffectedRows() int` + + 获取参数绑定插入受影响行数。 + +- `func (stmt *InsertStmt) Close() error` + + 结束参数绑定。 + +## API 参考 + +全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v2) diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d1f8b2afc68bc02bb1bcabf8110a2dfd4be0eb2e --- /dev/null +++ b/docs/zh/14-reference/03-connector/java.mdx @@ -0,0 +1,849 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 2 +sidebar_label: Java +title: TDengine Java Connector +description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 + +![TDengine Database Connector Java](tdengine-jdbc-connector.webp) + +上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式: + +- JDBC 原生连接:Java 应用在物理节点 1(pnode1)上使用 TSDBDriver 直接调用客户端驱动(libtaos.so 或 taos.dll)的 API 将写入和查询请求发送到位于物理节点 2(pnode2)上的 taosd 实例。 +- JDBC REST 连接:Java 应用通过 RestfulDriver 将 SQL 封装成一个 REST 请求,发送给物理节点 2 的 REST 服务器(taosAdapter),通过 REST 服务器请求 taosd 并返回结果。 + +使用 REST 连接,不依赖 TDengine 客户端驱动,可以跨平台,更加方便灵活,但性能比原生连接器低约 30%。 + +:::info +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但 TDengine 与关系对象型数据库的使用场景和技术特征存在差异,所以`taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: + +- TDengine 目前不支持针对单条数据记录的删除操作。 +- 目前不支持事务操作。 + +::: + +## 支持的平台 + +原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 +REST 连接支持所有能运行 Java 的平台。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) | +| ----------------- | --------------------------------- | ---------------------------------- | +| TIMESTAMP | java.lang.Long | java.sql.Timestamp | +| INT | java.lang.Integer | java.lang.Integer | +| BIGINT | java.lang.Long | java.lang.Long | +| FLOAT | java.lang.Float | java.lang.Float | +| DOUBLE | java.lang.Double | java.lang.Double | +| SMALLINT | java.lang.Short | java.lang.Short | +| TINYINT | java.lang.Byte | java.lang.Byte | +| BOOL | java.lang.Boolean | java.lang.Boolean | +| BINARY | java.lang.String | byte array | +| NCHAR | java.lang.String | java.lang.String | +| JSON | - | java.lang.String | + +**注意**:JSON 类型仅在 tag 中支持。 + +## 安装步骤 + +### 安装前准备 + +使用 Java Connector 连接数据库前,需要具备以下条件: + +- 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本 +- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +### 安装连接器 + + + + +目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。 + +- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) +- 
[mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) +- [maven.aliyun](https://maven.aliyun.com/mvn/search) + +Maven 项目中,在 pom.xml 中添加以下依赖: + +```xml-dtd + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.** + +``` + + + + +可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector + +```shell +git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0 +cd taos-connector-jdbc +mvn clean install -Dmaven.test.skip=true +``` + +编译后,在 target 目录下会产生 taos-jdbcdriver-2.0.XX-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 + + + + +## 建立连接 + +TDengine 的 JDBC URL 规范格式为: +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` + +对于建立连接,原生连接与 REST 连接有细微不同。 + + + + +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +以上示例,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 + +**注意**:使用 JDBC 原生连接,taos-jdbcdriver 需要依赖客户端驱动(Linux 下是 libtaos.so;Windows 下是 taos.dll)。 + +url 中的配置参数如下: + +- user:登录 TDengine 用户名,默认值 'root'。 +- password:用户登录密码,默认值 'taosdata'。 +- cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 +- charset:客户端使用的字符集,默认值为系统字符集。 +- locale:客户端语言环境,默认值系统当前 locale。 +- timezone:客户端使用的时区,默认值为系统当前时区。 +- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。 +- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。 + +JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 + +**使用 TDengine 客户端驱动配置文件建立连接 ** + +当使用 JDBC 原生连接连接 TDengine 集群时,可以使用 TDengine 客户端驱动配置文件,在配置文件中指定集群的 firstEp、secondEp 等参数。如下所示: + +1. 在 Java 应用中不指定 hostname 和 port + +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +2. 
在配置文件中指定 firstEp 和 secondEp + +```shell +# first fully qualified domain name (FQDN) for TDengine system +firstEp cluster_node1:6030 + +# second fully qualified domain name (FQDN) for TDengine system, for cluster only +secondEp cluster_node2:6030 + +# default system charset +# charset UTF-8 + +# system locale +# locale en_US.UTF-8 +``` + +以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。 + +TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。 + +> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。 + + + + +```java +Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); +String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +以上示例,使用了 JDBC REST 连接的 RestfulDriver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 + +使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要: + +1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; +2. jdbcUrl 以“jdbc:TAOS-RS://”开头; +3. 使用 6041 作为连接端口。 + +url 中的配置参数如下: + +- user:登录 TDengine 用户名,默认值 'root'。 +- password:用户登录密码,默认值 'taosdata'。 +- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 +- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。 +- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 +- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。 +- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 5000。仅在 batchfetch 设置为 false 时生效。 +- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。 +- useSSL: 连接中是否使用 SSL。 + +**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。 + +:::note + +- 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如: + +```sql +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); +``` + +- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); + +::: + + + + +### 指定 URL 和 Properties 获取连接 + +除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数。 + +**注意**: + +- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。 +- 以下示例代码基于 taos-jdbcdriver-2.0.36。 + +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connProps.setProperty("debugFlag", "135"); + connProps.setProperty("maxSQLLength", "1048576"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} + +public Connection getRestConn() throws Exception{ + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; + 
Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030/6041,数据库名为 test 的连接。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区、是否开启批量拉取等信息。 + +properties 中的配置参数如下: + +- TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。 +- TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。 +- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 +- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 sq 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 +- TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。 +- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 +- TSDBDriver.PROPERTY_KEY_LOCALE:仅在使用 JDBC 原生连接时生效。 客户端语言环境,默认值系统当前 locale。 +- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。 +- TSDBDriver.HTTP_CONNECT_TIMEOUT: 连接超时时间,单位 ms, 默认值为 5000。仅在 REST 连接时生效。 +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 5000。仅在 REST 连接且 batchfetch 设置为 false 时生效。 +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。 +- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。 + 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。 + +### 配置参数的优先级 + +通过前面三种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下: + +1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。 +2. Properties connProps +3. 使用原生连接时,TDengine 客户端驱动的配置文件 taos.cfg + +例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。 + +## 使用示例 + +### 创建数据库和表 + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` + +> **注意**:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 + +### 插入数据 + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` + +> now 为系统内部函数,默认为客户端所在计算机当前时间。 +> `now + 1s` 代表客户端当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒),s(秒),m(分),h(小时),d(天),w(周),n(月),y(年)。 + +### 查询数据 + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` + +> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 + +### 处理异常 + +在报错后,通过 SQLException 可以获取到错误的信息和错误码: + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间),原生连接方法的报错(错误码在 0x2351 到 0x2400 之间),TDengine 其他功能模块的报错。 + +具体的错误码请参考: 
+ +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) +- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) + +### 通过参数绑定写入数据 + +从 2.1.2.0 版本开始,TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。 + +**注意**: + +- JDBC REST 连接目前不支持参数绑定 +- 以下示例代码基于 taos-jdbcdriver-2.0.36 +- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法 +- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽 + +```java +public class ParameterBindingDemo { + + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 30; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); + + init(conn); + + bindInteger(conn); + + bindFloat(conn); + + bindBoolean(conn); + + bindBytes(conn); + + bindString(conn); + + conn.close(); + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_parabind"); + stmt.execute("create database if not exists test_parabind"); + stmt.execute("use test_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) 
values(?,?,?,?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(3, random.nextLong()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setByte(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setShort(2, f2List); + + ArrayList f3List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f3List.add(random.nextInt(Integer.MAX_VALUE)); + pstmt.setInt(3, f3List); + + ArrayList f4List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f4List.add(random.nextLong()); + pstmt.setLong(4, f4List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute column + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(0, random.nextFloat()); + pstmt.setTagDouble(1, random.nextDouble()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(random.nextFloat()); + pstmt.setFloat(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(random.nextDouble()); + pstmt.setDouble(2, f2List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + // close if no try-with-catch statement is used + pstmt.close(); + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) 
values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t3_" + i); + // set tags + pstmt.setTagBoolean(0, random.nextBoolean()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(random.nextBoolean()); + pstmt.setBoolean(1, f1List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindBytes(Connection conn) throws SQLException { + String sql = "insert into ? using stable4 tags(?) values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t4_" + i); + // set tags + pstmt.setTagString(0, new String("abc")); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + f1List.add(new String("abc")); + } + pstmt.setString(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindString(Connection conn) throws SQLException { + String sql = "insert into ? using stable5 tags(?) values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t5_" + i); + // set tags + pstmt.setTagNString(0, "California.SanFrancisco"); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + f1List.add("California.LosAngeles"); + } + pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } +} +``` + +用于设定 TAGS 取值的方法总共有: + +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + +用于设定 VALUES 数据列的取值的方法总共有: + +```java +public void setInt(int columnIndex, ArrayList list) throws SQLException +public void setFloat(int columnIndex, ArrayList list) throws SQLException +public void setTimestamp(int columnIndex, ArrayList list) throws SQLException +public void setLong(int columnIndex, ArrayList list) throws SQLException +public void setDouble(int columnIndex, ArrayList list) throws SQLException +public void setBoolean(int columnIndex, 
+
+### 与连接池使用
+
+#### HikariCP
+
+使用示例如下:
+
+```java
+public static void main(String[] args) throws SQLException {
+    HikariConfig config = new HikariConfig();
+    // jdbc properties
+    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+    config.setUsername("root");
+    config.setPassword("taosdata");
+    // connection pool configurations
+    config.setMinimumIdle(10);          // minimum number of idle connections
+    config.setMaximumPoolSize(10);      // maximum number of connections in the pool
+    config.setConnectionTimeout(30000); // maximum milliseconds to wait for a connection from the pool
+    config.setMaxLifetime(0);           // maximum lifetime of each connection
+    config.setIdleTimeout(0);           // maximum idle time before an idle connection is recycled
+    config.setConnectionTestQuery("select server_status()"); // validation query
+
+    HikariDataSource ds = new HikariDataSource(config); // create datasource
+
+    Connection connection = ds.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+
+    // query or insert
+    // ...
+
+    connection.close(); // put back to connection pool
+}
+```
+
+> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
+> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。
+
+#### Druid
+
+使用示例如下:
+
+```java
+public static void main(String[] args) throws Exception {
+
+    DruidDataSource dataSource = new DruidDataSource();
+    // jdbc properties
+    dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+    dataSource.setUrl(url);
+    dataSource.setUsername("root");
+    dataSource.setPassword("taosdata");
+    // pool configurations
+    dataSource.setInitialSize(10);
+    dataSource.setMinIdle(10);
+    dataSource.setMaxActive(10);
+    dataSource.setMaxWait(30000);
+    dataSource.setValidationQuery("select server_status()");
+
+    Connection connection = dataSource.getConnection(); // get connection
+    Statement statement = connection.createStatement(); // get statement
+    // query or insert
+    // ...
+
+    connection.close(); // put back to connection pool
+}
+```
+
+> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。
+
+**注意事项:**
+
+- TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
+
+如下所示,`select server_status()` 执行成功会返回 `1`。
+
+```sql
+taos> select server_status();
+server_status()|
+================
+1 |
+Query OK, 1 row(s) in set (0.000141s)
+```
+
+### 更多示例程序
+
+示例程序源码位于 `TDengine/examples/JDBC` 下:
+
+- JDBCDemo:JDBC 示例源程序。
+- JDBCConnectorChecker:JDBC 安装校验源程序及 jar 包。
+- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
+- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
+- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。
+
+请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC)
+
+## 最近更新记录
+
+| taos-jdbcdriver 版本 |            主要变化            |
+| :------------------: | :----------------------------: |
+|   2.0.39 - 2.0.40    |  增加 REST 连接/请求 超时设置  |
+|        2.0.38        | JDBC REST 连接增加批量拉取功能 |
+|        2.0.37        |      增加对 json tag 支持      |
+|        2.0.36        |   增加对 schemaless 写入支持   |
+
+## 常见问题
+
+1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升?
+
+   **原因**:TDengine 的 JDBC 实现中,通过 `addBatch` 方法提交的 SQL 语句,会按照添加的顺序,依次执行,这种方式没有减少与服务端的交互次数,不会带来性能上的提升。
+
+   **解决方法**:1. 在一条 insert 语句中拼接多个 values 值(拼接写法见下方示例);2. 使用多线程的方式并发插入;3. 使用参数绑定的写入方式
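+
+   其中第 1 种方式的拼接写法示意如下(表名与数值均为假设):
+
+   ```sql
+   INSERT INTO d1001 VALUES
+     ('2022-07-01 08:00:00.000', 10.3)
+     ('2022-07-01 08:00:01.000', 10.5)
+     ('2022-07-01 08:00:02.000', 10.8);
+   ```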
+
+2. java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+   **原因**:程序没有找到依赖的本地函数库 taos。
+
+   **解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
+
+3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+   **原因**:目前 TDengine 只支持 64 位 JDK。
+
+   **解决方法**:重新安装 64 位 JDK。
+
+4. 
其它问题请参考 [FAQ](/train-faq/faq) + +## API 参考 + +[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver) diff --git a/docs/zh/14-reference/03-connector/node.mdx b/docs/zh/14-reference/03-connector/node.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d410dcfd96a12d6bbf4bcac705c60c30c94c7d0a --- /dev/null +++ b/docs/zh/14-reference/03-connector/node.mdx @@ -0,0 +1,252 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 6 +sidebar_label: Node.js +title: TDengine Node.js Connector +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import Preparition from "./_preparition.mdx"; +import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; + +`td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 + +`td2.0-connector` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`td2.0-rest-connector` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。 + +Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node)。 + +## 支持的平台 + +原生连接器支持的平台和 TDengine 客户端驱动支持的平台一致。 +REST 连接器支持所有能运行 Node.js 的平台。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 支持的功能特性 + +### 原生连接器 + +1. 连接管理 +2. 普通查询 +3. 连续查询 +4. 参数绑定 +5. 订阅功能 +6. Schemaless + +### REST 连接器 + +1. 连接管理 +2. 普通查询 +3. 连续查询 + +## 安装步骤 + +### 安装前准备 + +- 安装 Node.js 开发环境 +- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 + + + + +- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) +- `td2.0-connector` 2.0.6 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;2.0.5 及更早版本支持 Node.js LTS v10.x 版本。其他版本可能存在包兼容性的问题 +- `make` +- C 语言编译器,[GCC](https://gcc.gnu.org) v4.8.5 或更高版本 + + + + +- 安装方法 1 + +使用微软的[ windows-build-tools ](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具。 + +- 安装方法 2 + +手动安装以下工具: + +- 安装 Visual Studio 相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) +- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` +- 进入`cmd`命令行界面,`npm config set msvs_version 2017` + +参考微软的 Node.js 用户手册[ Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)。 + +如果在 Windows 10 ARM 上使用 ARM64 Node.js,还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。 + + + + +### 使用 npm 安装 + + + + +```bash +npm install td2.0-connector +``` + + + + +```bash +npm i td2.0-rest-connector +``` + + + + +### 安装验证 + +在安装好 TDengine 客户端后,使用 nodejsChecker.js 程序能够验证当前环境是否支持 Node.js 方式访问 TDengine。 + +验证方法: + +- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/develop/examples/nodejs/nodejsChecker.js)到本地。 + +- 在命令行中执行以下命令。 + +```bash +npm init 
-y
+npm install td2.0-connector
+node nodejsChecker.js host=localhost
+```
+
+- 执行以上步骤后,在命令行会输出 nodejsChecker.js 连接 TDengine 实例,并执行简单插入和查询的结果。
+
+## 建立连接
+
+请选择使用一种连接器。
+
+
+
+安装并引用 `td2.0-connector` 包。
+
+```javascript
+//A cursor also needs to be initialized in order to interact with TDengine from Node.js.
+const taos = require("td2.0-connector");
+var conn = taos.connect({
+  host: "127.0.0.1",
+  user: "root",
+  password: "taosdata",
+  config: "/etc/taos",
+  port: 0,
+});
+var cursor = conn.cursor(); // Initializing a new cursor
+
+//Close a connection
+conn.close();
+```
+
+
+
+安装并引用 `td2.0-rest-connector` 包。
+
+```javascript
+//A cursor also needs to be initialized in order to interact with TDengine from Node.js.
+import { options, connect } from "td2.0-rest-connector";
+options.path = "/rest/sqlt";
+// set host
+options.host = "localhost";
+// set other options like user/passwd
+
+let conn = connect(options);
+let cursor = conn.cursor();
+```
+
+
+
+## 使用示例
+
+### 写入数据
+
+#### SQL 写入
+
+
+
+#### InfluxDB 行协议写入
+
+
+
+#### OpenTSDB Telnet 行协议写入
+
+
+
+#### OpenTSDB JSON 行协议写入
+
+
+
+### 查询数据
+
+
+
+## 更多示例程序
+
+| 示例程序 | 示例程序描述 |
+| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- |
+| [connection](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/cursorClose.js) | 建立连接的示例。 |
+| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamBatchSample.js) | 绑定多行参数插入的示例。 |
+| [stmtBind](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamSample.js) | 一行一行绑定参数插入的示例。 |
+| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindSingleParamBatchSample.js) | 按列绑定参数插入的示例。 |
+| [stmtUseResult](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtUseResultSample.js) | 绑定参数查询的示例。 |
+| [json tag](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testJsonTag.js) | Json tag 的使用示例。 |
+| [Nanosecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testNanoseconds.js) | 时间戳为纳秒精度的使用的示例。 |
+| [Microsecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testMicroseconds.js) | 时间戳为微秒精度的使用的示例。 |
+| [schemaless insert](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSchemalessInsert.js) | schemaless 插入的示例。 |
+| [subscribe](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSubscribe.js) | 订阅的使用示例。 |
+| [asyncQuery](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/tset.js) | 异步查询的使用示例。 |
+| [REST](https://github.com/taosdata/taos-connector-node/blob/develop/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 |
+
+## 使用限制
+
+Node.js 连接器(>= v2.0.6)目前支持的 Node.js 版本为:>=v10.9.0 <= v10.20.0 || >=v12.8.0 <= v12.9.1;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。
+
+## 其他说明
+
+Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)。
+
+## 常见问题
+
+1. 使用 REST 连接需要启动 taosAdapter。
+
+   ```bash
+   sudo systemctl start taosadapter
+   ```
+
+2. Node.js 版本
+
+   连接器 >v2.0.6 目前兼容的 Node.js 版本为:>=v10.9.0 <= v10.20.0 || >=v12.8.0 <= v12.9.1
+
+3. 
"Unable to establish connection","Unable to resolve FQDN" + + 一般都是因为配置 FQDN 不正确。 可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html) 。 + +## 重要更新记录 + +### 原生连接器 + +| td2.0-connector 版本 | 说明 | +| -------------------- | ---------------------------------------------------------------- | +| 2.0.12 | 修复 cursor.close() 报错的 bug。 | +| 2.0.11 | 支持绑定参数、json tag、schemaless 接口等功能。 | +| 2.0.10 | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 | + +### REST 连接器 + +| td2.0-rest-connector 版本 | 说明 | +| ------------------------- | ---------------------------------------------------------------- | +| 1.0.3 | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 | + +## API 参考 + +[API 参考](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs/zh/14-reference/03-connector/php.mdx b/docs/zh/14-reference/03-connector/php.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d93a295627e543c70d0bfaf946c90ead93c491c7 --- /dev/null +++ b/docs/zh/14-reference/03-connector/php.mdx @@ -0,0 +1,150 @@ +--- +sidebar_position: 1 +sidebar_label: PHP +title: PHP Connector +--- + +`php-tdengine` 是由社区贡献的 PHP 连接器扩展,还特别支持了 Swoole 协程化。 + +PHP 连接器依赖 TDengine 客户端驱动。 + +项目地址: + +TDengine 服务端或客户端安装后,`taos.h` 位于: + +- Linux:`/usr/local/taos/include` +- Windows:`C:\TDengine\include` + +TDengine 客户端驱动的动态库位于: + +- Linux: `/usr/local/taos/driver/libtaos.so` +- Windows: `C:\TDengine\taos.dll` + +## 支持的平台 + +* Windows、Linux、MacOS + +* PHP >= 7.4 + +* TDengine >= 2.0 + +* Swoole >= 4.8 (可选) + +## 支持的版本 + +TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一对应的强对应关系,建议使用与 TDengine 服务端完全相同的客户端驱动。虽然低版本的客户端驱动在前三段版本号一致(即仅第四段版本号不同)的情况下也能够与高版本的服务端相兼容,但这并非推荐用法。强烈不建议使用高版本的客户端驱动访问低版本的服务端。 + +## 安装步骤 + +### 安装 TDengine 客户端驱动 + +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) + +### 编译安装 php-tdengine + +**下载代码并解压:** + +```shell +curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \ +&& mkdir php-tdengine \ +&& tar -xzf php-tdengine.tar.gz -C php-tdengine --strip-components=1 +``` + +> 版本 `v1.0.2` 可替换为任意更新的版本,可在 [TDengine PHP Connector 发布历史](https://github.com/Yurunsoft/php-tdengine/releases)。 + +**非 Swoole 环境:** + +```shell +phpize && ./configure && make -j && make install +``` + +**手动指定 tdengine 目录:** + +```shell +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +``` + +> `--with-tdengine-dir=` 后跟上 tdengine 目录。 +> 适用于默认找不到的情况,或者 MacOS 系统用户。 + +**Swoole 环境:** + +```shell +phpize && ./configure --enable-swoole && make -j && make install +``` + +**启用扩展:** + +方法一:在 `php.ini` 中加入 `extension=tdengine` + +方法二:运行带参数 `php -dextension=tdengine test.php` + +## 示例程序 + +本节展示了使用客户端驱动访问 TDengine 集群的常见访问方式的示例代码。 + +> 所有错误都会抛出异常: `TDengine\Exception\TDengineException` + +### 建立连接 + +
+```php
+{{#include docs/examples/php/connect.php}}
+```
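+
+如前所述,所有错误都会抛出 `TDengine\Exception\TDengineException`,可以按下面的方式统一捕获(以下为示意,假设该异常类继承自 PHP 标准 \Exception):
+
+```php
+<?php
+
+use TDengine\Exception\TDengineException;
+
+try {
+    // 这里执行建立连接、查询等操作(见上方示例)
+} catch (TDengineException $e) {
+    // 标准 \Exception 接口:获取错误码与错误信息
+    echo $e->getCode(), ': ', $e->getMessage(), PHP_EOL;
+}
+```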
+ +### 插入数据 + +
+```php
+{{#include docs/examples/php/insert.php}}
+```
+ +### 同步查询 + +
+```php
+{{#include docs/examples/php/query.php}}
+```
+ +### 参数绑定 + +
+```php
+{{#include docs/examples/php/insert_stmt.php}}
+```
+
+## 常量
+
+| 常量 | 说明 |
+| ------------ | ------------ |
+| `TDengine\TSDB_DATA_TYPE_NULL` | null |
+| `TDengine\TSDB_DATA_TYPE_BOOL` | bool |
+| `TDengine\TSDB_DATA_TYPE_TINYINT` | tinyint |
+| `TDengine\TSDB_DATA_TYPE_SMALLINT` | smallint |
+| `TDengine\TSDB_DATA_TYPE_INT` | int |
+| `TDengine\TSDB_DATA_TYPE_BIGINT` | bigint |
+| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
+| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
+| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
+| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
+| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
+| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
+| `TDengine\TSDB_DATA_TYPE_USMALLINT` | usmallint |
+| `TDengine\TSDB_DATA_TYPE_UINT` | uint |
+| `TDengine\TSDB_DATA_TYPE_UBIGINT` | ubigint |
diff --git a/docs/zh/14-reference/03-connector/python.mdx b/docs/zh/14-reference/03-connector/python.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..5e2af7d516456e9e7f034ea012da003471776c70
--- /dev/null
+++ b/docs/zh/14-reference/03-connector/python.mdx
@@ -0,0 +1,362 @@
+---
+sidebar_position: 3
+sidebar_label: Python
+title: TDengine Python Connector
+description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。taospy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 taospy 的两个子模块:taos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas"
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块和 `taosrest` 模块。
+除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。
+
+使用客户端驱动提供的原生接口直接与服务端建立连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立连接的方式下文中称为“REST 连接”。
+
+Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-python)。
+
+## 支持的平台
+
+- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。
+- REST 连接支持所有能运行 Python 的平台。
+
+## 版本选择
+
+无论使用什么版本的 TDengine 都建议使用最新版本的 `taospy`。
+
+## 支持的功能
+
+- 原生连接支持 TDengine 的所有核心功能, 包括: 连接管理、执行 SQL、参数绑定、订阅、无模式写入(schemaless)。
+- REST 连接支持的功能包括:连接管理、执行 SQL。(通过执行 SQL 可以: 管理数据库、管理表和超级表、写入数据、查询数据、创建连续查询等)。
+
+## 安装
+
+### 准备
+
+1. 安装 Python。建议使用 Python >= 3.6。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。
+2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。
+3. 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll)和 TDengine CLI。
+
+### 使用 pip 安装
+
+#### 卸载旧版本
+
+如果以前安装过旧版本的 Python 连接器, 请提前卸载。
+
+```
+pip3 uninstall taos taospy
+```
+
+:::note
+较早的 TDengine 客户端软件包含了 Python 连接器。如果从客户端软件的安装目录安装了 Python 连接器,那么对应的 Python 包名是 `taos`。 所以上述卸载命令包含了 `taos`, 不存在也没关系。
+
+:::
+
+#### 安装 `taospy`
+
+
+
+安装最新版本
+
+```
+pip3 install taospy
+```
+
+也可以指定某个特定版本安装。
+
+```
+pip3 install taospy==2.3.0
+```
+
+
+
+```
+pip3 install git+https://github.com/taosdata/taos-connector-python.git
+```
+
+
+
+### 安装验证
+
+
+
+对于原生连接,需要验证客户端驱动和 Python 连接器本身是否都正确安装。如果能成功导入 `taos` 模块,则说明已经正确安装了客户端驱动和 Python 连接器。可在 Python 交互式 Shell 中输入:
+
+```python
+import taos
+```
+
+
+
+对于 REST 连接,只需验证是否能成功导入 `taosrest` 模块。可在 Python 交互式 Shell 中输入:
+
+```python
+import taosrest
+```
+
+
+
+:::tip
+如果系统上有多个版本的 Python,则可能有多个 `pip` 命令。要确保使用的 `pip` 命令路径是正确的。上面我们用 `pip3` 命令安装,排除了使用 Python 2.x 版本对应的 `pip` 的可能性。但是如果系统上有多个 Python 3.x 版本,仍需检查安装路径是否正确。最简单的验证方式是,在命令行再次输入 `pip3 install taospy`, 就会打印出 `taospy` 的具体安装位置,比如在 Windows 上:
+
+```
+C:\> pip3 install taospy
+Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
+Requirement already satisfied: taospy in c:\users\username\appdata\local\programs\python\python310\lib\site-packages (2.3.0)
+```
+
+:::
+
+## 建立连接
+
+### 连通性测试
+
+在用连接器建立连接之前,建议先测试本地 TDengine CLI 到 TDengine 集群的连通性。
+
+
+
+请确保 TDengine 集群已经启动, 且集群中机器的 FQDN (如果启动的是单机版,FQDN 默认为 hostname)在本机能够解析, 可用 `ping` 命令进行测试:
+
+```
+ping <FQDN>
+```
+
+然后测试用 TDengine CLI 能否正常连接集群:
+
+```
+taos -h <FQDN> -P <PORT>
+```
+
+上面的 FQDN 可以为集群中任意一个 dnode 的 FQDN, PORT 为这个 dnode 对应的 serverPort。
+
+
+
+对于 REST 连接, 除了确保集群已经启动,还要确保 taosAdapter 组件已经启动。可以使用如下 curl 命令测试:
+
+```
+curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
+```
+
+上面的 FQDN 为运行 taosAdapter 的机器的 FQDN, PORT 为 taosAdapter 配置的监听端口, 默认为 6041。
+如果测试成功,会输出服务器版本信息,比如:
+
+```json
+{
+  "status": "succ",
+  "head": ["server_version()"],
+  "column_meta": [["server_version()", 8, 8]],
+  "data": [["2.4.0.16"]],
+  "rows": 1
+}
+```
+
+
+
+### 使用连接器建立连接
+
+以下示例代码假设 TDengine 安装在本机, 且 FQDN 和 serverPort 都使用了默认配置。
+
+
+
+```python
+{{#include docs/examples/python/connect_native_reference.py}}
+```
+
+`connect` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明:
+
+- `host` : 要连接的节点的 FQDN。没有默认值。如果不提供此参数,则会连接客户端配置文件中的 firstEP。
+- `user` :TDengine 用户名。 默认值是 root。
+- `password` : TDengine 用户密码。 默认值是 taosdata。
+- `port` : 要连接的数据节点的起始端口,即 serverPort 配置。默认值是 6030。只有在提供了 host 参数的时候,这个参数才生效。
+- `config` : 客户端配置文件路径。 在 Windows 系统上默认是 `C:\TDengine\cfg`。 在 Linux 系统上默认是 `/etc/taos/`。
+- `timezone` : 查询结果中 TIMESTAMP 类型的数据,转换为 python 的 datetime 对象时使用的时区。默认为本地时区。
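+
+例如,可以这样显式传入各个关键字参数(以下为示意,参数取值均为假设):
+
+```python
+import taos
+
+# 全部参数均为可选的关键字参数
+conn = taos.connect(
+    host="localhost",
+    user="root",
+    password="taosdata",
+    port=6030,
+    timezone="Asia/Shanghai",
+)
+# 使用完毕后关闭连接
+conn.close()
+```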
+
+:::warning
+`config` 和 `timezone` 都是进程级别的配置。建议一个进程建立的所有连接都使用相同的参数值。否则可能产生无法预知的错误。
+:::
+
+:::tip
+`connect` 函数返回 `taos.TaosConnection` 实例。 在客户端多线程的场景下,推荐每个线程申请一个独立的连接实例,而不建议多线程共享一个连接。
+
+:::
+
+
+
+```python
+{{#include docs/examples/python/connect_rest_examples.py:connect}}
+```
+
+`connect()` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明:
+
+- `url`: taosAdapter REST 服务的 URL。默认是 <http://localhost:6041>。
+- `user`: TDengine 用户名。默认是 root。
+- `password`: TDengine 用户密码。默认是 taosdata。
+- `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。
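+
+例如(以下为示意,URL 取值为假设):
+
+```python
+import taosrest
+
+# 通过 taosAdapter 的 REST 接口建立连接
+conn = taosrest.connect(
+    url="http://localhost:6041",
+    user="root",
+    password="taosdata",
+    timeout=30,
+)
+```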
+
+
+## 示例程序
+
+### 基本使用
+
+
+
+##### TaosConnection 类的使用
+
+`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor` 方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法)。
+
+```python title="execute 方法"
+{{#include docs/examples/python/connection_usage_native_reference.py:insert}}
+```
+
+```python title="query 方法"
+{{#include docs/examples/python/connection_usage_native_reference.py:query}}
+```
+
+:::tip
+查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。
+:::
+
+##### TaosResult 类的使用
+
+上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。
+
+```python title="blocks_iter 方法"
+{{#include docs/examples/python/result_set_examples.py}}
+```
+
+##### TaosCursor 类的使用
+
+`TaosConnection` 类和 `TaosResult` 类已经实现了原生接口的所有功能。如果你对 PEP249 规范中的接口比较熟悉也可以使用 `TaosCursor` 类提供的方法。
+
+```python title="TaosCursor 的使用"
+{{#include docs/examples/python/cursor_usage_native_reference.py}}
+```
+
+:::note
+TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
+
+:::
+
+
+
+##### TaosRestCursor 类的使用
+
+`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
+
+```python title="TaosRestCursor 的使用"
+{{#include docs/examples/python/connect_rest_examples.py:basic}}
+```
+
+- `cursor.execute` : 用来执行任意 SQL 语句。
+- `cursor.rowcount`: 对于写入操作返回写入成功记录数。对于查询操作,返回结果集行数。
+- `cursor.description` : 返回字段的描述信息。关于描述信息的具体格式请参考 [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html)。
+
+##### RestClient 类的使用
+
+`RestClient` 类是对于 [REST API](../../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
+
+```python title="RestClient 的使用"
+{{#include docs/examples/python/rest_client_example.py}}
+```
+
+对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
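+
+一个最小的调用示意如下(URL 为假设,构造参数请以上述 API 文档为准):
+
+```python
+from taosrest import RestClient
+
+client = RestClient("http://localhost:6041")
+res = client.sql("select server_version()")  # 执行 SQL 并返回结果
+print(res)
+```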
+
+### 与 pandas 一起使用
+
+
+
+```python
+{{#include docs/examples/python/conn_native_pandas.py}}
+```
+
+
+
+```python
+{{#include docs/examples/python/conn_rest_pandas.py}}
+```
+
+
+
+```python
+{{#include docs/examples/python/conn_native_sqlalchemy.py}}
+```
+
+
+
+```python
+{{#include docs/examples/python/conn_rest_sqlalchemy.py}}
+```
+
+
+
+### 其它示例程序
+
+| 示例程序链接 | 示例程序内容 |
+| ------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | 参数绑定, 一次绑定多行 |
+| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 |
+| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 |
+| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 |
+| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | 异步订阅 |
+| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | 同步订阅 |
+
+## 其它说明
+
+### 异常处理
+
+所有数据库操作如果出现异常,都会直接抛出来。由应用程序负责异常处理。比如:
+
+```python
+{{#include docs/examples/python/handle_exception.py}}
+```
+
+### 关于纳秒 (nanosecond)
+
+由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。
+
+1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
+2. https://www.python.org/dev/peps/pep-0564/
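+
+例如,可以这样把纳秒精度的整数时间戳转换为 pandas 的时间类型(示意,时间戳取值为假设):
+
+```python
+import pandas as pd
+
+ts_ns = 1626006833639000000  # 纳秒精度的整数时间戳
+ts = pd.to_datetime(ts_ns, unit="ns")  # 转换为 pandas.Timestamp
+print(ts)
+```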
+
+## 常见问题
+
+欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。
+
+## 重要更新
+
+| 连接器版本 | 重要更新 | 发布日期 |
+| ---------- | --------------------------------------------------------------------------------- | ---------- |
+| 2.3.1 | 1. support TDengine REST API <br/> 2. remove support for Python version below 3.6 | 2022-04-28 |
+| 2.2.5 | support timezone option when connect | 2022-04-13 |
+| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 |
+
+[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases)
+
+## API 参考
+
+- [taos](https://docs.taosdata.com/api/taospy/taos/)
+- [taosrest](https://docs.taosdata.com/api/taospy/taosrest)
diff --git a/docs/zh/14-reference/03-connector/rust.mdx b/docs/zh/14-reference/03-connector/rust.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fa6d496fa578799237648c1a6d314c1783e5c627
--- /dev/null
+++ b/docs/zh/14-reference/03-connector/rust.mdx
@@ -0,0 +1,388 @@
+---
+toc_max_heading_level: 4
+sidebar_position: 5
+sidebar_label: Rust
+title: TDengine Rust Connector
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+import Preparition from "./_preparition.mdx"
+import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
+import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
+import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
+import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
+import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
+
+[![Crates.io](https://img.shields.io/crates/v/libtaos)](https://crates.io/crates/libtaos) ![Crates.io](https://img.shields.io/crates/d/libtaos) [![docs.rs](https://img.shields.io/docsrs/libtaos)](https://docs.rs/libtaos)
+
+`libtaos` 是 TDengine 的官方 Rust 语言连接器。Rust 开发人员可以通过它开发存取 TDengine 数据库的应用软件。
+
+`libtaos` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。另外一种是 **REST 连接**,它通过 taosAdapter 的 REST 接口连接 TDengine 运行实例。你可以通过不同的“特性(即 Cargo 关键字 features)”来指定使用哪种连接器。REST 连接支持任何平台,但原生连接支持所有 TDengine 客户端能运行的平台。
+
+`libtaos` 的源码托管在 [GitHub](https://github.com/taosdata/libtaos-rs)。
+
+## 支持的平台
+
+原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
+REST 连接支持所有能运行 Rust 的平台。
+
+## 版本支持
+
+请参考[版本支持列表](../#版本支持)
+
+Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 2.4 版本以上的 TDengine,以避免已知问题。
+
+## 安装
+
+### 安装前准备
+
+* 安装 Rust 开发工具链
+* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
+
+### 添加 libtaos 依赖
+
+根据选择的连接方式,按照如下说明在 [Rust](https://rust-lang.org) 项目中添加 [libtaos][libtaos] 依赖:
+
+
+
+在 `Cargo.toml` 文件中添加 [libtaos][libtaos]:
+
+```toml
+[dependencies]
+# use default feature
+libtaos = "*"
+```
+
+
+
+在 `Cargo.toml` 文件中添加 [libtaos][libtaos],并启用 `rest` 特性。
+
+```toml
+[dependencies]
+# use rest feature
+libtaos = { version = "*", features = ["rest"]}
+```
+
+
+
+### 使用连接池
+
+请在 `Cargo.toml` 中启用 `r2d2` 特性。
+
+```toml
+[dependencies]
+# with taosc
+libtaos = { version = "*", features = ["r2d2"] }
+# or rest
+libtaos = { version = "*", features = ["rest", "r2d2"] }
+```
+
+## 建立连接
+
+[TaosCfgBuilder] 为使用者提供构造器形式的 API,以便于后续创建连接或使用连接池。
+
+```rust
+let cfg: TaosCfg = TaosCfgBuilder::default()
+    .ip("127.0.0.1")
+    .user("root")
+    .pass("taosdata")
+    .db("log") // do not set if not require a default database.
+    .port(6030u16)
+    .build()
+    .expect("TaosCfg builder error");
+```
+
+现在您可以使用该对象创建连接:
+
+```rust
+let conn = cfg.connect()?;
+```
+
+连接对象可以创建多个:
+
+```rust
+let conn = cfg.connect()?;
+let conn2 = cfg.connect()?;
+```
+
+可以在应用中使用连接池:
+
+```rust
+let pool = r2d2::Pool::builder()
+    .max_size(10000) // max connections
+    .build(cfg)?;
+
+// ...
+// Use pool to get connection
+let conn = pool.get()?;
+```
+
+之后您可以对数据库进行相关操作:
+
+```rust
+async fn demo() -> Result<(), Error> {
+    // get connection ... 
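+    // 示意(假设):conn 可由上文的 cfg.connect()? 或连接池的 pool.get()? 获得
+    // let conn = cfg.connect()?;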
+
+    // create database
+    conn.exec("create database if not exists demo").await?;
+    // change database context
+    conn.exec("use demo").await?;
+    // create table
+    conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?;
+    // insert
+    conn.exec("insert into tb1 values(now, 1)").await?;
+    // query
+    let rows = conn.query("select * from tb1").await?;
+    for row in rows.rows {
+        println!("{}", row.into_iter().join(","));
+    }
+    Ok(())
+}
+```
+
+## 使用示例
+
+### 写入数据
+
+#### SQL 写入
+
+
+
+#### InfluxDB 行协议写入
+
+
+
+#### OpenTSDB Telnet 行协议写入
+
+
+
+#### OpenTSDB JSON 行协议写入
+
+
+
+### 查询数据
+
+
+
+### 更多示例程序
+
+| 程序路径 | 程序说明 |
+| -------------- | ----------------------------------------------------------------------------- |
+| [demo.rs] | 基本 API 使用示例 |
+| [bailongma-rs] | 使用 TDengine 作为存储后端的 Prometheus 远程存储 API 适配器,使用 r2d2 连接池 |
+
+## API 参考
+
+### 连接构造器 API
+
+[Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) 构造器模式是 Rust 处理复杂数据类型或可选配置类型的解决方案。[libtaos] 实现中,使用连接构造器 [TaosCfgBuilder] 作为 TDengine Rust 连接器的入口。[TaosCfgBuilder] 提供对服务器、端口、数据库、用户名和密码等的可选配置。
+
+使用 `default()` 方法可以构建一个默认参数的 [TaosCfg],用于后续连接数据库或建立连接池。
+
+```rust
+let cfg = TaosCfgBuilder::default().build()?;
+```
+
+使用构造器模式,用户可按需设置:
+
+```rust
+let cfg = TaosCfgBuilder::default()
+    .ip("127.0.0.1")
+    .user("root")
+    .pass("taosdata")
+    .db("log")
+    .port(6030u16)
+    .build()?;
+```
+
+使用 [TaosCfg] 对象创建 TDengine 连接:
+
+```rust
+let conn: Taos = cfg.connect()?;
+```
+
+### 连接池
+
+在复杂应用中,建议启用连接池。[libtaos] 的连接池使用 [r2d2] 实现。
+
+如下,可以生成一个默认参数的连接池。
+
+```rust
+let pool = r2d2::Pool::new(cfg)?;
+```
+
+同样可以使用连接池的构造器,对连接池参数进行设置:
+
+```rust
+use std::time::Duration;
+let pool = r2d2::Pool::builder()
+    .max_size(5000) // max connections
+    .max_lifetime(Some(Duration::from_secs(100 * 60))) // lifetime of each connection
+    .min_idle(Some(1000)) // minimal idle connections
+    .connection_timeout(Duration::from_secs(2 * 60))
+    .build(cfg);
+```
+
+在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。
+
+```rust
+let taos = pool.get()?;
+```
+
+### 连接
+
+[Taos] 结构体是 [libtaos] 中的连接管理者,主要提供了两个 API:
+
+1. `exec`: 执行某个非查询类 SQL 语句,例如 `CREATE`,`ALTER`,`INSERT` 等。
+
+   ```rust
+   taos.exec(sql).await?;
+   ```
+
+2. `query`:执行查询语句,返回 [TaosQueryData] 对象。
+
+   ```rust
+   let q = taos.query("select * from log.logs").await?;
+   ```
+
+   [TaosQueryData] 对象存储了查询结果数据和返回的列的基本信息(列名,类型,长度):
+
+   列信息使用 [ColumnMeta] 存储:
+
+   ```rust
+   let cols = &q.column_meta;
+   for col in cols {
+       println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes);
+   }
+   ```
+
+   逐行获取数据:
+
+   ```rust
+   for (i, row) in q.rows.iter().enumerate() {
+       for (j, cell) in row.iter().enumerate() {
+           println!("cell({}, {}) data: {}", i, j, cell);
+       }
+   }
+   ```
+
+需要注意的是,需要使用 Rust 异步函数和异步运行时。
+
+[Taos] 提供部分 SQL 的 Rust 方法化以减少 `format!` 代码块的频率:
+
+- `.describe(table: &str)`: 执行 `DESCRIBE` 并返回一个 Rust 数据结构。
+- `.create_database(database: &str)`: 执行 `CREATE DATABASE` 语句。
+- `.use_database(database: &str)`: 执行 `USE` 语句。
+
+除此之外,该结构也是 [参数绑定](#参数绑定接口) 和 [行协议接口](#行协议接口) 的入口,使用方法请参考具体的 API 说明。
+
+### 参数绑定接口
+
+与 C 接口类似,Rust 提供参数绑定接口。首先,通过 [Taos] 对象创建一个 SQL 语句的参数绑定对象 [Stmt]:
+
+```rust
+let mut stmt: Stmt = taos.stmt("insert into ? values(?,?)")?;
+```
+
+参数绑定对象提供了一组接口用于实现参数绑定:
+
+##### `.set_tbname(tbname: impl ToCString)`
+
+用于绑定表名。
+
+##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
+
+当 SQL 语句使用超级表时,用于绑定子表表名和标签值:
+
+```rust
+let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?,?)")?;
values(?,?)")?; +// tags can be created with any supported type, here is an example using JSON +let v = Field::Json(serde_json::from_str("{\"tag1\":\"一二三四五六七八九十\"}").unwrap()); +stmt.set_tbname_tags("tb0", [&tag])?; +``` + +##### `.bind(params: impl IntoParams)` + +用于绑定值类型。使用 [Field] 结构体构建需要的类型并绑定: + +```rust +let ts = Field::Timestamp(Timestamp::now()); +let value = Field::Float(0.0); +stmt.bind(vec![ts, value].iter())?; +``` + +##### `.execute()` + +执行 SQL。[Stmt] 对象可以复用,在执行后可以重新绑定并执行。 + +```rust +stmt.execute()?; + +// next bind cycle. +//stmt.set_tbname()?; +//stmt.bind()?; +//stmt.execute()?; +``` + +### 行协议接口 + +行协议接口支持多种模式和不同精度,需要引入 schemaless 模块中的常量以进行设置: + +```rust +use libtaos::*; +use libtaos::schemaless::*; +``` + +- InfluxDB 行协议 + + ```rust + let lines = [ + "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000" + "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000" + ]; + taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?; + ``` + +- OpenTSDB Telnet 协议 + + ```rust + let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"]; + taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?; + ``` + +- OpenTSDB JSON 协议 + + ```rust + let lines = [r#" + { + "metric": "st", + "timestamp": 1626006833, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + }"#]; + taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?; + ``` + +其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。 + +[libtaos]: https://github.com/taosdata/libtaos-rs +[tdengine]: https://github.com/taosdata/TDengine +[bailongma-rs]: https://github.com/taosdata/bailongma-rs +[r2d2]: https://crates.io/crates/r2d2 +[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs +[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html +[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html +[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html +[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html +[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html +[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp similarity index 100% rename from docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp rename to docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md new file mode 100644 index 0000000000000000000000000000000000000000..7888bbee7eac0b1f6e223d05637203aae314be8f --- /dev/null +++ b/docs/zh/14-reference/04-taosadapter.md @@ -0,0 +1,338 @@ +--- +title: "taosAdapter" +description: "taosAdapter 是一个 TDengine 的配套工具,是 TDengine 集群和应用程序之间的桥梁和适配器。它提供了一种易于使用和高效的方式来直接从数据收集代理软件(如 Telegraf、StatsD、collectd 等)摄取数据。它还提供了 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine" +sidebar_label: "taosAdapter" +--- + +import Prometheus from "./_prometheus.mdx" +import CollectD from "./_collectd.mdx" +import StatsD from "./_statsd.mdx" +import Icinga2 from "./_icinga2.mdx" +import TCollector from "./_tcollector.mdx" + +taosAdapter 是一个 TDengine 的配套工具,是 TDengine 集群和应用程序之间的桥梁和适配器。它提供了一种易于使用和高效的方式来直接从数据收集代理软件(如 Telegraf、StatsD、collectd 等)摄取数据。它还提供了 
InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。 + +taosAdapter 提供以下功能: + +- RESTful 接口 +- 兼容 InfluxDB v1 写接口 +- 兼容 OpenTSDB JSON 和 telnet 格式写入 +- 无缝连接到 Telegraf +- 无缝连接到 collectd +- 无缝连接到 StatsD +- 支持 Prometheus remote_read 和 remote_write + +## taosAdapter 架构图 + +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) + +## taosAdapter 部署方法 + +### 安装 taosAdapter + +taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD-CN.md)文档。 + +### start/stop taosAdapter + +在 Linux 系统上 taosAdapter 服务默认由 systemd 管理。使用命令 `systemctl start taosadapter` 可以启动 taosAdapter 服务。使用命令 `systemctl stop taosadapter` 可以停止 taosAdapter 服务。 + +### 移除 taosAdapter + +使用命令 rmtaos 可以移除包括 taosAdapter 在内的 TDengine server 软件。 + +### 升级 taosAdapter + +taosAdapter 和 TDengine server 需要使用相同版本。请通过升级 TDengine server 来升级 taosAdapter。 +与 taosd 分离部署的 taosAdapter 必须通过升级其所在服务器的 TDengine server 才能得到升级。 + +## taosAdapter 参数列表 + +taosAdapter 支持通过命令行参数、环境变量和配置文件来进行配置。默认配置文件是 /etc/taos/taosadapter.toml。 + +命令行参数优先于环境变量优先于配置文件,命令行用法是 arg=val,如 taosadapter -p=30000 --debug=true,详细列表如下: + +```shell +Usage of taosAdapter: + --collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd") + --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) + --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") + --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) + --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") + --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) + -c, --config string config path default /etc/taos/taosadapter.toml + --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) + --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" + --cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS" + --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" + --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" + --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" + --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" + --help Print this help message and exit + --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") + --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) + --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") + --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") + --monitor.collectDuration duration Set monitor duration. 
Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) + --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" + --monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" + --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") + --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) + --monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") + --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true) + --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" + --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" + --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") + --node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE" + --node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s) + --node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING" + --node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD" + --node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME" + --node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true) + --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" + --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") + --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) + --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) + --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") + --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) + --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) + --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") + --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) + --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" + --opentsdb_telnet.user string opentsdb_telnet user. 
Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") + --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s) + --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000) + --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000) + -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) + --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) + --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) + --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE" + --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE" + --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE" + --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) + --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") + --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) + --statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true) + --statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true) + --statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true) + --statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true) + --statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s) + --statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250) + --statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata") + --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) + --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") + --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" + --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") + --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) + --taosConfigDir string load taos client config path. 
Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" + --version Print the version and exit +``` + +备注: +使用浏览器进行接口调用请根据实际情况设置如下跨源资源共享(CORS)参数: + +```text +AllowAllOrigins +AllowOrigins +AllowHeaders +ExposeHeaders +AllowCredentials +AllowWebSockets +``` + +如果不通过浏览器进行接口调用无需关心这几项配置。 + +关于 CORS 协议细节请参考:[https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) 或 [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS)。 + +示例配置文件参见 [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml)。 + +## 功能列表 + +- 与 RESTful 接口兼容 + [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- 兼容 InfluxDB v1 写接口 + [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) +- 兼容 OpenTSDB JSON 和 telnet 格式写入 + - + - +- 与 collectd 无缝连接 + collectd 是一个系统统计收集守护程序,请访问 [https://collectd.org/](https://collectd.org/) 了解更多信息。 +- Seamless connection with StatsD + StatsD 是一个简单而强大的统计信息汇总的守护程序。请访问 [https://github.com/statsd/statsd](https://github.com/statsd/statsd) 了解更多信息。 +- 与 icinga2 的无缝连接 + icinga2 是一个收集检查结果指标和性能数据的软件。请访问 [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) 了解更多信息。 +- 与 tcollector 无缝连接 + TCollector 是一个客户端进程,从本地收集器收集数据,并将数据推送到 OpenTSDB。请访问 [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) 了解更多信息。 +- 无缝连接 node_exporter + node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。 +- 支持 Prometheus remote_read 和 remote_write + remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。 + +## 接口 + +### TDengine RESTful 接口 + +您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../connector#restful)。支持如下 EndPoint : + +```text +/rest/sql +/rest/sqlt +/rest/sqlutc +``` + +### InfluxDB + +您可以使用任何支持 http 协议的客户端访问 Restful 接口地址 `http://:6041/` 来写入 InfluxDB 兼容格式的数据到 TDengine。EndPoint 如下: + +```text +/influxdb/v1/write +``` + +支持 InfluxDB 查询参数如下: + +- `db` 指定 TDengine 使用的数据库名 +- `precision` TDengine 使用的时间精度 +- `u` TDengine 用户名 +- `p` TDengine 密码 + +注意: 目前不支持 InfluxDB 的 token 验证方式只支持 Basic 验证和查询参数验证。 + +### OpenTSDB + +您可以使用任何支持 http 协议的客户端访问 Restful 接口地址 `http://:6041/` 来写入 OpenTSDB 兼容格式的数据到 TDengine。EndPoint 如下: + +```text +/opentsdb/v1/put/json/ +/opentsdb/v1/put/telnet/ +``` + +### collectd + + + +### StatsD + + + +### icinga2 OpenTSDB writer + + + +### TCollector + + + +### node_exporter + +Prometheus 使用的由\*NIX 内核暴露的硬件和操作系统指标的输出器 + +- 启用 taosAdapter 的配置 node_exporter.enable +- 设置 node_exporter 的相关配置 +- 重新启动 taosAdapter + +### prometheus + + + +## 内存使用优化方法 + +taosAdapter 将监测自身运行过程中内存使用率并通过两个阈值进行调节。有效值范围为 -1 到 100 的整数,单位为系统物理内存的百分比。 + +- pauseQueryMemoryThreshold +- pauseAllMemoryThreshold + +当超过 pauseQueryMemoryThreshold 阈值时时停止处理查询请求。 + +http 返回内容: + +- code 503 +- body "query memory exceeds threshold" + +当超过 pauseAllMemoryThreshold 阈值时停止处理所有写入和查询请求。 + +http 返回内容: + +- code 503 +- body "memory exceeds threshold" + +当内存回落到阈值之下时恢复对应功能。 + +状态检查接口 `http://:6041/-/ping` + +- 正常返回 
`code 200` +- 无参数 如果内存超过 pauseAllMemoryThreshold 将返回 `code 503` +- 请求参数 `action=query` 如果内存超过 pauseQueryMemoryThreshold 或 pauseAllMemoryThreshold 将返回 `code 503` + +对应配置参数 + +```text + monitor.collectDuration 监测间隔 环境变量 "TAOS_MONITOR_COLLECT_DURATION" (默认值 3s) + monitor.incgroup 是否是cgroup中运行(容器中运行设置为 true) 环境变量 "TAOS_MONITOR_INCGROUP" + monitor.pauseAllMemoryThreshold 不再进行插入和查询的内存阈值 环境变量 "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (默认值 80) + monitor.pauseQueryMemoryThreshold 不再进行查询的内存阈值 环境变量 "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (默认值 70) +``` + +您可以根据具体项目应用场景和运营策略进行相应调整,并建议使用运营监控软件及时进行系统内存状态监控。负载均衡器也可以通过这个接口检查 taosAdapter 运行状态。 + +## taosAdapter 监控指标 + +taosAdapter 采集 http 相关指标、cpu 百分比和内存百分比。 + +### http 接口 + +提供符合 [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md) 接口: + +```text +http://:6041/metrics +``` + +### 写入 TDengine + +taosAdapter 支持将 http 监控、cpu 百分比和内存百分比写入 TDengine。 + +有关配置参数 + +| **配置项** | **描述** | **默认值** | +| ----------------------- | --------------------------------------------------------- | ---------- | +| monitor.collectDuration | cpu 和内存采集间隔 | 3s | +| monitor.identity | 当前 taosadapter 的标识符如果不设置将使用 'hostname:port' | | +| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false | +| monitor.writeToTD | 是否写入到 TDengine | true | +| monitor.user | TDengine 连接用户名 | root | +| monitor.password | TDengine 连接密码 | taosdata | +| monitor.writeInterval | 写入 TDengine 间隔 | 30s | + +## 结果返回条数限制 + +taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 代表无限制,默认无限制。 + +该参数控制以下接口返回 + +- `http://:6041/rest/sql` +- `http://:6041/rest/sqlt` +- `http://:6041/rest/sqlutc` +- `http://:6041/prometheus/v1/remote_read/:db` + +## 故障解决 + +您可以通过命令 `systemctl status taosadapter` 来检查 taosAdapter 运行状态。 + +您也可以通过设置 --logLevel 参数或者环境变量 TAOS_ADAPTER_LOG_LEVEL 来调节 taosAdapter 日志输出详细程度。有效值包括: panic、fatal、error、warn、warning、info、debug 以及 trace。 + +## 如何从旧版本 TDengine 迁移到 taosAdapter + +在 TDengine server 2.2.x.x 或更早期版本中,taosd 进程包含一个内嵌的 http 服务。如前面所述,taosAdapter 是一个使用 systemd 管理的独立软件,拥有自己的进程。并且两者有一些配置参数和行为是不同的,请见下表: + +| **#** | **embedded httpd** | **taosAdapter** | **comment** | +| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | +| 1 | httpEnableRecordSql | --logLevel=debug | | +| 2 | httpMaxThreads | n/a | taosAdapter 自动管理线程池,无需此参数 | +| 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 | | +| 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 | +| 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 | +| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | diff --git a/docs-cn/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md similarity index 100% rename from docs-cn/14-reference/05-taosbenchmark.md rename to docs/zh/14-reference/05-taosbenchmark.md diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md new file mode 100644 index 0000000000000000000000000000000000000000..625499a94926ac3f86e4d34976c70a0bfe7b0954 --- /dev/null +++ b/docs/zh/14-reference/06-taosdump.md @@ -0,0 +1,123 @@ +--- +title: taosdump +description: "taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序" +--- + +## 简介 + +taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。 + +taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级 
+ +### taosdump 恢复数据 + +恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会被覆盖或重复备份。 + +:::tip +taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中单行列数较多,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。 + +::: + +## 详细命令行参数列表 + +以下为 taosdump 详细命令行参数列表: + +``` +Usage: taosdump [OPTION...] dbname [tbname ...] + or: taosdump [OPTION...] --databases db1,db2,... + or: taosdump [OPTION...] --all-databases + or: taosdump [OPTION...] -i inpath + or: taosdump [OPTION...] -o outpath + + -h, --host=HOST Server host dumping data from. Default is + localhost. + -p, --password User password to connect to server. Default is + taosdata. + -P, --port=PORT Port to connect + -u, --user=USER User name used to connect to server. Default is + root. + -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos + -i, --inpath=INPATH Input file path. + -o, --outpath=OUTPATH Output file path. + -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. + -a, --allow-sys Allow to dump system database + -A, --all-databases Dump all databases. + -D, --databases=DATABASES Dump inputted databases. Use comma to separate + databases' name. + -N, --without-property Dump database without its properties. + -s, --schemaonly Only dump tables' schema. + -y, --answer-yes Input yes for prompt. It will skip data file + checking! + -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, + and lzma. + -S, --start-time=START_TIME Start time to dump. Either epoch or + ISO8601/RFC3339 format is acceptable. ISO8601 + format example: 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00:000+0800 or '2017-10-01 + 00:00:00.000+0800' + -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 + format is acceptable. ISO8601 format example: + 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00.000+0800 or '2017-10-01 + 00:00:00.000+0800' + -B, --data-batch=DATA_BATCH Number of data per query/insert statement when + backup/restore. Default value is 16384.
If you see + 'error actual dump .. batch ..' when backup or if + you see 'WAL size exceeds limit' error when + restore, please adjust the value to a smaller one + and try. The workable value is related to the + length of the row and type of table schema. + -I, --inspect inspect avro file content and print on screen + -L, --loose-mode Using loose mode if the table name and column name + use letter and number only. Default is NOT. + -n, --no-escape No escape char '`'. Default is using it. + -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is + 8. + -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service + -R, --restful Use RESTful interface to connect TDengine + -t, --timeout=SECONDS The timeout seconds for websocket to interact. + -g, --debug Print debug info. + -?, --help Give this help list + --usage Give a short usage message + -V, --version Print program version + +Mandatory or optional arguments to long options are also mandatory or optional +for any corresponding short options. + +Report bugs to . +``` diff --git a/docs-cn/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs/zh/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json rename to docs/zh/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json diff --git a/docs-en/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json b/docs/zh/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json rename to docs/zh/14-reference/07-tdinsight/assets/15155-tdengine-alert-demo.json diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp 
b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-full.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp rename to docs/zh/14-reference/07-tdinsight/assets/TDinsight-full.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs/zh/14-reference/07-tdinsight/assets/alert-manager-status.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp rename to docs/zh/14-reference/07-tdinsight/assets/alert-manager-status.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs/zh/14-reference/07-tdinsight/assets/alert-notification-channel.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp rename to docs/zh/14-reference/07-tdinsight/assets/alert-notification-channel.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs/zh/14-reference/07-tdinsight/assets/alert-query-demo.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp rename to docs/zh/14-reference/07-tdinsight/assets/alert-query-demo.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs/zh/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp rename to docs/zh/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs/zh/14-reference/07-tdinsight/assets/alert-rule-test.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp rename to docs/zh/14-reference/07-tdinsight/assets/alert-rule-test.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp similarity index 100% rename from 
docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-dashboard-display.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-dashboard-display.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-import-dashboard.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp rename to docs/zh/14-reference/07-tdinsight/assets/howto-import-dashboard.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs/zh/14-reference/07-tdinsight/assets/import-dashboard-15167.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp rename to docs/zh/14-reference/07-tdinsight/assets/import-dashboard-15167.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs/zh/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp rename to docs/zh/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs/zh/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp rename to docs/zh/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp rename to docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs/zh/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json similarity index 100% rename from docs-cn/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json rename to 
docs/zh/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine-grafana.json b/docs/zh/14-reference/07-tdinsight/assets/tdengine-grafana.json similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/tdengine-grafana.json rename to docs/zh/14-reference/07-tdinsight/assets/tdengine-grafana.json diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs/zh/14-reference/07-tdinsight/assets/tdengine_dashboard.webp similarity index 100% rename from docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp rename to docs/zh/14-reference/07-tdinsight/assets/tdengine_dashboard.webp diff --git a/docs-cn/14-reference/07-tdinsight/index.md b/docs/zh/14-reference/07-tdinsight/index.md similarity index 100% rename from docs-cn/14-reference/07-tdinsight/index.md rename to docs/zh/14-reference/07-tdinsight/index.md diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md new file mode 100644 index 0000000000000000000000000000000000000000..dd91cbdff70ace25a6a09e2867b0d15f8979d103 --- /dev/null +++ b/docs/zh/14-reference/08-taos-shell.md @@ -0,0 +1,88 @@ +--- +title: TDengine 命令行(CLI) +sidebar_label: TDengine CLI +description: TDengine CLI 的使用说明和技巧 +--- + +TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用的方式。 + +## 安装 + +如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [安装客户端驱动](../connector/#安装客户端驱动)。 + +## 执行 + +要进入 TDengine CLI,您只要在 Linux 终端或 Windows 终端执行 `taos` 即可。 + +```bash +taos +``` + +如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](../../train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下: + +```cmd +taos> +``` + +进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。 + +## 执行 SQL 脚本 + +在 TDengine CLI 里可以通过 `source` 命令来运行脚本文件中的多条 SQL 命令。 + +```sql +taos> source ; +``` + +## 在线修改显示字符宽度 + +可以在 TDengine CLI 里使用如下命令调整字符显示宽度 + +```sql +taos> SET MAX_BINARY_DISPLAY_WIDTH ; +``` + +如显示的内容后面以 ... 
结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。 + +## 命令行参数 + +您可通过配置命令行参数来改变 TDengine CLI 的行为。以下为常用的几个命令行参数: + +- -h, --host=HOST: 要连接的 TDengine 服务端所在服务器的 FQDN,默认为连接本地服务 +- -P, --port=PORT: 指定服务端所用端口号 +- -u, --user=USER: 连接时使用的用户名 +- -p, --password=PASSWORD: 连接服务端时使用的密码 +- -?, --help: 打印出所有命令行参数 + +还有更多其他参数: + +- -c, --config-dir: 指定配置文件目录,Linux 环境下默认为 `/etc/taos`,该目录下的配置文件默认名称为 `taos.cfg` +- -C, --dump-config: 打印 -c 指定的目录中 `taos.cfg` 的配置参数 +- -d, --database=DATABASE: 指定连接到服务端时使用的数据库 +- -D, --directory=DIRECTORY: 导入指定路径中的 SQL 脚本文件 +- -f, --file=FILE: 以非交互模式执行 SQL 脚本文件。文件中一个 SQL 语句只能占一行 +- -k, --check=CHECK: 指定要检查的表 +- -l, --pktlen=PKTLEN: 网络测试时使用的测试包大小 +- -n, --netrole=NETROLE: 网络连接测试时的测试范围,默认为 `startup`,可选值为 `client`、`server`、`rpc`、`startup`、`sync`、`speed` 和 `fqdn` 之一 +- -r, --raw-time: 将时间输出为无符号 64 位整数类型(即 C 语言中的 uint64_t) +- -s, --commands=COMMAND: 以非交互模式执行的 SQL 命令(即在不进入终端的情况下运行 TDengine 命令) +- -S, --pkttype=PKTTYPE: 指定网络测试所用的包类型,默认为 TCP。只有 netrole 为 `speed` 时才既可以指定为 TCP 也可以指定为 UDP +- -T, --thread=THREADNUM: 以多线程模式导入数据时的线程数 +- -z, --timezone=TIMEZONE: 指定时区,默认为本地时区 +- -V, --version: 打印出当前版本号 + +示例: + +```bash +taos -h h1.taos.com -s "use db; show tables;" +``` + +## TDengine CLI 小技巧 + +- 可以使用上下光标键查看历史输入的指令 +- 在 TDengine CLI 中使用 `alter user` 命令可以修改用户密码,缺省密码为 `taosdata` +- Ctrl+C 中止正在进行中的查询 +- 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存 +- 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句 +- 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI diff --git a/docs-cn/14-reference/09-support-platform/_category_.yml b/docs/zh/14-reference/09-support-platform/_category_.yml similarity index 100% rename from docs-cn/14-reference/09-support-platform/_category_.yml rename to docs/zh/14-reference/09-support-platform/_category_.yml diff --git a/docs-cn/14-reference/09-support-platform/index.md b/docs/zh/14-reference/09-support-platform/index.md similarity index 100% rename from docs-cn/14-reference/09-support-platform/index.md rename to docs/zh/14-reference/09-support-platform/index.md diff --git a/docs-cn/14-reference/11-docker/_category_.yml b/docs/zh/14-reference/11-docker/_category_.yml similarity index 100% rename from docs-cn/14-reference/11-docker/_category_.yml rename to docs/zh/14-reference/11-docker/_category_.yml diff --git a/docs-cn/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md similarity index 100% rename from docs-cn/14-reference/11-docker/index.md rename to docs/zh/14-reference/11-docker/index.md diff --git a/docs-cn/14-reference/12-config/_category_.yml b/docs/zh/14-reference/12-config/_category_.yml similarity index 100% rename from docs-cn/14-reference/12-config/_category_.yml rename to docs/zh/14-reference/12-config/_category_.yml diff --git a/docs-cn/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md similarity index 100% rename from docs-cn/14-reference/12-config/index.md rename to docs/zh/14-reference/12-config/index.md diff --git a/docs/zh/14-reference/12-directory.md b/docs/zh/14-reference/12-directory.md new file mode 100644 index 0000000000000000000000000000000000000000..0caf7e03c32b475e82b6f0bcf58ba2d9225aa6bc --- /dev/null +++ b/docs/zh/14-reference/12-directory.md @@ -0,0 +1,41 @@ +--- +title: 文件目录结构 +description: "TDengine 安装目录说明" +--- + +安装 TDengine 后,默认会在操作系统中生成下列目录或文件: + +| 目录/文件 | 说明 | +| ------------------------- | -------------------------------------------------------------------- | +| /usr/local/taos/bin | TDengine 可执行文件目录。其中的执行文件都会软链接到/usr/bin 目录下。 | +| /usr/local/taos/driver | TDengine 动态链接库目录。会软链接到/usr/lib 目录下。 | +| /usr/local/taos/examples | TDengine 各种语言应用示例目录。 | +| /usr/local/taos/include | TDengine 对外提供的 C 语言接口的头文件。 | +| /etc/taos/taos.cfg | TDengine 默认[配置文件] | +| /var/lib/taos | TDengine 默认数据文件目录。可通过[配置文件]修改位置。 | +| /var/log/taos | TDengine 默认日志文件目录。可通过[配置文件]修改位置。 |
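+ +安装完成后,可用如下命令粗略核对上述目录布局(仅为示意,假设是使用默认安装路径的 Linux 环境): + +```bash +# 查看可执行文件目录及其到 /usr/bin 的软链接 +ls -l /usr/local/taos/bin +ls -l /usr/bin/taos /usr/bin/taosd + +# 查看默认配置、数据与日志目录 +ls /etc/taos /var/lib/taos /var/log/taos +```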
+ +## 可执行文件 + +TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下。其中包括: + +- _taosd_:TDengine 服务端可执行文件 +- _taos_:TDengine Shell 可执行文件 +- _taosdump_:数据导入导出工具 +- _taosBenchmark_:TDengine 测试工具 +- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos +- _taosadapter_:提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件 +- _tarbitrator_:提供双节点集群部署的仲裁功能 +- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本 +- _set_core.sh_:用于方便调试设置系统生成 core dump 文件的脚本 +- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。 + +:::note +2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。 + +::: + +:::tip +您可以通过修改系统配置文件 taos.cfg 来配置不同的数据目录和日志目录。 + +::: diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md new file mode 100644 index 0000000000000000000000000000000000000000..ac356d54439823a7f1558bd2ecd3c1f3f6f8bcdb --- /dev/null +++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md @@ -0,0 +1,166 @@ +--- +title: Schemaless 写入 +description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,由数据写入接口自动创建与数据对应的存储结构' +--- + +在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine +从 2.2.0.0 版本开始,提供 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,由数据写入接口自动创建与数据对应的存储结构。并且在必要时,Schemaless +将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 + +无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以表名本身没有明确含义,缺乏可读性。 + +## 无模式写入行协议 + +TDengine 的无模式写入的行协议兼容 InfluxDB 的行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 JSON 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 + +对于 InfluxDB、OpenTSDB 的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。 + +Schemaless 采用一个字符串来表达一个数据行(可以向写入 API 中一次传入多行字符串来实现多个数据行的批量写入),其格式约定如下: + +```text +measurement,tag_set field_set timestamp +``` + +其中: + +- measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。 +- tag_set 将作为标签数据,其格式形如 `=,=`,也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。 +- field_set 将作为普通列数据,其格式形如 `=,=`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。 +- timestamp 即本行数据对应的主键时间戳。 + +tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要使用双引号(")。 + +在无模式写入数据行协议中,field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说: + +- 如果两边有英文双引号,表示 BINARY(32) 类型。例如 `"abc"`。 +- 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。 +- 对空格、等号(=)、逗号(,)、双引号(")等符号,前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号) +- 数值类型将通过后缀来区分数据类型: + +| **序号** | **后缀** | **映射类型** | **大小(字节)** | +| -------- | -------- | ------------ | -------------- | +| 1 | 无或 f64 | double | 8 | +| 2 | f32 | float | 4 | +| 3 | i8/u8 | TinyInt/UTinyInt | 1 | +| 4 | i16/u16 | SmallInt/USmallInt | 2 | +| 5 | i32/u32 | Int/UInt | 4 | +| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 | + +- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。 + +例如如下数据行表示:向名为 st 的超级表下的 t1 标签为 "3"(NCHAR)、t2 标签为 "4"(NCHAR)、t3 +标签为 "t3"(NCHAR)的数据子表,写入 c1 列为 3(BIGINT)、c2 列为 false(BOOL)、c3 +列为 "passit"(BINARY)、c4 列为 4(DOUBLE)、主键时间戳为 1626006833639000000 的一行数据。 + +```text +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 +``` + +需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。 + +## 无模式写入的主要处理逻辑 + +无模式写入按照如下原则来处理行数据: + +1. 将使用如下规则来生成子表名:首先将 measurement 的名称和标签的 key 和 value 组合成为如下的字符串 + +```text +"measurement,tag_key1=tag_value1,tag_key2=tag_value2" +``` + +需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 +排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀(列表之后附有一个命令行计算示意)。 + +2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。 +3. 如果解析行协议获得的子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 +4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 +5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 + NULL。 +6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 +7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 +8. 为了提高写入效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序排列)。如果顺序不一样,需要将配置参数 smlDataFormat 设置为 false,否则数据仍会按相同顺序解析写入,库中数据会异常。 + +:::tip +无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 +48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) + +:::
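+ +下面用 shell 命令演示这一子表名生成规则(仅为示意,假设系统提供 md5sum 命令;拼接字符串的具体细节以 TDengine 的实际实现为准): + +```bash +# 标签按名称升序排列拼接到 measurement 之后,取字符串的 MD5 散列值,再加固定前缀 t_ +KEY='st,t1=3,t2=4,t3=t3' +echo "t_$(printf '%s' "$KEY" | md5sum | cut -d ' ' -f 1)" +```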
+ +## 时间分辨率识别 + +无模式写入过程中支持三种指定的协议模式,具体如下: + +| **序号** | **值** | **说明** | +| -------- | ------------------- | ------------------------------- | +| 1 | SML_LINE_PROTOCOL | InfluxDB 行协议(Line Protocol) | +| 2 | SML_TELNET_PROTOCOL | OpenTSDB 文本行协议 | +| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON 协议格式 | + +在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示: + +| **序号** | **时间分辨率定义** | **含义** | +| -------- | --------------------------------- | -------------- | +| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | 未定义(无效) | +| 2 | TSDB_SML_TIMESTAMP_HOURS | 小时 | +| 3 | TSDB_SML_TIMESTAMP_MINUTES | 分钟 | +| 4 | TSDB_SML_TIMESTAMP_SECONDS | 秒 | +| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | 毫秒 | +| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | 微秒 | +| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | 纳秒 | + +在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。 + +## 数据模式映射规则 + +本节将说明行协议的数据如何映射成为具有模式的数据。行协议中的 measurement 映射为超级表名称,tag_set 中的标签名称映射为数据模式中的标签名,field_set 中的名称映射为列名称。以如下数据为例,说明映射规则: + +```text +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 +``` + +该行数据映射生成一个超级表:st,其包含了 3 个类型为 nchar 的标签,分别是:t1, t2, t3。五个数据列,分别是 ts(timestamp),c1 (bigint),c3(binary),c2 (bool), c4 (double)。映射成为如下 SQL 语句: + +```sql +create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 double) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) +``` + +## 数据模式变更处理 + +本节将说明不同行数据写入情况下,对于数据模式的影响。 + +在使用行协议以明确的类型后缀写入某个字段后,后续若更改该字段的类型定义,会出现明确的数据模式错误,即会触发写入 API 报告错误。如下所示, + +```text +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000 +``` + +第一行的数据类型映射将 c4 列定义为 Double,但是第二行的数据又通过数值后缀方式声明该列为 BigInt,由此会触发无模式写入的解析错误。 + +如果前面的行协议将数据列声明为 binary,后续又写入了长度更长的 binary 值,此时会触发超级表模式的变更。 + +```text +st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 +``` + +第一行中行协议解析会声明 c5 列是一个 binary(4) 的字段,第二行数据写入时提取的列 c5 仍然是 binary 列,但是其宽度为 6,此时会将 binary 列的宽度增加到能够容纳新字符串的宽度。 + +```text +st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 +``` + +第二行数据相对于第一行来说增加了一个列 c6,类型为 binary(6)。那么此时会自动增加一个列 c6,类型为 binary(6)。 + +## 写入完整性 + +TDengine 提供数据写入的幂等性保证,即您可以反复调用 API 进行出错数据的写入操作。但是不提供多行数据写入的原子性保证。即在多行数据一批次写入过程中,会出现部分数据写入成功,部分数据写入失败的情况。 + +## 错误码 + +如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR +错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 +taos_errstr 获取具体的错误原因。 diff --git a/docs-cn/14-reference/13-schemaless/_category_.yml
b/docs/zh/14-reference/13-schemaless/_category_.yml similarity index 100% rename from docs-cn/14-reference/13-schemaless/_category_.yml rename to docs/zh/14-reference/13-schemaless/_category_.yml diff --git a/docs-cn/14-reference/_category_.yml b/docs/zh/14-reference/_category_.yml similarity index 100% rename from docs-cn/14-reference/_category_.yml rename to docs/zh/14-reference/_category_.yml diff --git a/docs-cn/14-reference/_collectd.mdx b/docs/zh/14-reference/_collectd.mdx similarity index 100% rename from docs-cn/14-reference/_collectd.mdx rename to docs/zh/14-reference/_collectd.mdx diff --git a/docs-cn/14-reference/_icinga2.mdx b/docs/zh/14-reference/_icinga2.mdx similarity index 100% rename from docs-cn/14-reference/_icinga2.mdx rename to docs/zh/14-reference/_icinga2.mdx diff --git a/docs-cn/14-reference/_prometheus.mdx b/docs/zh/14-reference/_prometheus.mdx similarity index 100% rename from docs-cn/14-reference/_prometheus.mdx rename to docs/zh/14-reference/_prometheus.mdx diff --git a/docs-cn/14-reference/_statsd.mdx b/docs/zh/14-reference/_statsd.mdx similarity index 100% rename from docs-cn/14-reference/_statsd.mdx rename to docs/zh/14-reference/_statsd.mdx diff --git a/docs-cn/14-reference/_tcollector.mdx b/docs/zh/14-reference/_tcollector.mdx similarity index 100% rename from docs-cn/14-reference/_tcollector.mdx rename to docs/zh/14-reference/_tcollector.mdx diff --git a/docs-cn/14-reference/_telegraf.mdx b/docs/zh/14-reference/_telegraf.mdx similarity index 100% rename from docs-cn/14-reference/_telegraf.mdx rename to docs/zh/14-reference/_telegraf.mdx diff --git a/docs-cn/14-reference/index.md b/docs/zh/14-reference/index.md similarity index 100% rename from docs-cn/14-reference/index.md rename to docs/zh/14-reference/index.md diff --git a/docs-en/14-reference/taosAdapter-architecture.webp b/docs/zh/14-reference/taosAdapter-architecture.webp similarity index 100% rename from docs-en/14-reference/taosAdapter-architecture.webp rename to docs/zh/14-reference/taosAdapter-architecture.webp diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d7a03ba6c878c293bf7f1786a57e9e0808fed711 --- /dev/null +++ b/docs/zh/20-third-party/01-grafana.mdx @@ -0,0 +1,215 @@ +--- +sidebar_label: Grafana +title: Grafana +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/) 快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表的内容可以在仪表盘(DashBoard)上进行可视化展现。关于 TDengine 插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。 + +## 前置条件 + +要让 Grafana 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 + +- TDengine 集群已经部署并正常运行 +- taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter) + +记录以下信息: + +- TDengine 集群 REST API 地址,如:`http://tdengine.local:6041`。 +- TDengine 集群认证信息,可使用用户名及密码。 + +## 安装 Grafana + +目前 TDengine 支持 Grafana 7.5 以上的版本。用户可以根据当前的操作系统,到 Grafana 官网下载安装包,并执行安装。下载地址如下:。 + +## 配置 Grafana + +### 安装 Grafana Plugin 并配置数据源 + + + + +使用 Grafana 最新版本(8.5+),您可以在 Grafana 中[浏览和管理插件](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog)(对于 7.x 版本,请使用 **安装脚本** 或 **手动安装并配置** 方式)。在 Grafana 管理界面中的 **Configurations > Plugins** 页面直接搜索并按照提示安装 TDengine。 + +![Search tdengine in grafana plugins](grafana-plugin-search-tdengine.png) + +如图示即安装完毕,按照指示 **Create a TDengine data source** 添加数据源。 + +![Install and configure Grafana data source](grafana-install-and-config.png) + 
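+在填写数据源配置前,可先用 curl 验证 taosAdapter 的 REST 服务可用(仅为示意,假设服务运行在 http://localhost:6041,账号为默认的 root/taosdata,请按实际环境替换): + +```bash +# 能返回 JSON 结果即说明 REST 服务与账号可用 +curl -u root:taosdata -d "show databases;" http://localhost:6041/rest/sql +``` +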
+输入 TDengine 相关配置,完成数据源配置。 + +![TDengine Database Grafana plugin add data source](./grafana-data-source.png) + +配置完毕,现在可以使用 TDengine 创建 Dashboard 了。 + + + + +对于使用 Grafana 7.x 版本或使用 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置的用户,可以在 Grafana 服务器上使用安装脚本自动安装插件即添加数据源 Provisioning 配置文件。 + +```sh +bash -c "$(curl -fsSL \ + https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \ + -a http://localhost:6041 \ + -u root \ + -p taosdata +``` + +安装完毕后,需要重启 Grafana 服务后方可生效。 + +保存该脚本并执行 `./install.sh --help` 可查看详细帮助文档。 + + + + +使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。 + +```bash +grafana-cli plugins install tdengine-datasource +# with sudo +sudo -u grafana grafana-cli plugins install tdengine-datasource +``` + +或者从 [GitHub](https://github.com/taosdata/grafanaplugin/tags) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下: + +```bash +GF_VERSION=3.2.2 +# from GitHub +wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip +# from Grafana +wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download +``` + +以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。 + +```bash +sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ +``` + +如果 Grafana 在 Docker 环境下运行,可以使用如下的环境变量设置自动安装 TDengine 数据源插件: + +```bash +GF_INSTALL_PLUGINS=tdengine-datasource +``` + +之后,用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: + +![TDengine Database Grafana plugin add data source](./add_datasource1.webp) + +点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: + +![TDengine Database Grafana plugin add data source](./add_datasource2.webp) + +进入数据源配置页面,按照默认提示修改相应配置即可: + +![TDengine Database Grafana plugin add data source](./add_datasource3.webp) + +- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 。 +- User:TDengine 用户名。 +- Password:TDengine 用户密码。 + +点击 `Save & Test` 进行测试,成功会有如下提示: + +![TDengine Database Grafana plugin add data source](./add_datasource4.webp) + + + + +参考 [Grafana 容器化安装说明](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container)。使用如下命令启动一个容器,并自动安装 TDengine 插件: + +```bash +docker run -d \ + -p 3000:3000 \ + --name=grafana \ + -e "GF_INSTALL_PLUGINS=tdengine-datasource" \ + grafana/grafana +``` + +使用 docker-compose,配置 Grafana Provisioning 自动化配置,体验 TDengine + Grafana 组合的零配置启动: + +1. 保存该文件为 `tdengine.yml`。 + + ```yml + apiVersion: 1 + datasources: + - name: TDengine + type: tdengine-datasource + orgId: 1 + url: "$TDENGINE_API" + isDefault: true + secureJsonData: + url: "$TDENGINE_URL" + basicAuth: "$TDENGINE_BASIC_AUTH" + token: "$TDENGINE_CLOUD_TOKEN" + version: 1 + editable: true + ``` + +2. 
保存该文件为 `docker-compose.yml`。 + + ```yml + version: "3.7" + + services: + tdengine: + image: tdengine/tdengine:2.6.0.2 + environment: + TAOS_FQDN: tdengine + volumes: + - tdengine-data:/var/lib/taos/ + grafana: + image: grafana/grafana:8.5.6 + volumes: + - ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml + - grafana-data:/var/lib/grafana + environment: + # install tdengine plugin at start + GF_INSTALL_PLUGINS: "tdengine-datasource" + TDENGINE_URL: "http://tdengine:6041" + #printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64 + TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ==" + ports: + - 3000:3000 + volumes: + grafana-data: + tdengine-data: + ``` + +3. 使用 docker-compose 命令启动 TDengine + Grafana :`docker-compose up -d`。 + +打开 Grafana ,现在可以添加 Dashboard 了。 + + + + +### 创建 Dashboard + +回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: + +![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp) + +如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: + +- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。 +- ALIAS BY:可设置当前查询别名。 +- GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 + +按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: + +![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp) + +> 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。 + +### 导入 Dashboard + +在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。该 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。 + +使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表: + +- [15146](https://grafana.com/grafana/dashboards/15146): 监控多个 TDengine 集群 +- [15155](https://grafana.com/grafana/dashboards/15155): TDengine 告警示例 +- [15167](https://grafana.com/grafana/dashboards/15167): TDinsight +- [16388](https://grafana.com/grafana/dashboards/16388): Telegraf 采集节点信息的数据展示 diff --git a/docs-cn/20-third-party/02-prometheus.md b/docs/zh/20-third-party/02-prometheus.md similarity index 100% rename from docs-cn/20-third-party/02-prometheus.md rename to docs/zh/20-third-party/02-prometheus.md diff --git a/docs-cn/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md similarity index 100% rename from docs-cn/20-third-party/03-telegraf.md rename to docs/zh/20-third-party/03-telegraf.md diff --git a/docs-cn/20-third-party/05-collectd.md b/docs/zh/20-third-party/05-collectd.md similarity index 100% rename from docs-cn/20-third-party/05-collectd.md rename to docs/zh/20-third-party/05-collectd.md diff --git a/docs-cn/20-third-party/06-statsd.md b/docs/zh/20-third-party/06-statsd.md similarity index 100% rename from docs-cn/20-third-party/06-statsd.md rename to docs/zh/20-third-party/06-statsd.md diff --git a/docs-cn/20-third-party/07-icinga2.md b/docs/zh/20-third-party/07-icinga2.md similarity index 100% rename from docs-cn/20-third-party/07-icinga2.md rename to docs/zh/20-third-party/07-icinga2.md diff --git a/docs-cn/20-third-party/08-tcollector.md b/docs/zh/20-third-party/08-tcollector.md similarity index 100% rename from docs-cn/20-third-party/08-tcollector.md rename to docs/zh/20-third-party/08-tcollector.md diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md new file mode 100644 index 
0000000000000000000000000000000000000000..a30064463367f1cf12839baba2f8c74e8b526fc9 --- /dev/null +++ b/docs/zh/20-third-party/09-emq-broker.md @@ -0,0 +1,149 @@ +--- +sidebar_label: EMQX Broker +title: EMQX Broker 写入 +--- + +MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx) 是一款开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过发送到 Web 服务的方式保存数据到 TDengine,也可在企业版上通过原生的 TDengine 驱动实现直接保存。 + +## 前置条件 + +要让 EMQX 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 + +- TDengine 集群已经部署并正常运行 +- taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](../../reference/taosadapter) +- 如果使用后文介绍的模拟写入程序,需要安装合适版本的 Node.js,推荐安装 v12 + +## 安装并启动 EMQX + +用户可以根据当前的操作系统,到 EMQX 官网下载安装包,并执行安装。下载地址如下:。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。 + +注意:本文基于 EMQX v4.4.5 版本,其他版本由于相关配置界面、配置方法以及功能可能随着版本升级有所区别。 + +## 创建数据库和表 + +在 TDengine 中为接收 MQTT 数据创建相应数据库和表结构。进入 TDengine CLI 复制并执行以下 SQL 语句: + +```sql +CREATE DATABASE test; +USE test; +CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); +``` + +注:表结构以博客[数据传输、存储、展现,EMQX + TDengine 搭建 MQTT 物联网数据可视化平台](https://www.taosdata.com/blog/2020/08/04/1722.html)为例。后续操作均以此博客场景为例进行,请根据实际应用场景进行修改。 + +## 配置 EMQX 规则 + +由于 EMQX 不同版本的配置界面有所不同,这里仅以 v4.4.5 为例,其他版本请参考相应官网文档。 + +### 登录 EMQX Dashboard + +使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装时用户名为 `admin`,密码为 `public`。 + +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) + +### 创建规则(Rule) + +选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮: + +![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) + +### 编辑 SQL 字段 + +复制以下内容输入到 SQL 编辑框: + +```sql +SELECT + payload +FROM + "sensor/data" +``` + +其中 `payload` 代表整个消息体,`sensor/data` 为本规则选取的消息主题。 + +![TDengine Database EMQX create rule](./emqx/create-rule.webp) + +### 新增“动作(action handler)” + +![TDengine Database EMQX](./emqx/add-action-handler.webp) + +### 新增“资源(Resource)” + +![TDengine Database EMQX create resource](./emqx/create-resource.webp) + +选择“发送数据到 Web 服务”并点击“新建资源”按钮: + +### 编辑“资源(Resource)” + +选择“WebHook”并填写“请求 URL”为 taosAdapter 提供 REST 服务的地址,如果是本地启动的 taosAdapter,那么默认地址为: + +``` +http://127.0.0.1:6041/rest/sql +``` + +其他属性请保持默认值。 + +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) + +### 编辑“动作(action)” + +编辑资源配置,增加 Authorization 认证的键/值配对项。默认用户名和密码对应的 Authorization 值为: +``` +Basic cm9vdDp0YW9zZGF0YQ== +``` +相关文档请参考[ TDengine REST API 文档](../../reference/rest-api/)。 + +在消息体中输入规则引擎替换模板: + +```sql +INSERT INTO test.sensor_data VALUES( + now, + ${payload.temperature}, + ${payload.humidity}, + ${payload.volume}, + ${payload.PM10}, + ${payload.pm25}, + ${payload.SO2}, + ${payload.NO2}, + ${payload.CO}, + '${payload.id}', + ${payload.area}, + ${payload.ts} +) +``` + +![TDengine Database EMQX edit action](./emqx/edit-action.webp) + +最后点击左下方的 “Create” 按钮,保存规则。 + +## 编写模拟测试程序 + +```javascript +{{#include docs/examples/other/mock.js}} +``` + +注意:代码中的 CLIENT_NUM 在初始测试时可以先设置为一个较小的值,避免硬件性能无法完全处理较大并发客户端数量的情况。 + +![TDengine Database EMQX client num](./emqx/client-num.webp) + +## 执行测试模拟发送 MQTT 数据 + +``` +npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org +node mock.js +``` + +![TDengine Database EMQX run-mock](./emqx/run-mock.webp) + +## 验证 EMQX 接收到数据 + +在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到: + +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) + +## 验证数据写入到 TDengine + +使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中:
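+ +例如,可通过如下命令快速统计已写入的记录数(仅为示意,库表名对应前文创建的 test.sensor_data): + +```bash +# 使用 TDengine CLI 非交互模式统计写入条数 +taos -s "select count(*) from test.sensor_data;" +```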
+ +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) + +TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 +EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md new file mode 100644 index 0000000000000000000000000000000000000000..1944b97cb05103d888bebba48998b163135dc50c --- /dev/null +++ b/docs/zh/20-third-party/10-hive-mq-broker.md @@ -0,0 +1,6 @@ +--- +sidebar_label: HiveMQ Broker +title: HiveMQ Broker 写入 +--- + +[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器对机器(M2M)通讯和内部传输,满足可伸缩性、易管理和安全等特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/taosdata/hivemq-tdengine-extension/blob/master/README.md)。 diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md new file mode 100644 index 0000000000000000000000000000000000000000..2536e090494b5efbb93f0f53649eefe18d855b43 --- /dev/null +++ b/docs/zh/20-third-party/11-kafka.md @@ -0,0 +1,448 @@ +--- +sidebar_label: Kafka +title: TDengine Kafka Connector 使用教程 +--- + +TDengine Kafka Connector 包含两个插件:TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine,或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。 + +## 什么是 Kafka Connect? + +Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka,也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector,写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect,Sink Connector 从 Kafka Connect 接收数据。 + +![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp) + +TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于从 Kafka Connect 接收数据并写入 TDengine。 + +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) + +## 什么是 Confluent? + +[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加了很多扩展功能。包括: + +1. Schema Registry +2. REST 代理 +3. 非 Java 客户端 +4. 很多打包好的 Kafka Connect 插件 +5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 + +这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 +![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) + +Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 + +## 前置条件 + +运行本教程中的示例需要满足以下前提条件: + +1. Linux 操作系统 +2. 已安装 Java 8 和 Maven +3. 已安装 Git +4.
已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install) + +## 安装 Confluent + +Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。 + +在任意目录下执行: + +``` +curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz +tar xzf confluent-7.1.1.tar.gz -C /opt/test +``` + +然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。 + +```title=".profile" +export CONFLUENT_HOME=/opt/confluent-7.1.1 +PATH=$CONFLUENT_HOME/bin +export PATH +``` + +以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) + +安装完成之后,可以输入`confluent version`做简单验证: + +``` +# confluent version +confluent - Confluent CLI + +Version: v2.6.1 +Git Ref: 6d920590 +Build Date: 2022-02-18T06:14:21Z +Go Version: go1.17.6 (linux/amd64) +Development: false +``` + +## 安装 TDengine Connector 插件 + +### 从源码安装 + +``` +git clone https://github.com:taosdata/kafka-connect-tdengine.git +cd kafka-connect-tdengine +mvn clean package +unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip +``` + +以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 + +### 用 confluent-hub 安装 + +[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。 +**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。 + +## 启动 Confluent + +``` +confluent local services start +``` + +:::note +一定要先安装插件再启动 Confluent, 否则加载插件会失败。 +::: + +:::tip +若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 : + +```title="控制台输出日志" {1} +Using CONFLUENT_CURRENT: /tmp/confluent.106668 +Starting ZooKeeper +ZooKeeper is [UP] +Starting Kafka +Kafka is [UP] +Starting Schema Registry +Schema Registry is [UP] +Starting Kafka REST +Kafka REST is [UP] +Starting Connect +Connect is [UP] +Starting ksqlDB Server +ksqlDB Server is [UP] +Starting Control Center +Control Center is [UP] +``` + +清空数据可执行 `rm -rf /tmp/confluent.106668`。 +::: + +### 验证各个组件是否启动成功 + +输入命令: + +``` +confluent local services status +``` + +如果各组件都启动成功,会得到如下输出: + +``` +Connect is [UP] +Control Center is [UP] +Kafka is [UP] +Kafka REST is [UP] +ksqlDB Server is [UP] +Schema Registry is [UP] +ZooKeeper is [UP] +``` + +### 验证插件是否安装成功 + +在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件: + +``` +confluent local services connect plugin list +``` + +如果成功安装,会输出如下: + +```txt {4,9} +Available Connect Plugins: +[ + { + "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "type": "sink", + "version": "1.0.0" + }, + { + "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "type": "source", + "version": "1.0.0" + }, +...... 
+``` + +如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: +``` +echo `cat /tmp/confluent.current`/connect/connect.stdout +``` +该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。 + +与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。 + + +## TDengine Sink Connector 的使用 + +TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。 + +TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../reference/connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](../../develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](../../develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](../../develop/insert-data/opentsdb-json)。 + +下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 + +### 添加配置文件 + +``` +mkdir ~/test +cd ~/test +vi sink-demo.properties +``` + +sink-demo.properties 内容如下: + +```ini title="sink-demo.properties" +name=TDengineSinkConnector +connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector +tasks.max=1 +topics=meters +connection.url=jdbc:TAOS://127.0.0.1:6030 +connection.user=root +connection.password=taosdata +connection.database=power +db.schemaless=line +data.precision=ns +key.converter=org.apache.kafka.connect.storage.StringConverter +value.converter=org.apache.kafka.connect.storage.StringConverter +``` + +关键配置说明: + +1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。 +2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。 + +### 创建 Connector 实例 + +``` +confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties +``` + +若以上命令执行成功,则有如下输出: + +```json +{ + "name": "TDengineSinkConnector", + "config": { + "connection.database": "power", + "connection.password": "taosdata", + "connection.url": "jdbc:TAOS://127.0.0.1:6030", + "connection.user": "root", + "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "data.precision": "ns", + "db.schemaless": "line", + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "tasks.max": "1", + "topics": "meters", + "value.converter": "org.apache.kafka.connect.storage.StringConverter", + "name": "TDengineSinkConnector" + }, + "tasks": [], + "type": "sink" +} +``` + +### 写入测试数据 + +准备测试数据的文本文件,内容如下: + +```txt title="test-data.txt" +meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 +meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 +meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 +``` + +使用 kafka-console-producer 向主题 meters 添加测试数据。 + +``` +cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters +``` + +:::note +如果目标数据库 power 不存在,那么 TDengine Sink Connector 会自动创建数据库。自动创建数据库使用的时间精度为纳秒,这就要求写入数据的时间戳精度也是纳秒。如果写入数据的时间戳精度不是纳秒,将会抛异常。 +::: + +### 验证同步是否成功 + +使用 TDengine CLI 验证同步是否成功。 + +``` +taos> use power; +Database changed. 
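+-- 补充示意(非原始输出):也可先执行 select count(*) from meters; 快速确认 4 条测试数据是否全部同步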
+ +taos> select * from meters; + ts | current | voltage | phase | groupid | location | +=============================================================================================================================================================== + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | +Query OK, 4 row(s) in set (0.004208s) +``` + +若看到了以上数据,则说明同步成功。若没有,请检查 Kafka Connect 的日志。配置参数的详细说明见[配置参考](#配置参考)。 + +## TDengine Source Connector 的使用 + +TDengine Source Connector 的作用是将 TDengine 某个数据库某一时刻之后的数据全部推送到 Kafka。TDengine Source Connector 的实现原理是,先分批拉取历史数据,再用定时查询的策略同步增量数据。同时会监控表的变化,可以自动同步新增的表。如果重启 Kafka Connect, 会从上次中断的位置继续同步。 + +TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [InfluxDB Line 协议格式](/develop/insert-data/influxdb-line/) 或 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json), 然后写入 Kafka。 + +下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。 + +### 添加配置文件 + +``` +vi source-demo.properties +``` + +输入以下内容: + +```ini title="source-demo.properties" +name=TDengineSourceConnector +connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector +tasks.max=1 +connection.url=jdbc:TAOS://127.0.0.1:6030 +connection.username=root +connection.password=taosdata +connection.database=test +connection.attempts=3 +connection.backoff.ms=5000 +topic.prefix=tdengine-source- +poll.interval.ms=1000 +fetch.max.rows=100 +out.format=line +key.converter=org.apache.kafka.connect.storage.StringConverter +value.converter=org.apache.kafka.connect.storage.StringConverter +``` + +### 准备测试数据 + +准备生成测试数据的 SQL 文件。 + +```sql title="prepare-source-data.sql" +DROP DATABASE IF EXISTS test; +CREATE DATABASE test; +USE test; +CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); +INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); +``` + +使用 TDengine CLI, 执行 SQL 文件。 + +``` +taos -f prepare-source-data.sql +``` + +### 创建 Connector 实例 + +``` +confluent local services connect connector load TDengineSourceConnector --config source-demo.properties +``` + +### 查看 topic 数据 + +使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 + +``` +kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test +``` + +输出: + +``` +...... 
+meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 +...... +``` + +此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据: + +``` +USE test; +INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38); +INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); +``` + +再切换回 kafka-console-consumer, 此时命令行窗口已经打印出刚插入的 2 条数据。 + +### unload 插件 + +测试完毕之后,用 unload 命令停止已加载的 connector。 + +查看当前活跃的 connector: + +``` +confluent local services connect connector status +``` + +如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload: + +``` +confluent local services connect connector unload TDengineSourceConnector +confluent local services connect connector unload TDengineSourceConnector +``` + +## 配置参考 + +### 通用配置 + +以下配置项对 TDengine Sink Connector 和 TDengine Source Connector 均适用。 + +1. `name`: connector 名称。 +2. `connector.class`: connector 的完整类名, 如: com.taosdata.kafka.connect.sink.TDengineSinkConnector。 +3. `tasks.max`: 最大任务数, 默认 1。 +4. `topics`: 需要同步的 topic 列表, 多个用逗号分隔, 如 `topic1,topic2`。 +5. `connection.url`: TDengine JDBC 连接字符串, 如 `jdbc:TAOS://127.0.0.1:6030`。 +6. `connection.user`: TDengine 用户名, 默认 root。 +7. `connection.password` :TDengine 用户密码, 默认 taosdata。 +8. `connection.attempts` :最大尝试连接次数。默认 3。 +9. `connection.backoff.ms` : 创建连接失败重试时间隔时间,单位为 ms。 默认 5000。 + +### TDengine Sink Connector 特有的配置 + +1. `connection.database`: 目标数据库名。如果指定的数据库不存在会则自动创建。自动建库使用的时间精度为纳秒。默认值为 null。为 null 时目标数据库命名规则参考 `connection.database.prefix` 参数的说明 +2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。 +3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。 +4. `max.retries`: 发生错误时的最大重试次数。默认为 1。 +5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认为 3000。 +6. `db.schemaless`: 数据格式,可选值为: + 1. line :代表 InfluxDB 行协议格式 + 2. json : 代表 OpenTSDB JSON 格式 + 3. telnet :代表 OpenTSDB Telnet 行协议格式 +7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: + 1. ms : 表示毫秒 + 2. us : 表示微秒 + 3. ns : 表示纳秒。默认为纳秒。 + +### TDengine Source Connector 特有的配置 + +1. `connection.database`: 源数据库名称,无缺省值。 +2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。 +3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。 +4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。 +5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。 +6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。 + +## 其他说明 + +1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 +2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。 + +## 问题反馈 + +无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。 + +## 参考 + +1. https://www.confluent.io/what-is-apache-kafka +2. https://developer.confluent.io/learn-kafka/kafka-connect/intro +3. 
https://docs.confluent.io/platform/current/platform.html diff --git a/docs/zh/20-third-party/12-IDEA.mdx b/docs/zh/20-third-party/12-IDEA.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7d45c7f2ed385bbc3e8cea77c976cb6f0b418d08 --- /dev/null +++ b/docs/zh/20-third-party/12-IDEA.mdx @@ -0,0 +1,84 @@ +--- +sidebar_label: IDEA +title: 通过 IDEA 数据库管理工具连接 TDengine +--- + +IDEA 全称 IntelliJ IDEA,是 Java 语言开发的集成环境,被公认为最友好且使用范围最广的 Java 开发工具之一。 + +IDEA Ultimate 版自带数据库管理工具,类似于一个小型 Navicat。这个工具让我们能在 IDEA 上对数据库做简单操作,不需要再切换到其他工具上。对于 TDengine 来说,用户可以通过 JDBC 驱动建立与 IDEA 的连接,不需要再到命令行去写 SQL 语句,直接在 IDEA 中执行即可。 + +此处以 2.0.40 版本的 JDBC Connector 为例,给大家介绍如何使用源码编译、打包,以及如何使用 IDEA 数据库工具连接 TDengine。 + +## 前置条件 + +要让 IDEA 能正常连接 TDengine ,需要以下几方面的准备工作。 + +- TDengine 集群已经部署并正常运行。 +- 若使用 TSDBDriver 驱动类连接请在本地安装 TDengine 客户端。 +- 若使用 RestfulDriver 驱动类连接 TDengine,请确保 taosAdapter 已经正常运行。 + +## 配置步骤 + +### 源码编译 JDBC-Connector + +去各大仓库下载 [dist-jar 包](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)或者通过源码编译,此处介绍源码编译方法。 + +- 首先从 GitHub 仓库克隆 JDBC 连接器的源码,`git clone https://github.com/taosdata/taos-connector-jdbc.git -b 2.0.40`(此处推荐 -b 指定发布了的 Tags 版本),也可以在 IDEA 上操作: + +![image](https://user-images.githubusercontent.com/70138133/180187698-395762d1-fcac-4cea-b44f-cc8cd07ea0c8.png) + +- 克隆完源码后,若是编译 2.0.40 及以下版本的 JDBC-Connector 需要修改 taos-connector-jdbc 目录下 pom.xml 文件,将 dependencies 下的 commons-logging 依赖包的 scope 值由 test 改为 compile,否则编译完后导入 IDEA database 管理工具可能提醒缺少此驱动类。 + +![image](https://user-images.githubusercontent.com/70138133/180206650-561f9e24-ebb9-4cd2-8868-6f1cede54803.png) + +- 在 taos-connector-jdbc 目录下执行:`mvn clean package -Dmaven.test.skip=true` + +![image](https://user-images.githubusercontent.com/70138133/180353366-f515a6ae-904d-42d6-9967-1c298112fe88.png) + +![image](https://user-images.githubusercontent.com/70138133/180353831-cb0b2c5e-b9a3-4182-ba78-58abfa81e1b4.png) + +- 此时 taos-connector-jdbc 目录的 target 文件夹内产生了 taos-jdbcdriver-2.0.40-dist.jar 等驱动包。 + +### 使用 IDEA database 工具连接 TDengine + +- 打开 IDEA database 工具,新建驱动,驱动程序文件选择 target 文件夹下的 taos-jdbcdriver-2.0.40-dist.jar。 + +- 选择 RESTful 方式进行连接(注意:若使用 com.taosdata.jdbc.TSDBDriver 驱动类需要安装 TDengine 客户端)。 + +![image](https://user-images.githubusercontent.com/70138133/180208261-34e7ed91-217f-46b5-80f9-f65f67d67662.png) + +- 然后通过驱动创建数据源。TDengine 的 JDBC URL 规范为: +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` + +- 此处使用 RESTful 连接,URL 示例为:jdbc:TAOS-RS://VM-24-8-centos:6041/log(需要在 Hosts 文件内添加域名解析;URL 内的 locale、timezone 参数在 RESTful 连接中不生效) + +![image](https://user-images.githubusercontent.com/70138133/180354534-7d73fe33-c4d3-400d-922b-28b20aadfb1b.png) + +- 点击测试连接,出现黄色感叹号不影响使用。 + +![image](https://user-images.githubusercontent.com/70138133/180197251-98764434-bb7b-4e3a-9674-0620ab6d8bad.png) + + +## 验证方法 + +- 配置完后进行验证,刷新后点击显示所有数据库,看是否出现了所有的数据库: + +![image](https://user-images.githubusercontent.com/70138133/180202803-6e277132-44bd-4b22-8921-a54d16190d2b.png) + +- 右击数据源,新建查询控制台测试能否查询。需要注意的是,RESTful 请求是无状态的,查询、写入需要在表名前带上数据库名。 + +- 2.X 版本中默认带 log 库,我们可以使用 `SHOW log.stables` 查看包含哪些超级表后对特定表进行查询、调试: + +![image](https://user-images.githubusercontent.com/70138133/180202329-6734c874-d4f5-40a3-be7d-c4fabbe73a19.png) + +- 可以看到有个超级表叫做 vgroups_info,执行 `DESCRIBE log.vgroups_info` 查看表结构: + +![image](https://user-images.githubusercontent.com/70138133/180204391-36fd0806-8cd6-43b8-97eb-1e7ff235846a.png) + + +- 再执行`SELECT last_row(*) FROM 
log.vgroups_info GROUP BY vgroup_id`通过 vgroup_id 分组能查看各 VgroupId 下的最新一条数据: + +![image](https://user-images.githubusercontent.com/70138133/180205161-7f0314eb-cdaa-442c-acb5-d33931c32648.png) + + + diff --git a/docs/zh/20-third-party/13-Google-Data-Studio.mdx b/docs/zh/20-third-party/13-Google-Data-Studio.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0d1ec84aa3d1b93b2fb9a0eec9ae0b6cefc51e4d --- /dev/null +++ b/docs/zh/20-third-party/13-Google-Data-Studio.mdx @@ -0,0 +1,44 @@ +--- +sidebar_label: Google Data Studio 连接器 +title: 如何通过 Google Data Studio 可视化处理 TDengine 数据 +--- + +Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。 + +Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。 + +目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。 + +![image](./gds/GDS-2-2.png) + +接下来选择 AUTHORIZE 按钮。 + +![image](./gds/GDS-3-2.png) + +设置允许连接自己的账号到外部服务。 + +![image](./gds/GDS-4-1.png) + +在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。 + +注意:查询时间范围为可选输入项,如果不设置查询开始时间和结束时间,那么返回的数据为截至当前时间前30天的数据。如果30天内没有数据,生成的报告的会没数据。 + +![image](./gds/GDS-5-1024x426.png) + +连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。 + +![image](./gds/GDS-6-1024x368.png) + +目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。 + +以下为使用 GDS 对 TDengine 提供数据进行可视化图表设计的过程示例。 + +![image](./gds/GDS-7-1024x528.png) + +![image](./gds/GDS-8-1024x531.png) + +![image](./gds/GDS-9-1024x531.png) + +![image](./gds/GDS-10-1-1024x531.png) + +![image](./gds/GDS-11-1024x531.png) diff --git a/docs/zh/20-third-party/14-DBeaver.mdx b/docs/zh/20-third-party/14-DBeaver.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bbaa468f87d815b3b3b38f83074bc7e6d1696124 --- /dev/null +++ b/docs/zh/20-third-party/14-DBeaver.mdx @@ -0,0 +1,71 @@ +--- +sidebar_label: DBeaver +title: 通过开源数据库管理工具 DBeaver 连接 TDengine +--- + +DBeaver 是一款流行、开源的数据库管理工具以及 SQL 客户端,其功能强大,并且支持任何拥有 JDBC-Driver 的数据库(这意味着几乎所有数据库都支持)。 +其官网的介绍是这样的: +>Free multi-platform database tool for developers, database administrators, analysts and all people who need to work with databases. Supports all popular databases: MySQL, PostgreSQL, SQLite, Oracle, DB2, SQL Server, Sybase, MS Access, Teradata, Firebird, Apache Hive, Phoenix, Presto, etc. + +只需要简单的配置即可使用 DBeaver 来连接、管理 TDengine。 + +## 前置条件 +1. DBeaver 依赖 Java (JDK) 11 ,不过其安装包中已包含。可选安装 Maven、Git。 +2. 已安装并启动了 TDengine。 +3. 若使用原生连接(选择 TSDBDriver 驱动类),请在本地安装 TDengine 客户端。 +4. 
若使用 REST 连接(选择 RestfulDriver 驱动类),请确保 taosAdapter 已经正常运行。 + +## 配置步骤 +- 可以克隆 DBeaver 在 [GitHub](https://github.com/dbeaver/dbeaver) 上的源码,执行 `mvn package`,也可以直接下载打包好的安装包。此处选择直接下载安装包。 +- 在 GitHub DBeaver 仓库的 [Releases](https://github.com/dbeaver/dbeaver/releases) 处下载对应版本的 DBeaver,比如系统为 macOS,处理器芯片是 M1 ,此处下载 dbeaver-ce-22.1.2-macos-aarch64.dmg 进行安装。推荐使用 22.1.2 版本的 DBeaver,后续版本未进行验证。 +- 点击数据库标签,选择驱动管理器: + +![image](https://user-images.githubusercontent.com/70138133/181191577-7bb91c96-b4fc-455f-9f6c-545993f0a445.png) + +- 新建驱动,选择 TDengine 的 JDBC Connector 驱动包(其中的 dist.jar 包),此驱动包可以下载或者自行编译、打包,参考 [IDEA-源码编译 JDBC-Connector](../IDEA/#%E6%BA%90%E7%A0%81%E7%BC%96%E8%AF%91-jdbc-connector): + +![image](https://user-images.githubusercontent.com/70138133/181191709-fcc3faa3-cfe9-4b0b-8c1b-5074c9411c8b.png) + +- 添加后点击找到类,此处使用 RESTful 驱动类演示(注意:若使用 com.taosdata.jdbc.TSDBDriver 驱动类,则需要安装 TDengine 客户端): + +![image](https://user-images.githubusercontent.com/70138133/181191776-fc1ab7ff-b323-4913-92d7-aa5a10a5a6d8.png) + +- 填写一下驱动名称,简单填下配置: + +![image](https://user-images.githubusercontent.com/70138133/181191846-2c16b98a-c171-4936-a894-3fdaf96cfba1.png) + +- TDengine 的 JDBC URL 规范为: +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` + +- 点击“新建连接”,搜索配置好的驱动名称,点击后进入下一步: + +![image](https://user-images.githubusercontent.com/70138133/181191887-cc13a397-64a0-4dfc-b42f-d65608e71eae.png) + +- 输入密码后,点击“测试连接”(注:需要在本机 hosts 文件上添加 URL 内域名的解析,URL 内的 locale、timezone 参数在 RESTful 连接中不生效): + +![image](https://user-images.githubusercontent.com/70138133/181191921-e5a05f93-0ef5-45fb-8707-5697bcdef64b.png) + +## 验证方法 +- 点击“测试连接”若弹出“已连接”的提示代表连接成功。界面左侧能看到刷新出来的数据库,点击特定的表可以查看表的结构及数据: + +![image](https://user-images.githubusercontent.com/70138133/181192410-e7509c1a-0f9c-4282-a69a-82685e659fd7.png) + +- 点击界面左上方的新建 SQL 编辑器,默认,输入 SQL 进行验证。需要注意的是,RESTful 请求是无状态的,查询、写入需要在表名前带上数据库名。 +- 2.X 版本中默认带 log 库,我们可以使用`SHOW log.stables;`查看包含哪些超级表后对特定表进行查询、调试: + +![image](https://user-images.githubusercontent.com/70138133/181192488-727c3c1d-906a-4fbe-bd2e-41678d5e1627.png) + +- 可以看到有个超级表叫做 dnodes_info,执行`describe log.dnodes_info;`查看表结构: + +![image](https://user-images.githubusercontent.com/70138133/181192651-0adaafeb-c7ff-4d02-93b4-e1e3aebb39ea.png) + +- 再执行`select last_row(*) from log.dnodes_info group by dnode_id;`通过 dnode_id 能分组查询各 dnode_id 下的最新一条数据: + +![image](https://user-images.githubusercontent.com/70138133/181192918-e71d6482-3901-4b14-956c-2894658e2b67.png) + +- 还有其他操作也可以自行测试,比如写入一条数据后进行查询: + +![image](https://user-images.githubusercontent.com/70138133/181192959-27b8ded5-9719-424a-ae08-3b5635dd7fbf.png) + +好了,到这里我们就大功告成了。DBeaver 功能强大,其他常用功能还包括导入导出 SQL 脚本、配置表过滤器、建立数据库任务等,大家可以慢慢体验。 + diff --git a/docs-cn/20-third-party/_category_.yml b/docs/zh/20-third-party/_category_.yml similarity index 100% rename from docs-cn/20-third-party/_category_.yml rename to docs/zh/20-third-party/_category_.yml diff --git a/docs-cn/20-third-party/_deploytaosadapter.mdx b/docs/zh/20-third-party/_deploytaosadapter.mdx similarity index 100% rename from docs-cn/20-third-party/_deploytaosadapter.mdx rename to docs/zh/20-third-party/_deploytaosadapter.mdx diff --git a/docs-en/20-third-party/grafana/add_datasource1.webp b/docs/zh/20-third-party/add_datasource1.webp similarity index 100% rename from docs-en/20-third-party/grafana/add_datasource1.webp rename to docs/zh/20-third-party/add_datasource1.webp diff --git 
a/docs-en/20-third-party/grafana/add_datasource2.webp b/docs/zh/20-third-party/add_datasource2.webp similarity index 100% rename from docs-en/20-third-party/grafana/add_datasource2.webp rename to docs/zh/20-third-party/add_datasource2.webp diff --git a/docs-en/20-third-party/grafana/add_datasource3.webp b/docs/zh/20-third-party/add_datasource3.webp similarity index 100% rename from docs-en/20-third-party/grafana/add_datasource3.webp rename to docs/zh/20-third-party/add_datasource3.webp diff --git a/docs-en/20-third-party/grafana/add_datasource4.webp b/docs/zh/20-third-party/add_datasource4.webp similarity index 100% rename from docs-en/20-third-party/grafana/add_datasource4.webp rename to docs/zh/20-third-party/add_datasource4.webp diff --git a/docs-en/20-third-party/grafana/create_dashboard1.webp b/docs/zh/20-third-party/create_dashboard1.webp similarity index 100% rename from docs-en/20-third-party/grafana/create_dashboard1.webp rename to docs/zh/20-third-party/create_dashboard1.webp diff --git a/docs-en/20-third-party/grafana/create_dashboard2.webp b/docs/zh/20-third-party/create_dashboard2.webp similarity index 100% rename from docs-en/20-third-party/grafana/create_dashboard2.webp rename to docs/zh/20-third-party/create_dashboard2.webp diff --git a/docs-cn/20-third-party/dashboard-15146.webp b/docs/zh/20-third-party/dashboard-15146.webp similarity index 100% rename from docs-cn/20-third-party/dashboard-15146.webp rename to docs/zh/20-third-party/dashboard-15146.webp diff --git a/docs-en/20-third-party/emqx/add-action-handler.webp b/docs/zh/20-third-party/emqx/add-action-handler.webp similarity index 100% rename from docs-en/20-third-party/emqx/add-action-handler.webp rename to docs/zh/20-third-party/emqx/add-action-handler.webp diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.webp b/docs/zh/20-third-party/emqx/check-result-in-taos.webp similarity index 100% rename from docs-en/20-third-party/emqx/check-result-in-taos.webp rename to docs/zh/20-third-party/emqx/check-result-in-taos.webp diff --git a/docs-en/20-third-party/emqx/check-rule-matched.webp b/docs/zh/20-third-party/emqx/check-rule-matched.webp similarity index 100% rename from docs-en/20-third-party/emqx/check-rule-matched.webp rename to docs/zh/20-third-party/emqx/check-rule-matched.webp diff --git a/docs-en/20-third-party/emqx/client-num.webp b/docs/zh/20-third-party/emqx/client-num.webp similarity index 100% rename from docs-en/20-third-party/emqx/client-num.webp rename to docs/zh/20-third-party/emqx/client-num.webp diff --git a/docs-en/20-third-party/emqx/create-resource.webp b/docs/zh/20-third-party/emqx/create-resource.webp similarity index 100% rename from docs-en/20-third-party/emqx/create-resource.webp rename to docs/zh/20-third-party/emqx/create-resource.webp diff --git a/docs-en/20-third-party/emqx/create-rule.webp b/docs/zh/20-third-party/emqx/create-rule.webp similarity index 100% rename from docs-en/20-third-party/emqx/create-rule.webp rename to docs/zh/20-third-party/emqx/create-rule.webp diff --git a/docs-en/20-third-party/emqx/edit-action.webp b/docs/zh/20-third-party/emqx/edit-action.webp similarity index 100% rename from docs-en/20-third-party/emqx/edit-action.webp rename to docs/zh/20-third-party/emqx/edit-action.webp diff --git a/docs-en/20-third-party/emqx/edit-resource.webp b/docs/zh/20-third-party/emqx/edit-resource.webp similarity index 100% rename from docs-en/20-third-party/emqx/edit-resource.webp rename to docs/zh/20-third-party/emqx/edit-resource.webp diff --git 
a/docs-en/20-third-party/emqx/login-dashboard.webp b/docs/zh/20-third-party/emqx/login-dashboard.webp similarity index 100% rename from docs-en/20-third-party/emqx/login-dashboard.webp rename to docs/zh/20-third-party/emqx/login-dashboard.webp diff --git a/docs-en/20-third-party/emqx/rule-engine.webp b/docs/zh/20-third-party/emqx/rule-engine.webp similarity index 100% rename from docs-en/20-third-party/emqx/rule-engine.webp rename to docs/zh/20-third-party/emqx/rule-engine.webp diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.webp b/docs/zh/20-third-party/emqx/rule-header-key-value.webp similarity index 100% rename from docs-en/20-third-party/emqx/rule-header-key-value.webp rename to docs/zh/20-third-party/emqx/rule-header-key-value.webp diff --git a/docs-en/20-third-party/emqx/run-mock.webp b/docs/zh/20-third-party/emqx/run-mock.webp similarity index 100% rename from docs-en/20-third-party/emqx/run-mock.webp rename to docs/zh/20-third-party/emqx/run-mock.webp diff --git a/docs/zh/20-third-party/gds/GDS-10-1-1024x531.png b/docs/zh/20-third-party/gds/GDS-10-1-1024x531.png new file mode 100644 index 0000000000000000000000000000000000000000..121ca63b18b0f6a7af58d823482d95b648e171b9 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-10-1-1024x531.png differ diff --git a/docs/zh/20-third-party/gds/GDS-11-1024x531.png b/docs/zh/20-third-party/gds/GDS-11-1024x531.png new file mode 100644 index 0000000000000000000000000000000000000000..cf61951be65bd2461301725b07d2b20eae9576df Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-11-1024x531.png differ diff --git a/docs/zh/20-third-party/gds/GDS-2-2.png b/docs/zh/20-third-party/gds/GDS-2-2.png new file mode 100644 index 0000000000000000000000000000000000000000..81b305ca7a262472adb9995c0c4864add32aa1b2 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-2-2.png differ diff --git a/docs/zh/20-third-party/gds/GDS-3-2.png b/docs/zh/20-third-party/gds/GDS-3-2.png new file mode 100644 index 0000000000000000000000000000000000000000..333cc7d8b549a43f27129b5895b96ee0bfb285ca Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-3-2.png differ diff --git a/docs/zh/20-third-party/gds/GDS-4-1.png b/docs/zh/20-third-party/gds/GDS-4-1.png new file mode 100644 index 0000000000000000000000000000000000000000..18a52caf274c0c582670731966bc04387d960b05 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-4-1.png differ diff --git a/docs/zh/20-third-party/gds/GDS-5-1024x426.png b/docs/zh/20-third-party/gds/GDS-5-1024x426.png new file mode 100644 index 0000000000000000000000000000000000000000..a8bbd461b0e34c779547262ad8547ab42a0773c7 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-5-1024x426.png differ diff --git a/docs/zh/20-third-party/gds/GDS-6-1024x368.png b/docs/zh/20-third-party/gds/GDS-6-1024x368.png new file mode 100644 index 0000000000000000000000000000000000000000..af9afd8c33d6874de2086d77b9e67c17aeefbe32 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-6-1024x368.png differ diff --git a/docs/zh/20-third-party/gds/GDS-7-1024x528.png b/docs/zh/20-third-party/gds/GDS-7-1024x528.png new file mode 100644 index 0000000000000000000000000000000000000000..34e0dab351e684b049c4ed291d9e253b86c3c222 Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-7-1024x528.png differ diff --git a/docs/zh/20-third-party/gds/GDS-8-1024x531.png b/docs/zh/20-third-party/gds/GDS-8-1024x531.png new file mode 100644 index 0000000000000000000000000000000000000000..4c6d627bdcde0ea12d07bf0fe9f672241686d416 Binary 
files /dev/null and b/docs/zh/20-third-party/gds/GDS-8-1024x531.png differ diff --git a/docs/zh/20-third-party/gds/GDS-9-1024x531.png b/docs/zh/20-third-party/gds/GDS-9-1024x531.png new file mode 100644 index 0000000000000000000000000000000000000000..b3262778479bda8a645536d4eefcc4a3528d3edb Binary files /dev/null and b/docs/zh/20-third-party/gds/GDS-9-1024x531.png differ diff --git a/docs/zh/20-third-party/grafana-data-source.png b/docs/zh/20-third-party/grafana-data-source.png new file mode 100644 index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a Binary files /dev/null and b/docs/zh/20-third-party/grafana-data-source.png differ diff --git a/docs/zh/20-third-party/grafana-install-and-config.png b/docs/zh/20-third-party/grafana-install-and-config.png new file mode 100644 index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e Binary files /dev/null and b/docs/zh/20-third-party/grafana-install-and-config.png differ diff --git a/docs/zh/20-third-party/grafana-plugin-search-tdengine.png b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png new file mode 100644 index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e Binary files /dev/null and b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png differ diff --git a/docs-cn/20-third-party/import_dashboard1.webp b/docs/zh/20-third-party/import_dashboard1.webp similarity index 100% rename from docs-cn/20-third-party/import_dashboard1.webp rename to docs/zh/20-third-party/import_dashboard1.webp diff --git a/docs-cn/20-third-party/import_dashboard2.webp b/docs/zh/20-third-party/import_dashboard2.webp similarity index 100% rename from docs-cn/20-third-party/import_dashboard2.webp rename to docs/zh/20-third-party/import_dashboard2.webp diff --git a/docs-cn/20-third-party/index.md b/docs/zh/20-third-party/index.md similarity index 100% rename from docs-cn/20-third-party/index.md rename to docs/zh/20-third-party/index.md diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.webp b/docs/zh/20-third-party/kafka/Kafka_Connect.webp similarity index 100% rename from docs-en/20-third-party/kafka/Kafka_Connect.webp rename to docs/zh/20-third-party/kafka/Kafka_Connect.webp diff --git a/docs-en/20-third-party/kafka/confluentPlatform.webp b/docs/zh/20-third-party/kafka/confluentPlatform.webp similarity index 100% rename from docs-en/20-third-party/kafka/confluentPlatform.webp rename to docs/zh/20-third-party/kafka/confluentPlatform.webp diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs/zh/20-third-party/kafka/streaming-integration-with-kafka-connect.webp similarity index 100% rename from docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp rename to docs/zh/20-third-party/kafka/streaming-integration-with-kafka-connect.webp diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md new file mode 100644 index 0000000000000000000000000000000000000000..f1fd75d4f3ea22dd64cb7dce95170aa3074d3068 --- /dev/null +++ b/docs/zh/21-tdinternal/01-arch.md @@ -0,0 +1,302 @@ +--- +sidebar_label: 整体架构 +title: 整体架构 +--- + +## 集群与基本逻辑单元 + +TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以自动化负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。 + +### 主要逻辑单元 + +TDengine 分布式架构的逻辑结构图如下: + +![TDengine Database 架构示意图](./structure.webp) + +
图 1 TDengine 架构示意图
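+
+作为补充,集群搭建完成后,可以在 TDengine CLI 中用下列 SQL 大致对应图中的各类逻辑单元(仅为示意,输出的列与格式随版本略有差异;其中 log 库为 2.X 版本默认自带的监控库):
+
+```sql
+SHOW DNODES;   -- 查看集群中的数据节点(dnode)及其状态
+SHOW MNODES;   -- 查看管理节点(mnode)及其主从角色
+USE log;       -- SHOW VGROUPS 需要先选定一个数据库,此处以 log 库为例
+SHOW VGROUPS;  -- 查看该数据库下的虚拟节点组(vgroup)分布
+```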
+ +一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine 应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过 taosc 的 API 与 TDengine 集群进行互动。下面对每个逻辑单元进行简要介绍。 + +**物理节点(pnode):** pnode 是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有 OS 的物理机、虚拟机或 Docker 容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine 完全依赖 FQDN 来进行网络通讯,如果不了解 FQDN,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 + +**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode 包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode 在系统中的唯一标识由实例的 End Point(EP)决定。EP 是 dnode 所在物理节点的 FQDN(Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 + +**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。 + +**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。 + +**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup)来保证系统的高可靠。虚拟节点组内采取 master/slave 的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。 + +**Taosc** taosc 是 TDengine 给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供 C/C++ 语言原生接口,内嵌于 JDBC、C#、Python、Go、Node.js 语言连接库里。应用都是通过 taosc 而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于 JDBC、C/C++、C#、Python、Go、Node.js 接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的 RESTful 接口,taosc 在 TDengine 集群的每个 dnode 上都有一运行实例。 + +### 节点之间的通讯 + +**通讯方式:**TDengine 系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过 TCP/UDP 进行的。因为考虑到物联网场景,数据写入的包一般不大,因此 TDengine 除采用 TCP 做传输之外,还采用 UDP 方式,因为 UDP 更加高效,而且不受连接数的限制。TDengine 实现了自己的超时、重传、确认等机制,以确保 UDP 的可靠传输。对于数据量不到 15K 的数据包,采取 UDP 的方式进行传输,超过 15K 的,或者是查询类的操作,自动采取 TCP 的方式进行传输。同时,TDengine 根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用 TCP 方式进行数据传输。 + +**FQDN 配置:**一个数据节点有一个或多个 FQDN,可以在系统配置文件 taos.cfg 通过参数“fqdn”进行指定,如果没有指定,系统将自动获取计算机的 hostname 作为其 FQDN。如果节点没有配置 FQDN,可以直接将该节点的配置参数 fqdn 设置为它的 IP 地址。但不建议使用 IP,因为 IP 地址可变,一旦变化,将让集群无法正常工作。一个数据节点的 EP(End Point)由 FQDN + Port 组成。采用 FQDN,需要保证 DNS 服务正常工作,或者在节点以及应用所在的节点配置好 hosts 文件。另外,这个参数值的长度需要控制在 96 个字符以内。 + +**端口配置:**一个数据节点对外的端口由 TDengine 的系统配置参数 serverPort 决定,对集群内部通讯的端口是 serverPort+5。为支持多线程高效的处理 UDP 数据,每个对内和对外的 UDP 连接,都需要占用 5 个连续的端口。 + +- 集群内数据节点之间的数据复制操作占用一个 TCP 端口,是 serverPort+10。 +- 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。 +- 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。 + +因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) + +**集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 
FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。 + +**集群内部通讯:**各个数据节点之间通过 TCP/UDP 进行连接。一个数据节点启动时,将获取 mnode 所在的 dnode 的 EP 信息,然后与系统中的 mnode 建立起连接,交换信息。获取 mnode 的 EP 信息有三步: + +1. 检查 mnodeEpSet.json 文件是否存在,如果不存在或不能正常打开获得 mnode EP 信息,进入第二步; +2. 检查系统配置文件 taos.cfg,获取节点配置参数 firstEp、secondEp(这两个参数指定的节点可以是不带 mnode 的普通节点,这样的话,节点被连接时会尝试重定向到 mnode 节点),如果不存在或者 taos.cfg 里没有这两个配置参数,或无效,进入第三步; +3. 将自己的 EP 设为 mnode EP,并独立运行起来。 + +获取 mnode EP 列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试 mnode EP 列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 + +**Mnode 的选择:**TDengine 逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码 taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的 End Point,并与获取的 mnode EP List 进行比对,如果在其中,该数据节点认为自己应该启动 mnode 模块,成为 mnode。如果自己的 EP 不在 mnode EP List 里,则不启动 mnode 模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode 有可能迁移至新的 dnode,但一切都是透明的,无需人工干预,配置参数的修改,是 mnode 自己根据资源做出的决定。 + +**新数据节点的加入:**系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用 TDengine CLI 连接到现有工作的数据节点,然后用命令“CREATE DNODE”将新的数据节点的 End Point 添加进去;第二步:在新的数据节点的系统配置参数文件 taos.cfg 里,将 firstEp,secondEp 参数设置为现有集群中任意两个数据节点的 EP 即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 + +**重定向:**无论是 dnode 还是 taosc,最先都是要发起与 mnode 的连接,但 mnode 是系统自动创建并维护的,因此对于用户来说,并不知道哪个 dnode 在运行 mnode。TDengine 只要求向系统中任何一个工作的 dnode 发起连接即可。因为任何一个正在运行的 dnode,都维护有目前运行的 mnode EP List。当收到一个来自新启动的 dnode 或 taosc 的连接请求,如果自己不是 mnode,则将 mnode EP List 回复给对方,taosc 或新启动的 dnode 收到这个 list,就重新尝试建立连接。当 mnode EP List 发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知 taosc。 + +### 一个典型的消息流程 + +为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 + +![TDengine Database 典型的操作流程](./message.webp) + +
图 2 TDengine 典型的操作流程
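+
+在逐步拆解图中各个步骤之前,先看一条最简单的写入语句,下文的第 1 至 6 步描述的正是这样一次写入在系统内部走过的完整路径(表名 d1001 及其列结构仅为示意,假设该表已按"时间戳 + 一个数值列"建好):
+
+```sql
+INSERT INTO d1001 VALUES (NOW, 10.3);
+```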
+ +1. 应用通过 JDBC 或其他 API 接口发起插入数据的请求。 +2. taosc 会检查缓存,看是否保存有该表的 meta data。如果有,直接到第 4 步。如果没有,taosc 将向 mnode 发出 get meta-data 请求。 +3. mnode 将该表的 meta-data 返回给 taosc。Meta-data 包含有该表的 schema,而且还有该表所属的 vgroup 信息(vnode ID 以及所在的 dnode 的 End Point,如果副本数为 N,就有 N 组 End Point)。如果 taosc 迟迟得不到 mnode 回应,而且存在多个 mnode,taosc 将向下一个 mnode 发出请求。 +4. taosc 向 master vnode 发起插入请求。 +5. vnode 插入数据后,给 taosc 一个应答,表示插入成功。如果 taosc 迟迟得不到 vnode 的回应,taosc 会认为该节点已经离线。这种情况下,如果被插入的数据库有多个副本,taosc 将向 vgroup 里下一个 vnode 发出插入请求。 +6. taosc 通知 APP,写入成功。 + +对于第二和第三步,taosc 启动时,并不知道 mnode 的 End Point,因此会直接向配置的集群对外服务的 End Point 发起请求。如果接收到该请求的 dnode 并没有配置 mnode,该 dnode 会在回复的消息中告知 mnode EP 列表,这样 taosc 会重新向新的 mnode 的 EP 发出获取 meta-data 的请求。 + +对于第四和第五步,没有缓存的情况下,taosc 无法知道虚拟节点组里谁是 master,就假设第一个 vnodeID 就是 master,向它发出请求。如果接收到请求的 vnode 并不是 master,它会在回复中告知谁是 master,这样 taosc 就向建议的 master vnode 发出请求。一旦得到插入成功的回复,taosc 会缓存 master 节点的信息。 + +上述是插入数据的流程,查询、计算的流程也完全一致。taosc 把这些复杂的流程全部封装屏蔽了,对于应用来说无感知也无需任何特别处理。 + +通过 taosc 缓存机制,只有在第一次对一张表操作时,才需要访问 mnode,因此 mnode 不会成为系统瓶颈。但因为 schema 有可能变化,而且 vgroup 有可能发生改变(比如负载均衡发生),因此 taosc 会定时和 mnode 交互,自动更新缓存。 + +## 存储模型与数据分区、分片 + +### 存储模型 + +TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: + +- 时序数据:存放于 vnode 里,由 data、head 和 last 三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 标签数据:存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量不大,有 N 张表,就有 N 条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此 TDengine 支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。 +- 元数据:存放于 mnode 里,包含系统节点、用户、DB、Table Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 + +与典型的 NoSQL 存储模型相比,TDengine 将标签数据与时序数据完全分离存储,它具有两大优势: + +- 能够极大地降低标签数据存储的冗余度:一般的 NoSQL 数据库或时序数据库,采用的 K-V 存储,其中的 Key 包含时间戳、设备 ID、各种标签。每条记录都带有这些重复的内容,浪费存储空间。而且如果应用要在历史数据上增加、修改或删除标签,需要遍历数据,重写一遍,操作成本极其昂贵。 +- 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。而且标签数据采用全内存的结构进行管理和维护,千万级别规模的标签数据查询可以在毫秒级别返回。 + +### 数据分片 + +对于海量的数据管理,为实现水平扩展,一般都需要采取分片(Sharding)分区(Partitioning)策略。TDengine 是通过 vnode 来实现数据分片的,通过一个时间段一个数据文件来实现时序数据分区的。 + +vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和计算功能。为便于负载均衡、数据恢复、支持异构环境,TDengine 将一个数据节点根据其计算和存储资源切分为多个 vnode。这些 vnode 的管理是 TDengine 自动完成的,对应用完全透明。 + +对于单独一个数据采集点,无论其数据量多大,一个 vnode(或 vgroup,如果副本数大于 1)有足够的计算资源和存储资源来处理(如果每秒生成一条 16 字节的记录,一年产生的原始数据不到 0.5G),因此 TDengine 将一张表(一个数据采集点)的所有数据都存放在一个 vnode 里,而不会让同一个采集点的数据分布到两个或多个 dnode 上。而且一个 vnode 可存储多个数据采集点(表)的数据,一个 vnode 可容纳的表的数目的上限为一百万。设计上,一个 vnode 里所有的表都属于同一个 DB。一个数据节点上,除非特殊配置,一个 DB 拥有的 vnode 数目不会超过系统核的数目。 + +创建 DB 时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的 vnode,且该 vnode 是否有空余的表空间,如果有,立即在该有空位的 vnode 创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个 dnode 上创建一新的 vnode,然后创建表。如果 DB 有多个副本,系统不是只创建一个 vnode,而是一个 vgroup(虚拟数据节点组)。系统对 vnode 的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。 + +每张表的 meta data(包含 schema,标签等)也存放于 vnode 里,而不是集中存放于 mnode,实际上这是对 Meta 数据的分片,这样便于高效并行的进行标签过滤操作。 + +### 数据分区 + +TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区。每个数据文件只包含一个时间段的时序数据,时间段的长度由 DB 的配置参数 days 决定。这种按时间段分区的方法还便于高效实现数据的保留策略,只要数据文件超过规定的天数(系统配置参数 keep),将被自动删除。而且不同的时间段可以存放于不同的路径和存储介质,以便于大数据的冷热管理,实现多级存储。 + +总的来说,**TDengine 是通过 vnode 以及时间两个维度,对大数据进行切分**,便于并行高效的管理,实现水平扩展。 + +### 负载均衡 + +每个 dnode 都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此 mnode 了解整个集群的状态。基于整体状态,当 mnode 发现某个 dnode 负载过重,它会将 dnode 上的一个或多个 vnode 挪到其他 dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。 + +如果 mnode 一段时间没有收到 dnode 的状态报告,mnode 会认为这个 dnode 已经离线。如果离线时间超过一定时长(时长由配置参数 offlineThreshold 决定),该 dnode 将被 mnode 强制剔除出集群。该 dnode 上的 vnodes 如果副本数大于 1,系统将自动在其他 dnode 上创建新的副本,以保证数据的副本数。如果该 dnode 上还有 
mnode,而且 mnode 的副本数大于 1,系统也将自动在其他 dnode 上创建新的 mnode,以保证 mnode 的副本数。 + +当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。 + +负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 + +**提示:负载均衡由参数 balance 控制,决定开启/关闭自动负载均衡。** + +## 数据写入与复制流程 + +如果一个数据库有 N 个副本,那一个虚拟节点组就有 N 个虚拟节点,但是只有一个是 master,其他都是 slave。当应用将新的记录写入系统时,只有 master vnode 能接受写的请求。如果 slave vnode 收到写的请求,系统将通知 taosc 需要重新定向。 + +### Master Vnode 写入流程 + +Master Vnode 遵循下面的写入流程: + +![TDengine Database Master写入流程](./write_master.webp) + +
图 3 TDengine Master 写入流程
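+
+下述第 2 步涉及的 walLevel 与 fsync 行为,除全局配置外,一般也可以在建库时按库指定。以下为一个示意(demo 为假设的库名,取值仅为示例,具体语法请以所用版本的 TAOS SQL 文档为准):
+
+```sql
+-- 副本数为 3;wal 级别为 2 且 fsync 为 0:每次写入的 WAL 都立即落盘
+CREATE DATABASE demo REPLICA 3 WAL 2 FSYNC 0;
+```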
+ +1. master vnode 收到应用的数据插入请求,验证 OK,进入下一步; +2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; +3. 如果有多个副本,vnode 将把数据包转发给同一虚拟节点组内的 slave vnodes,该转发包带有数据的版本号(version); +4. 写入内存,并将记录加入到 skip list; +5. master vnode 返回确认信息给应用,表示写入成功; +6. 如果第 2、3、4 步中任何一步失败,将直接返回错误给应用。 + +### Slave Vnode 写入流程 + +对于 slave vnode,写入流程是: + +![TDengine Database Slave 写入流程](./write_slave.webp) + +
图 4 TDengine Slave 写入流程
+ +1. slave vnode 收到 Master vnode 转发了的数据插入请求。检查 last version 是否与 master 一致,如果一致,进入下一步。如果不一致,需要进入同步状态。 +2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。 +3. 写入内存,更新内存中的 skip list。 + +与 master vnode 相比,slave vnode 不存在转发环节,也不存在回复确认环节,少了两步。但写内存与 WAL 是完全一样的。 + +### 主从选择 + +Vnode 会保持一个数据版本号(version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增加 1。 + +一个 vnode 启动时,角色(master、slave)是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立 TCP 连接,并互相交换 status,其中包括 version 和自己的角色。通过 status 的交换,系统进入选主流程,规则如下: + +1. 如果只有一个副本,该副本永远就是 master +2. 所有副本都在线时,版本最高的被选为 master +3. 在线的虚拟节点数过半,而且有虚拟节点是 slave 的话,该虚拟节点自动成为 master +4. 对于 2 和 3,如果多个虚拟节点满足成为 master 的要求,那么虚拟节点组的节点列表里,最前面的选为 master + +更多的关于数据复制的流程,请见[《TDengine 2.0 数据复制模块设计》](../replica/)。 + +### 同步复制 + +对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此 TDengine 提供同步复制的机制供用户选择。在创建数据库时,除指定副本数 replica 之外,用户还需要指定新的参数 quorum。如果 quorum 大于 1,它表示每次 master 转发给副本时,需要等待 quorum-1 个回复确认,才能通知应用,数据在 slave 已经写入成功。如果在一定的时间内,得不到 quorum-1 个回复确认,master vnode 将返回错误给应用。 + +采用同步复制,系统的性能会有所下降,而且 latency 会增加。因为元数据要强一致,mnode 之间的数据同步缺省就是采用的同步复制。 + +## 缓存与持久化 + +### 缓存 + +TDengine 采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine 充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。 + +TDengine 通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将 TDengine 作为数据缓存来使用,而不需要再部署 Redis 或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine 重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的 key-value 缓存系统再将之前缓存的数据重新加载到缓存中。 + +每个 vnode 有自己独立的内存,而且由多个固定大小的内存块组成,不同 vnode 之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个 vnode 维护有自己的 skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个 vnode 里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个 vnode 的内存块的个数由配置参数 blocks 决定,内存块的大小由配置参数 cache 决定。 + +### 持久化存储 + +TDengine 采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当 vnode 中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine 也会拉起落盘线程将缓存的数据写入持久化存储。TDengine 在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制地增长。 + +为充分利用时序数据特点,TDengine 将一个 vnode 保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数 days 决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。 + +对于采集的数据,一般有保留时长,这个时长由系统配置参数 keep 决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。 + +给定 days 与 keep 两个参数,一个典型工作状态的 vnode 中总的数据文件数为:向上取整 `(keep/days)+1` 个。总的数据文件个数不宜过大,也不宜过小。10 到 100 以内合适。基于这个原则,可以设置合理的 days。目前的版本,参数 keep 可以修改,但对于参数 days,一旦设置后,不可修改。 + +在每个数据文件里,一张表的数据是一块一块存储的。一张表可以有一到多个数据文件块。在一个文件块里,数据是列式存储的,占用的是一片连续的存储空间,这样大大提高读取速度。文件块的大小由系统参数 maxRows (每块最大记录条数)决定,缺省值为 4096。这个值不宜过大,也不宜过小。过大,定位具体时间段的数据的搜索时间会变长,影响读取速度;过小,数据块的索引太大,压缩效率偏低,也影响读取速度。 + +每个数据文件(.data 结尾)都有一个对应的索引文件(.head 结尾),该索引文件对每张表都有一数据块的摘要信息,记录了每个数据块在数据文件中的偏移量,数据的起止时间等信息,以帮助系统迅速定位需要查找的数据。每个数据文件还有一对应的 last 文件(.last 结尾),该文件是为防止落盘时数据块碎片化而设计的。如果一张表落盘的记录条数没有达到系统配置参数 minRows(每块最小记录条数),将被先存储到 last 文件,等下次落盘时,新落盘的记录将与 last 文件的记录进行合并,再写入数据文件。 + +数据写入磁盘时,根据系统配置参数 comp 决定是否压缩数据。TDengine 提供了三种压缩选项:无压缩、一阶段压缩和两阶段压缩,分别对应 comp 值为 0、1 和 2 的情况。一阶段压缩根据数据的类型进行了相应的压缩,压缩算法包括 delta-delta 编码、simple 8B 方法、zig-zag 编码、LZ4 等算法。二阶段压缩在一阶段压缩的基础上又用通用压缩算法进行了压缩,压缩率更高。 + +### 多级存储 + +说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。 + +在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。 + +除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。 + 
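+在展开多级存储的具体配置之前,先用一个建库语句小结上文(持久化存储、同步复制)中反复出现的几个参数(power 为假设的库名,取值仅为示意,含义见上文对应段落):
+
+```sql
+CREATE DATABASE power DAYS 10 KEEP 365 COMP 2 REPLICA 3 QUORUM 2 UPDATE 1;
+-- DAYS:每个数据文件覆盖的天数;KEEP:数据保留天数
+-- COMP:压缩方式(0 不压缩、1 一阶段压缩、2 两阶段压缩)
+-- REPLICA/QUORUM:副本数与同步复制需要的写入确认数
+-- UPDATE 1:允许更新已有记录
+```
+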
+多级存储支持 3 级,每级最多可配置 16 个挂载点。
+
+TDengine 多级存储配置方式如下(在配置文件 /etc/taos/taos.cfg 中):
+
+```
+dataDir [path] <level> <primary>
+```
+
+- path: 挂载点的文件夹路径。
+- level: 介质存储等级,取值为 0、1、2。
+  0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。
+  各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。
+  同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。
+  需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。
+- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。
+
+在配置中,只允许存在一个主挂载点(level=0,primary=1),例如采用如下的配置方式:
+
+```
+dataDir /mnt/data1 0 1
+dataDir /mnt/data2 0 0
+dataDir /mnt/data3 1 0
+dataDir /mnt/data4 1 0
+dataDir /mnt/data5 2 0
+dataDir /mnt/data6 2 0
+```
+
+:::note
+
+1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级、仅 0 级 + 1 级、以及 0 级 + 1 级 + 2 级,而不允许只配置 level=0 和 level=2 而不配置 level=1。
+2. 禁止手动移除使用中的挂载盘;挂载盘目前不支持非本地的网络盘。
+3. 多级存储目前不支持删除已经挂载的硬盘的功能。
+
+:::
+
+## 数据查询
+
+TDengine 提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine 的查询处理需要客户端、vnode、mnode 节点协同完成。
+
+### 单表查询
+
+SQL 语句的解析和校验工作在客户端完成:解析 SQL 语句并生成抽象语法树(Abstract Syntax Tree,AST),然后对其进行校验和检查,并向管理节点(mnode)请求查询中指定表的元数据信息(table metadata)。
+
+根据元数据信息中的 End Point 信息,将查询请求序列化后发送到该表所在的数据节点(dnode)。dnode 接收到查询请求后,识别出该查询请求指向的虚拟节点(vnode),将消息转发到 vnode 的查询执行队列。vnode 的查询执行线程建立基础的查询执行环境,并立即返回该查询请求,同时开始执行该查询。
+
+客户端在获取查询结果的时候,dnode 的查询执行队列中的工作线程会等待 vnode 执行线程执行完成,才能将查询结果返回到请求的客户端。
+
+### 按时间轴聚合、降采样、插值
+
+时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳的数据在时间轴上进行聚合是不同于普通数据库的重要功能,从这点上看,它与流计算引擎的窗口查询有相似之处。
+
+在 TDengine 中引入关键词 interval 来进行时间轴上固定长度时间窗口的切分,并按照时间窗口对数据进行聚合,对窗口范围内的数据按需进行聚合。例如:
+
+```sql
+SELECT COUNT(*) FROM d1001 INTERVAL(1h);
+```
+
+针对 d1001 设备采集的数据,按照 1 小时的时间窗口返回每小时存储的记录数量。
+
+在需要连续获得查询结果的应用场景下,如果给定的时间区间存在数据缺失,会导致该区间的数据结果也丢失。TDengine 提供了对时间轴聚合结果进行插值的策略,通过使用关键词 fill 就能够对时间轴聚合结果进行插值。例如:
+
+```sql
+SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 23:59:59' INTERVAL(1h) FILL(PREV);
+```
+
+针对 d1001 设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine 提供前向插值(prev)、线性插值(linear)、空值填充(NULL)、特定值填充(value)。
+
+### 多表聚合查询
+
+TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效地进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
+
+![TDengine Database 多表聚合查询原理图](./multi_tables.webp)
+
图 5 多表聚合查询原理图
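+
+在逐条解读图中步骤之前,先给出一个按标签过滤的多表聚合查询示意(超级表 meters、标签 location 及子表名均为假设的示例名称):
+
+```sql
+-- 建立超级表,并按标签值创建两张子表(即两个数据采集点)
+CREATE STABLE meters (ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64));
+CREATE TABLE d1 USING meters TAGS ('beijing');
+CREATE TABLE d2 USING meters TAGS ('shanghai');
+-- 按标签过滤后对符合条件的全部子表做聚合,对应图中第 1 步发出的查询
+SELECT COUNT(*), AVG(current) FROM meters WHERE location = 'beijing';
+```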
+ +1. 应用将一个查询条件发往系统; +2. taosc 将超级表的名字发往 meta node(管理节点); +3. 管理节点将超级表所拥有的 vnode 列表发回 taosc; +4. taosc 将计算的请求连同标签过滤条件发往这些 vnode 对应的多个数据节点; +5. 每个 vnode 先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给 taosc; +6. taosc 将多个数据节点返回的结果做最后的聚合,将其返回给应用。 + +由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。 + +### 预计算 + +为有效提升查询处理的性能,针对物联网数据的不可更改的特点,在数据块头部记录该数据块中存储数据的统计信息:包括最大值、最小值、和。我们称之为预计算单元。如果查询处理涉及整个数据块的全部数据,直接使用预计算结果,完全不需要读取数据块的内容。由于预计算数据量远小于磁盘上存储的数据块数据的大小,对于磁盘 I/O 为瓶颈的查询处理,使用预计算结果可以极大地减小读取 I/O 压力,加速查询处理的流程。预计算机制与 PostgreSQL 的索引 BRIN(block range index)有异曲同工之妙。 diff --git a/docs-cn/21-tdinternal/02-replica.md b/docs/zh/21-tdinternal/02-replica.md similarity index 100% rename from docs-cn/21-tdinternal/02-replica.md rename to docs/zh/21-tdinternal/02-replica.md diff --git a/docs/zh/21-tdinternal/03-taosd.md b/docs/zh/21-tdinternal/03-taosd.md new file mode 100644 index 0000000000000000000000000000000000000000..92677e57007128949dd1e9c954870bc7a9b419b8 --- /dev/null +++ b/docs/zh/21-tdinternal/03-taosd.md @@ -0,0 +1,119 @@ +--- +sidebar_label: taosd 的设计 +title: taosd的设计 +--- + +逻辑上,TDengine 系统包含 dnode,taosc 和 App,dnode 是服务器侧执行代码 taosd 的一个运行实例,因此 taosd 是 TDengine 的核心,本文对 taosd 的设计做一简单的介绍,模块内的实现细节请见其他文档。 + +## 系统模块图 + +taosd 包含 rpc,dnode,vnode,tsdb,query,cq,sync,wal,mnode,http,monitor 等模块,具体如下图: + +![TDengine Database module](./modules.webp) + +taosd 的启动入口是 dnode 模块,dnode 然后启动其他模块,包括可选配置的 http,monitor 模块。taosc 或 dnode 之间交互的消息都是通过 rpc 模块进行,dnode 模块根据接收到的消息类型,将消息分发到 vnode 或 mnode 的消息队列,或由 dnode 模块自己消费。dnode 的工作线程(worker)消费消息队列里的消息,交给 mnode 或 vnode 进行处理。下面对各个模块做简要说明。 + +## RPC 模块 + +该模块负责 taosd 与 taosc,以及其他数据节点之间的通讯。TDengine 没有采取标准的 HTTP 或 gRPC 等第三方工具,而是实现了自己的通讯模块 RPC。 + +考虑到物联网场景下,数据写入的包一般不大,因此除支持 TCP 连接之外,RPC 还支持 UDP 连接。当数据包小于 15K 时,RPC 将采用 UDP 方式进行连接,否则将采用 TCP 连接。对于查询类的消息,RPC 不管包的大小,总是采取 TCP 连接。对于 UDP 连接,RPC 实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。 + +RPC 模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数 compressMsgSize,RPC 在传输中将自动压缩数据,以节省带宽。 + +为保证数据的安全和数据的 integrity,RPC 模块采用 MD5 做数字签名,对数据的真实性和完整性进行认证。 + +## DNODE 模块 + +该模块是整个 taosd 的入口,它具体负责如下任务: + +- 系统的初始化,包括 + - 从文件 taos.cfg 读取系统配置参数,从文件 dnodeCfg.json 读取数据节点的配置参数; + - 启动 RPC 模块,并建立起与 taosc 通讯的 server 连接,与其他数据节点通讯的 server 连接; + - 启动并初始化 dnode 的内部管理,该模块将扫描该数据节点已有的 vnode ,并打开它们; + - 初始化可配置的模块,如 mnode,http,monitor 等。 +- 数据节点的管理,包括 + - 定时的向 mnode 发送 status 消息,报告自己的状态; + - 根据 mnode 的指示,创建、改变、删除 vnode; + - 根据 mnode 的指示,修改自己的配置参数; +- 消息的分发、消费,包括 + - 为每一个 vnode 和 mnode 的创建并维护一个读队列、一个写队列; + - 将从 taosc 或其他数据节点来的消息,根据消息类型,将其直接分发到不同的消息队列,或由自己的管理模块直接消费; + - 维护一个读的线程池,消费读队列的消息,交给 vnode 或 mnode 处理。为支持高并发,一个读线程(worker)可以消费多个队列的消息,一个读队列可以由多个 worker 消费; + - 维护一个写的线程池,消费写队列的消息,交给 vnode 或 mnode 处理。为保证写操作的序列化,一个写队列只能由一个写线程负责,但一个写线程可以负责多个写队列。 + +taosd 的消息消费由 dnode 通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下: + +![TDengine Database dnode](./dnode.webp) + +## VNODE 模块 + +vnode 是一独立的数据存储查询逻辑单元,但因为一个 vnode 只能容许一个 DB ,因此 vnode 内部没有 account,DB,user 等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的 TSDB,负责查询的 query,负责数据复制的 sync,负责数据库日志的的 WAL,负责连续查询的 cq(continuous query),负责事件触发的流计算的 event 等模块,这些子模块只与 vnode 模块发生关系,与其他模块没有任何调用关系。模块图如下: + +![TDengine Database vnode](./vnode.webp) + +vnode 模块向下,与 dnodeVRead,dnodeVWrite 发生互动,向上,与子模块发生互动。它主要的功能有: + +- 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过 vnode 模块进行; +- 对于来自 taosc 或 mnode 的写操作,vnode 模块将其分解为写日志(WAL),转发(sync),本地存储(TSDB)子模块的操作; +- 对于查询操作,分发到 query 模块进行。 + +一个数据节点里有多个 vnode,因此 vnode 模块是有多个运行实例的。每个运行实例是完全独立的。 + +vnode 与其子模块是通过 API 
直接调用,而不是通过消息队列传递,而且各个子模块只与 vnode 模块有交互,不与 dnode、rpc 等模块发生任何直接关联。
+
+## MNODE 模块
+
+mnode 是整个系统的大脑,负责整个系统的资源调度,负责 meta data 的管理与存储。
+
+一个运行的系统里,只有一个 mnode,但它有多个副本(由系统配置参数 numOfMnodes 控制)。这些副本分布在不同的 dnode 里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个 Master,其他副本是 slave。所有数据更新类的操作,都只能在 master 上进行,而查询类的可以在 slave 节点上进行。代码实现上,同步模块与 vnode 共享,但 mnode 被分配一个特殊的 vgroup ID: 1,而且 quorum 大于 1。整个集群系统是由多个 dnode 组成的,运行的 mnode 的副本数不可能超过 dnode 的个数,但不会超过配置的副本数。如果某个 mnode 副本宕机一段时间,只要超过半数的 mnode 副本仍在运行,运行的 mnode 会自动根据整个系统的资源情况,在其他 dnode 里再启动一个 mnode,以保证运行的副本数。
+
+各个 dnode 通过信息交换,保存有 mnode 各个副本的 End Point 列表,并向其中的 master 节点定时(间隔由系统配置参数 statusInterval 控制)发送 status 消息,消息体里包含该 dnode 的 CPU、内存、剩余存储空间、vnode 个数,以及各个 vnode 的状态(存储空间、原始数据大小、记录条数、角色等)。这样 mnode 就了解整个系统的资源情况,如果用户创建新的表,就可以决定需要在哪个 dnode 创建;如果增加或删除 dnode,或者监测到某 dnode 数据过热、或离线太长,就可以决定需要挪动哪些 vnode,以实现负载均衡。
+
+mnode 里还负责 account、user、DB、stable、table、vgroup、dnode 的创建、删除与更新。mnode 不仅把这些 entity 的 meta data 保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在 mnode(保存在 vnode),而且子表不维护自己的 schema,而是与 stable 共享。为减小 mnode 的查询压力,taosc 会缓存 table、stable 的 schema。对于查询类的操作,各个 slave mnode 也可以提供,以减轻 master 压力。
+
+## TSDB 模块
+
+TSDB 模块是 vnode 中的负责快速高并发地存储和读取属于该 vnode 的表的元数据及采集的时序数据的引擎。除此之外,TSDB 还提供了表结构的修改、表标签值的修改等功能。TSDB 提供 API 供 vnode 和 query 等模块调用。TSDB 中存储了两类数据:一是元数据信息,二是时序数据。
+
+### 元数据信息
+
+TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schema 的定义等。对于超级表和超级表下的子表而言,又包含了 tag 的 schema 定义以及子表的 tag 值等。对于元数据信息而言,TSDB 就相当于一个全内存的 KV 型数据库,属于该 vnode 的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB 还对其中的子表,按照 tag 的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB 中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到 meta 文件中。meta 文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB 也提供了对于元数据的修改操作,如表 schema 的修改,tag schema 的修改以及 tag 值的修改等。
+
+### 时序数据
+
+每个 TSDB 在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入 TSDB 时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的 1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。
+
+而时序数据在写入到 TSDB 的数据文件时,是以列(column)的形式存储的。TSDB 中的数据文件包含多个数据文件组,每个数据文件组中又包含 .head、.data 和 .last 三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB 中的数据文件组是按照时间跨度进行分片的,默认是 10 天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在 TSDB 的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head 文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在 .head 文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.data 和 .last 文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入 .data 文件中,否则,会写入 .last 文件中,等待下次落盘时合并数据写入 .data 文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。
+
+## Query 模块
+
+该模块负责整体系统的查询处理。客户端调用该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。
+
+## SYNC 模块
+
+该模块实现数据的多副本复制,包括 vnode 与 mnode 的数据复制,支持异步和同步两种复制方式,以满足 meta data 与时序数据不同的复制需求。因为它为 mnode 与 vnode 共享,系统为 mnode 副本预留了一个特殊的 vgroup ID:1,因此 vnode group 的 ID 是从 2 开始的。
+
+每个 vnode/mnode 模块实例会有一个与之一一对应的 sync 模块实例。详细设计请见[TDengine 2.0 数据复制模块设计](../replica/)。
+
+## WAL 模块
+
+该模块负责将新插入的数据写入 write ahead log(WAL),为 vnode、mnode 共享,以保证服务器 crash 或发生其他故障时,能从 WAL 中恢复数据。
+
+每个 vnode/mnode 模块实例会有一个与之一一对应的 WAL 模块实例。WAL 的落盘操作由 walLevel、fsync 两个参数控制:如果要 100% 保证数据不丢失,需要将 walLevel 配置为 2、fsync 设置为 0,此时每条数据插入请求都会实时落盘后,才会给应用确认。
+
+## HTTP 模块
+
+该模块负责处理系统对外的 RESTful 接口,可以通过配置,由 dnode 启动或停止(仅 2.2 及之前的版本中存在)。
+
+该模块将接收到的 RESTful 请求,做了各种合法性检查后,将其变成标准的 SQL 语句,通过 taosc 的异步接口,将请求发往整个系统中的任一 dnode。收到处理后的结果后,再翻译成 HTTP 协议,返回给应用。
+
+如果 HTTP 模块启动,就意味着启动了一个 taosc 的实例。任意一个 dnode 都可以启动该模块,以实现对 RESTful 请求的分布式处理。
+
+## Monitor 模块
+
+该模块负责检测一个 dnode 的运行状态,可以通过配置,由 dnode 启动或停止。原则上,每个 dnode
都应该启动一个 monitor 实例。 + +Monitor 采集 TDengine 里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集 CPU、内存、网络等资源的使用情况(采集周期由系统配置参数 monitorInterval 控制)。获得这些数据后,monitor 模块将采集的数据写入系统的日志库(DB 名字由系统配置参数 monitorDbName 控制)。 + +Monitor 模块使用 taosc 来将采集的数据写入系统,因此每个 monitor 实例,都有一个 taosc 运行实例。 diff --git a/docs-cn/21-tdinternal/12-tsz-compress.md b/docs/zh/21-tdinternal/12-tsz-compress.md similarity index 100% rename from docs-cn/21-tdinternal/12-tsz-compress.md rename to docs/zh/21-tdinternal/12-tsz-compress.md diff --git a/docs-cn/21-tdinternal/30-iot-big-data.md b/docs/zh/21-tdinternal/30-iot-big-data.md similarity index 100% rename from docs-cn/21-tdinternal/30-iot-big-data.md rename to docs/zh/21-tdinternal/30-iot-big-data.md diff --git a/docs-cn/21-tdinternal/_category_.yml b/docs/zh/21-tdinternal/_category_.yml similarity index 100% rename from docs-cn/21-tdinternal/_category_.yml rename to docs/zh/21-tdinternal/_category_.yml diff --git a/docs-en/21-tdinternal/dnode.webp b/docs/zh/21-tdinternal/dnode.webp similarity index 100% rename from docs-en/21-tdinternal/dnode.webp rename to docs/zh/21-tdinternal/dnode.webp diff --git a/docs-cn/21-tdinternal/index.md b/docs/zh/21-tdinternal/index.md similarity index 100% rename from docs-cn/21-tdinternal/index.md rename to docs/zh/21-tdinternal/index.md diff --git a/docs-en/21-tdinternal/message.webp b/docs/zh/21-tdinternal/message.webp similarity index 100% rename from docs-en/21-tdinternal/message.webp rename to docs/zh/21-tdinternal/message.webp diff --git a/docs-en/21-tdinternal/modules.webp b/docs/zh/21-tdinternal/modules.webp similarity index 100% rename from docs-en/21-tdinternal/modules.webp rename to docs/zh/21-tdinternal/modules.webp diff --git a/docs-en/21-tdinternal/multi_tables.webp b/docs/zh/21-tdinternal/multi_tables.webp similarity index 100% rename from docs-en/21-tdinternal/multi_tables.webp rename to docs/zh/21-tdinternal/multi_tables.webp diff --git a/docs-en/21-tdinternal/replica-forward.webp b/docs/zh/21-tdinternal/replica-forward.webp similarity index 100% rename from docs-en/21-tdinternal/replica-forward.webp rename to docs/zh/21-tdinternal/replica-forward.webp diff --git a/docs-en/21-tdinternal/replica-master.webp b/docs/zh/21-tdinternal/replica-master.webp similarity index 100% rename from docs-en/21-tdinternal/replica-master.webp rename to docs/zh/21-tdinternal/replica-master.webp diff --git a/docs-en/21-tdinternal/replica-restore.webp b/docs/zh/21-tdinternal/replica-restore.webp similarity index 100% rename from docs-en/21-tdinternal/replica-restore.webp rename to docs/zh/21-tdinternal/replica-restore.webp diff --git a/docs-en/21-tdinternal/structure.webp b/docs/zh/21-tdinternal/structure.webp similarity index 100% rename from docs-en/21-tdinternal/structure.webp rename to docs/zh/21-tdinternal/structure.webp diff --git a/docs-en/21-tdinternal/vnode.webp b/docs/zh/21-tdinternal/vnode.webp similarity index 100% rename from docs-en/21-tdinternal/vnode.webp rename to docs/zh/21-tdinternal/vnode.webp diff --git a/docs-en/21-tdinternal/write_master.webp b/docs/zh/21-tdinternal/write_master.webp similarity index 100% rename from docs-en/21-tdinternal/write_master.webp rename to docs/zh/21-tdinternal/write_master.webp diff --git a/docs-en/21-tdinternal/write_slave.webp b/docs/zh/21-tdinternal/write_slave.webp similarity index 100% rename from docs-en/21-tdinternal/write_slave.webp rename to docs/zh/21-tdinternal/write_slave.webp diff --git a/docs-cn/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md similarity index 100% rename from 
docs-cn/25-application/01-telegraf.md rename to docs/zh/25-application/01-telegraf.md diff --git a/docs-cn/25-application/02-collectd.md b/docs/zh/25-application/02-collectd.md similarity index 100% rename from docs-cn/25-application/02-collectd.md rename to docs/zh/25-application/02-collectd.md diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/03-immigrate.md new file mode 100644 index 0000000000000000000000000000000000000000..6978d47754b648b6c27ec11cbb9a7278d27ebf47 --- /dev/null +++ b/docs/zh/25-application/03-immigrate.md @@ -0,0 +1,423 @@ +--- +sidebar_label: OpenTSDB 迁移到 TDengine +title: OpenTSDB 应用迁移到 TDengine 的最佳实践 +--- + +作为一个分布式、可伸缩、基于 HBase 的分布式时序数据库系统,得益于其先发优势,OpenTSDB 被 DevOps 领域的人员引入并广泛地应用在了运维监控领域。但最近几年,随着云计算、微服务、容器化等新技术快速落地发展,企业级服务种类变得越来越多,架构也越来越复杂,应用运行基础环境日益多样化,给系统和运行监控带来的压力也越来越大。从这一现状出发,使用 OpenTSDB 作为 DevOps 的监控后端存储,越来越受困于其性能问题以及迟缓的功能升级,以及由此而衍生出来的应用部署成本上升和运行效率降低等问题,这些问题随着系统规模的扩大日益严重。 + +在这一背景下,为满足高速增长的物联网大数据市场和技术需求,在吸取众多传统关系型数据库、NoSQL 数据库、流计算引擎、消息队列等软件的优点之后,涛思数据自主开发出创新型大数据处理产品 TDengine。在时序大数据处理上,TDengine 有着自己独特的优势。就 OpenTSDB 当前遇到的问题来说,TDengine 能够有效解决。 + +相对于 OpenTSDB,TDengine 具有如下显著特点: + +- 数据写入和查询的性能远超 OpenTSDB; +- 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5; +- 安装部署非常简单,单一安装包完成安装部署,不依赖其他的第三方软件,整个安装部署过程秒级搞定; +- 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。 +- 支持多达 128 个标签,标签总长度可达到 16 KB; +- 除 REST 接口之外,还提供 C/C++、Java、Python、Go、Rust、Node.js、C#、Lua(社区贡献)、PHP(社区贡献)等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。 + +如果我们将原本运行在 OpenTSDB 上的应用迁移到 TDengine 上,不仅可以有效地降低计算和存储资源的占用、减少部署服务器的规模,还能够极大减少运行维护的成本的输出,让运维管理工作更简单、更轻松,大幅降低总拥有成本。与 OpenTSDB 一样,TDengine 也已经进行了开源,不同的是,除了单机版,后者还实现了集群版开源,被厂商绑定的顾虑一扫而空。 + +在下文中我们将就“使用最典型并广泛应用的运维监控(DevOps)场景”来说明,如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。后续的章节会做更深度的介绍,以便于进行非 DevOps 场景的迁移。 + +## DevOps 应用快速迁移 + +### 1、典型应用场景 + +一个典型的 DevOps 应用场景的系统整体的架构如下图(图 1) 所示。 + +**图 1. DevOps 场景中典型架构** +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "图1. 
DevOps 场景中典型架构") + +在该应用场景中,包含了部署在应用环境中负责收集机器度量(Metrics)、网络度量(Metrics)以及应用度量(Metrics)的 Agent 工具、汇聚 Agent 收集信息的数据收集器,数据持久化存储和管理的系统以及监控数据可视化工具(例如:Grafana 等)。 + +其中,部署在应用节点的 Agents 负责向 collectd/Statsd 提供不同来源的运行指标,collectd/StatsD 则负责将汇聚的数据推送到 OpenTSDB 集群系统,然后使用可视化看板 Grafana 将数据可视化呈现出来。 + +### 2、迁移服务 + +- **TDengine 安装部署** + +首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 + +注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后再启动。 + +- **调整数据收集器配置** + +在 TDengine 2.4 版本中,包含一个组件 taosAdapter。taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 + +用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 + +通过 taosAdapter,用户可以将 collectd 或 StatsD 收集的数据直接推送到 TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter 还支持 Telegraf、Icinga、TCollector 、node_exporter 的数据接入,使用详情参考[taosAdapter](/reference/taosadapter/)。 + +如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为 192.168.1.130,端口为 6046,配置如下: + +```html +LoadPlugin write_tsdb + + + Host "192.168.1.130" Port "6046" HostTags "status=production" StoreRates + false AlwaysAppendDS false + + +``` + +即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 TDengine 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 + +- **调整看板(Dashboard)系统** + +在数据能够正常写入 TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用 TDengine 提供的 Grafana 插件请参考[与其他工具的连接](/third-party/grafana)。 + +TDengine 提供了默认的两套 Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到 Grafana 中即可激活使用。 + +**图 2. 导入 Grafana 模板** +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "图2. 导入 Grafana 模板") + +操作完以上步骤后,就完成了将 OpenTSDB 替换成为 TDengine 的迁移工作。可以看到整个流程非常简单,不需要写代码,只需要对某些配置文件进行调整即可完成全部的迁移工作。 + +### 3、迁移后架构 + +完成迁移以后,此时的系统整体的架构如下图(图 3)所示,而整个过程中采集端、数据写入端、以及监控呈现端均保持了稳定,除了极少的配置调整外,不涉及任何重要的更改和变动。OpenTSDB 大量的应用场景均为 DevOps ,这种场景下,简单的参数设置即可完成 OpenTSDB 到 TDengine 迁移动作,使用上 TDengine 更加强大的处理能力和查询性能。 + +在绝大多数的 DevOps 场景中,如果你拥有一个小规模的 OpenTSDB 集群(3 台及以下的节点)作为 DevOps 的存储端,依赖于 OpenTSDB 为系统持久化层提供数据存储和查询功能,那么你可以安全地将其替换为 TDengine,并节约更多的计算和存储资源。在同等计算资源配置情况下,单台 TDengine 即可满足 3 ~ 5 台 OpenTSDB 节点提供的服务能力。如果规模比较大,那便需要采用 TDengine 集群。 + +如果你的应用特别复杂,或者应用领域并不是 DevOps 场景,你可以继续阅读后续的章节,更加全面深入地了解将 OpenTSDB 的应用迁移到 TDengine 的高级话题。 + +**图 3. 迁移完成后的系统架构** +![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "图 3. 迁移完成后的系统架构") + +## 其他场景的迁移评估与策略 + +### 1、TDengine 与 OpenTSDB 的差异 + +本章将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本章的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 + +TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 + +在 2.3.0.x 版本中,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 + +此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: + +1. `/api/stats`:如果你的应用中使用了该项特性来监控 OpenTSDB 的服务状态,并在应用中建立了相关的逻辑来联动处理,那么这部分状态读取和获取的逻辑需要重新适配到 TDengine。TDengine 提供了全新的处理集群状态监控机制,来满足你的应用对其进行的监控和维护的需求。 +2. 
+2. `/api/tree`:如果你依赖于 OpenTSDB 的该项特性来进行时间线的层级化组织和维护,那么便无法将其直接迁移至 TDengine。TDengine 采用了数据库->超级表->子表这样的层级来组织和维护时间线,归属于同一个超级表的所有时间线在系统中处于同一个层级,但是可以通过不同标签值的特殊构造来模拟应用逻辑上的多级结构。
+3. `Rollup And PreAggregates`:采用了 Rollup 和 PreAggregates,需要应用来决定在合适的地方访问 Rollup 的结果,在某些场景下又要访问原始的结果,这种结构的不透明性让应用处理逻辑变得极为复杂而且完全不具有移植性。我们认为这种策略是时序数据库无法提供高性能聚合情况下的妥协与折中。TDengine 暂不支持多个时间线的自动降采样和(时间段范围的)预聚合,但由于其拥有高性能的查询处理逻辑,即使不依赖于 Rollup 和(时间段)预聚合的计算结果,也能够提供很高性能的查询响应,而且让你的应用查询处理逻辑更加简单。
+4. `Rate`:TDengine 提供了两个计算数值变化率的函数,分别是 Derivative(其计算结果与 InfluxDB 的 Derivative 行为一致)和 IRate(其计算结果与 Prometheus 中的 IRate 函数计算结果一致)。这两个函数的计算结果与 Rate 有细微的差别,但整体上功能更强大。此外,**OpenTSDB 提供的所有计算函数,TDengine 均有对应的查询函数支持,并且 TDengine 的查询函数功能远超过 OpenTSDB 支持的查询函数,**可以极大地简化你的应用处理逻辑。
+
+通过上面的介绍,相信你应该能够了解 OpenTSDB 迁移到 TDengine 带来的变化,这些信息也有助于你正确地判断是否可以接受将应用迁移到 TDengine 之上,并体验 TDengine 强大的时序数据处理能力和便捷的使用方式。
+
+### 2、迁移策略
+
+首先完成迁移涉及的数据模式设计、系统规模估算、数据写入端改造,再进行数据分流和应用适配工作;之后将两个系统并行运行一段时间,再将历史数据迁移到 TDengine 中。当然如果你的应用中有部分功能强依赖于上述 OpenTSDB 特性,同时又不希望停止使用,可以考虑保持原有的 OpenTSDB 系统运行,同时启动 TDengine 来提供主要的服务。
+
+## 数据模型设计
+
+一方面,TDengine 要求其入库的数据具有严格的模式定义。另一方面,TDengine 的数据模型相对于 OpenTSDB 来说又更加丰富,多值模型能够兼容全部的单值模型的建立需求。
+
+现在让我们假设一个 DevOps 的场景,我们使用了 collectd 收集设备的基础度量(metrics),包含了 memory、swap、disk 等几个度量,其在 OpenTSDB 中的模式如下:
+
+| 序号 | 测量(metric) | 值名称 | 类型 | tag1 | tag2 | tag3 | tag4 | tag5 |
+| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ |
+| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a |
+| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a |
+| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
+
+TDengine 要求存储的数据具有数据模式,即写入数据之前需创建超级表并指定超级表的模式。对于数据模式的建立,你有两种方式来完成此项工作:1)充分利用 TDengine 对 OpenTSDB 的数据原生写入的支持,调用 TDengine 提供的 API 将(文本行或 JSON 格式)数据写入,并自动化地建立单值模型。采用这种方式不需要对数据写入应用进行较大的调整,也不需要对写入的数据格式进行转换。
+
+在 C 语言层面,TDengine 提供了 `taos_schemaless_insert()` 函数来直接写入 OpenTSDB 格式的数据(在更早版本中该函数名称是 `taos_insert_lines()`)。其代码参考示例请参见安装包目录下示例代码 schemaless.c。
+
+2)在充分理解 TDengine 的数据模型基础上,结合生成数据的特点,手动方式建立 OpenTSDB 到 TDengine 的数据模型调整的映射关系。TDengine 能够支持多值模型和单值模型,考虑到 OpenTSDB 均为单值映射模型,这里推荐使用单值模型在 TDengine 中进行建模。
+
+- **单值模型**
+
+具体步骤如下:将度量(metrics)的名称作为 TDengine 超级表的名称,该超级表建成后具有两个基础的数据列,即时间戳(timestamp)和值(value);超级表的标签等效于度量的标签信息,标签数量等同于度量的标签的数量。子表的表名采用具有固定规则的方式进行命名:`metric + '_' + tag1_value + '_' + tag2_value + '_' + tag3_value ...` 作为子表名称。
+
+在 TDengine 中建立 3 个超级表:
+
+```sql
+create stable memory(ts timestamp, val double) tags(host binary(12), memory_type binary(20), memory_type_instance binary(20), source binary(20));
+create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20));
+create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20));
+```
+
+对于子表使用动态建表的方式创建如下所示:
+
+```sql
+insert into memory_vm130_memory_buffered_collectd using memory tags('vm130', 'memory', 'buffer', 'collectd') values(1632979445, 3.0656);
+```
+
+最终系统中会建立 340 个左右的子表,3 个超级表。需要注意的是,如果采用串联标签值的方式导致子表名称超过系统限制(191 字节),那么需要采用一定的编码方式(例如 MD5)将其转化为可接受长度。
+
+- **多值模型**
+
+如果你想要利用 TDengine 的多值模型能力,需要首先满足以下要求:不同的采集量具有相同的采集频率,且能够通过消息队列**同时到达**数据写入端,从而确保使用 SQL 语句将多个指标一次性写入。将度量的名称作为超级表的名称,建立具有相同采集频率且能够同时到达的数据多列模型。子表的表名采用具有固定规则的方式进行命名。上述每个度量均只包含一个测量值,因此无法将其转化为多值模型。
+
+## 数据分流与应用适配
+
+从消息队列中订阅数据,并启动调整后的写入程序写入数据。
+
+数据开始写入持续一段时间后,可以采用 SQL 语句检查写入的数据量是否符合预计的写入要求。统计数据量使用如下 SQL 语句:
+
+```sql
+select count(*) from memory
+```
+
+完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可以确认数据写入是完整有效的。
+
+TDengine 不支持采用 OpenTSDB 的查询语法进行查询或数据获取处理,但是针对 OpenTSDB 的每种查询都提供了对应的支持,可以检查附录 1,获取对应的查询处理的调整和应用使用的方式;如果需要全面了解 TDengine 支持的查询类型,请参阅 TDengine 的用户手册。
+
+TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。
+
+## 历史数据迁移
+
+### 1、使用工具自动迁移数据
+
+为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中。需要注意的是,DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。
+
+DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。
+
+在对 DataX 进行迁移实践后,我们发现通过启动多个进程、同时迁移多个 metric 的方式,可以大幅度地提高迁移历史数据的效率。下面是迁移过程中的部分记录,希望能为应用迁移工作带来参考:
+
+| DataX 实例个数(并发进程个数) | 迁移记录速度(条/秒) |
+| ----------------------------- | --------------------- |
+| 1 | 约 13.9 万 |
+| 2 | 约 21.8 万 |
+| 3 | 约 24.9 万 |
+| 5 | 约 29.5 万 |
+| 10 | 约 33 万 |
+(注:测试数据源自单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz、16 核、64G 内存的硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag)
+
+### 2、手动迁移数据
+
+如果你需要使用多值模型进行数据写入,就需要自行开发一个将数据从 OpenTSDB 导出的工具,然后确认哪些时间线能够合并导入到同一个时间线,再将可以同时导入的时间线通过 SQL 语句写入到数据库中。
+
+手动迁移数据需要注意以下两个问题:
+
+1)在磁盘中存储导出数据时,磁盘需要有足够的存储空间以便能够充分容纳导出的数据文件。为了避免全量数据导出后导致磁盘文件存储紧张,可以采用部分导入的模式,对于归属于同一个超级表的时间线优先导出,然后将导出部分的数据文件导入到 TDengine 系统中。
+
+2)在系统全负载运行下,如果有足够的剩余计算和 IO 资源,可以建立多线程的导入机制,最大限度地提升数据迁移的效率。考虑到数据解析对于 CPU 带来的巨大负载,需要控制最大的并行任务数量,以避免因导入历史数据而触发的系统整体过载。
+
+由于 TDengine 本身操作简便,不需要在整个过程中进行索引维护、数据格式的变化处理等工作,整个过程只需要顺序执行即可。
+
+当历史数据完全导入到 TDengine 以后,此时两个系统处于同时运行的状态,之后便可以将查询请求切换到 TDengine 上,从而实现无缝的应用切换。
+
+## 附录 1: OpenTSDB 查询函数对应表
+
+### Avg
+
+等效函数:avg
+
+示例:
+
+```sql
+SELECT avg(val) FROM (SELECT first(val) val FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) fill(linear)) INTERVAL(20s)
+```
+
+备注:
+
+1. Interval 内的数值与外层查询的 interval 数值需要相同。
+2. 在 TDengine 中插值处理需要使用子查询来协助完成,如上所示,在内层查询中指明插值类型即可,由于 OpenTSDB 中数值的插值使用了线性插值,因此在插值子句中使用 fill(linear) 来声明插值类型。以下有相同插值计算需求的函数,均采用该方法处理。
+3. Interval 中参数 20s 表示将内层查询按照 20 秒一个时间窗口生成结果。在真实的查询中,需要调整为不同的记录之间的时间间隔。这样可确保等效于原始数据生成了插值结果。
+4. 由于 OpenTSDB 特殊的插值策略和机制,聚合查询(Aggregate)中先插值再计算的方式导致其计算结果与 TDengine 不可能完全一致。但是在降采样(Downsample)的情况下,TDengine 和 OpenTSDB 能够获得一致的结果(由于 OpenTSDB 在聚合查询和降采样查询中采用了完全不同的插值策略)。
+
+### Count
+
+等效函数:count
+
+示例:
+
+```sql
+select count(*) from super_table_name;
+```
+
+### Dev
+
+等效函数:stddev
+
+示例:
+
+```sql
+Select stddev(val) from table_name
+```
+
+### Estimated percentiles
+
+等效函数:apercentile
+
+示例:
+
+```sql
+Select apercentile(col1, 50, "t-digest") from table_name
+```
+
+备注:
+
+1. 近似查询处理过程中,OpenTSDB 默认采用 t-digest 算法,所以为了获得相同的计算结果,需要在 apercentile 函数中指明使用的算法。TDengine 能够支持两种不同的近似处理算法,分别通过 "default" 和 "t-digest" 来声明。
+
+### First
+
+等效函数:first
+
+示例:
+
+```sql
+Select first(col1) from table_name
+```
+
+### Last
+
+等效函数:last
+
+示例:
+
+```sql
+Select last(col1) from table_name
+```
+
+### Max
+
+等效函数:max
+
+示例:
+
+```sql
+Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
+```
+
+备注:Max 函数需要插值,原因见上。
+
+### Min
+
+等效函数:min
+
+示例:
+
+```sql
+Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s);
+```
+
+### MinMax
+
+等效函数:max
+
+```sql
+Select max(val) from table_name
+```
+
+备注:该函数无插值需求,因此可以直接计算。
+
+### MimMin
+
+等效函数:min
+
+```sql
+Select min(val) from table_name
+```
+
+备注:该函数无插值需求,因此可以直接计算。
+
+### Percentile
+
+等效函数:percentile
+
+```sql
+Select percentile(col1, 50) from table_name
+```
+
+备注:percentile 为精确的百分位数计算,无插值需求,可以直接计算;近似计算请参考上文 Estimated percentiles(apercentile)。
+
+### Sum
+
+等效函数:sum
+
+```sql
+Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
+```
+
+备注:Sum 函数需要插值,原因见上;无插值需求的求和请参考下文 Zimsum。
+
+### Zimsum
+
+等效函数:sum
+
+```sql
+Select sum(val) from table_name
+```
+
+备注:该函数无插值需求,因此可以直接计算。
+
+完整示例:
+
+```json
+// OpenTSDB 查询 JSON
+query = {
+"start": 1510560000,
+"end": 1515000009,
+"queries": [{
+"aggregator": "count",
+"metric": "cpu.usage_user"
+}]
+}
+
+// 等效查询 SQL:
+SELECT count(*)
+FROM `cpu.usage_user`
+WHERE ts >= 1510560000 AND ts <= 1515000009
+```
+
+## 附录 2: 资源估算方法
+
+### 数据生成环境
+
+我们使用如下假设环境,共 3 个测量值:温度和湿度的数据写入速率是每 5 秒一条记录,时间线 10 万个;空气质量的写入速率是 10 秒一条记录,时间线 1 万个;查询的请求频率为 500 QPS。
+
+### 存储资源估算
+
+假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为 `t` 条/秒,每条记录的长度为 `L` bytes,则每天产生的数据规模为 `86400×n×t×L` bytes。假设压缩比为 C,则每日产生数据规模为 `(86400×n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要的存储资源:
+
+```matlab
+(86400×n×t×L)×(365×1.5)×(1+20%)/C
+```
+
+结合以上的计算公式,将参数代入计算公式,在不考虑标签信息的情况下,每年产生的原始数据规模是 11.8TB。需要注意的是,由于标签信息在 TDengine 中关联到每个时间线,并不是每条记录,所以需要记录的数据量规模相对于产生的数据有一定的降低,而这部分标签数据整体上可以忽略不计。假设压缩比为 5,则保留的数据规模最终为 2.56 TB。
+
+### 存储设备选型考虑
+
+硬盘应该选用具有较好随机读性能的硬盘设备,如果能够有 SSD,尽可能考虑使用 SSD。较好的随机读性能的磁盘对于提升系统查询性能具有极大的帮助,能够整体上提升系统的查询响应性能。为了获得较好的查询性能,硬盘设备的单线程随机读 IOPS 的性能指标不应该低于 1000,能够达到 5000 IOPS 以上为佳。为了评估当前设备随机读取的 IO 性能,建议使用 `fio` 软件对其进行运行性能评估(具体的使用方式请参阅附录 1),确认其是否能够满足大文件随机读性能要求。
+
+硬盘写性能对于 TDengine 的影响不大。TDengine 写入过程采用了追加写的模式,所以只要有较好的顺序写性能即可,一般意义上的 SAS 硬盘和 SSD 均能够很好地满足 TDengine 对于磁盘写入性能的要求。
+
+### 计算资源估算
+
+由于物联网数据的特殊性,数据产生的频率固定以后,TDengine 写入的过程对于(计算和存储)资源的消耗都保持一个相对固定的量。根据《[TDengine 运维指南](/operation/)》上的描述,该系统中每秒 22000 次写入,消耗的 CPU 不到 1 个核。
+
+在针对查询所需要消耗的 CPU 资源的估算上,假设应用要求数据库提供的 QPS 为 10000,每次查询消耗的 CPU 时间约 1 ms,那么每个核每秒提供的查询为 1000 QPS,满足 10000 QPS 的查询请求,至少需要 10 个核。为了让系统整体上 CPU 负载小于 50%,整个集群需要 10 个核的两倍,即 20 个核。
+
+### 内存资源估算
+
+数据库默认为每个 Vnode 分配内存 16MB\*3 缓冲区,集群系统包括 22 个 CPU 核,则默认会建立 22 个虚拟节点 Vnode,每个 Vnode 包含 1000 张表,即可以容纳所有的表。大约 1 个半小时写满一个 block,从而触发落盘,可以不做调整。22 个 Vnode 共计需要内存缓存约 1GB。考虑到查询所需要的内存,假设每次查询的内存开销约 50MB,则 500 个查询并发需要的内存约 25GB。
+
+综上所述,可使用单台 16 核 32GB 的机器,或者使用 2 台 8 核 16GB 机器构成的集群。
+
+## 附录 3: 集群部署及启动
+
+TDengine 提供了丰富的帮助文档,说明集群安装、部署的诸多方面的内容,这里提供相应的文档列表,供你参考。
+
+### 集群部署
+
+首先是安装 TDengine,从官网上下载 TDengine 最新稳定版,解压缩后运行 install.sh 进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
+
+注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后才启动 `taosd` 服务。
+
+### 设置运行参数并启动服务
+
+为确保系统能够正常获取运行的必要信息,请在服务端正确设置以下关键参数:
+
+FQDN、firstEp、secondEp、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](../../cluster/)》。
+
+按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。
+
+最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有加入集群的节点,那么集群就顺利搭建完成了。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](../../cluster/)》。
+
+## 附录 4: 超级表名称
+
+由于 OpenTSDB 的 metric 名称中带有点号(“.”),例如 “cpu.usage_user” 这种名称的 metric,而点号在 TDengine 中具有特殊含义,是用来分隔数据库和表名称的分隔符,因此 TDengine 提供了转义符,以允许用户在(超级)表名称中使用关键词或特殊分隔符(如点号)。为了使用特殊字符,需要采用转义字符将表的名称括起来,例如:`cpu.usage_user` 这样就是合法的(超级)表名称。
+
+## 附录 5:参考文章
+
+1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](/application/collectd/)
+2. 
[通过 collectd 将采集数据直接写入 TDengine](/third-party/collectd/) diff --git a/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs/zh/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp rename to docs/zh/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp b/docs/zh/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp rename to docs/zh/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp b/docs/zh/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp rename to docs/zh/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp b/docs/zh/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp rename to docs/zh/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs/zh/25-application/IT-DevOps-Solutions-Telegraf.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp rename to docs/zh/25-application/IT-DevOps-Solutions-Telegraf.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp b/docs/zh/25-application/IT-DevOps-Solutions-collectd-dashboard.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp rename to docs/zh/25-application/IT-DevOps-Solutions-collectd-dashboard.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp b/docs/zh/25-application/IT-DevOps-Solutions-statsd-dashboard.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp rename to docs/zh/25-application/IT-DevOps-Solutions-statsd-dashboard.webp diff --git a/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs/zh/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp similarity index 100% rename from docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp rename to docs/zh/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp diff --git a/docs-cn/25-application/_category_.yml b/docs/zh/25-application/_category_.yml similarity index 100% rename from docs-cn/25-application/_category_.yml rename to docs/zh/25-application/_category_.yml diff --git a/docs-cn/25-application/index.md b/docs/zh/25-application/index.md similarity index 100% rename from docs-cn/25-application/index.md rename to docs/zh/25-application/index.md diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md new file mode 100644 index 0000000000000000000000000000000000000000..11458b565dde027c9e818619c2bff78415e3a12c --- /dev/null +++ b/docs/zh/27-train-faq/01-faq.md @@ -0,0 +1,263 @@ +--- +title: 常见问题及反馈 +--- + +## 问题反馈 + +如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: + +1. /var/log/taos (如果没有修改过默认路径) +2. 
/etc/taos
+
+附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。
+
+为了保证有足够的 debug 信息,如果问题能够重复出现,请修改 /etc/taos/taos.cfg 文件,在最后面添加一行 “debugFlag 135”(不带引号),然后重启 taosd,复现问题后再提交 issue。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。
+
+```
+ alter dnode <dnode_id> debugFlag 135;
+```
+
+但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。
+
+## 常见问题列表
+
+### 1. TDengine2.0 之前的版本升级到 2.0 及以上的版本应该注意什么?☆☆☆
+
+2.0 版本在之前版本的基础上进行了完全重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
+
+1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg`
+2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/`
+3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/`
+4. 安装最新稳定版本的 TDengine
+5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队协助解决
+
+### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
+
+### 3. 创建数据表时提示 more dnodes are needed
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
+
+### 4. 如何让 TDengine crash 时生成 core 文件?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
+
+### 5. 遇到错误 “Unable to establish connection” 怎么办?
+
+客户端遇到连接故障,请按照下面的步骤进行检查:
+
+1. 检查网络环境
+
+   - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030-6042 的访问权限
+   - 本地虚拟机:检查网络能否 ping 通,尽量避免使用 `localhost` 作为 hostname
+   - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回至客户端
+
+2. 确保客户端与服务端版本号完全一致,开源社区版和企业版也不能混用
+
+3. 在服务器上执行 `systemctl status taosd` 检查 *taosd* 运行状态。如果没有运行,启动 *taosd*
+
+4. 确认客户端连接时指定了正确的服务器 FQDN(Fully Qualified Domain Name,可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
+
+5. ping 服务器 FQDN,如果没有反应,请检查你的网络、DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。
+
+6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。
+
+7. 对于 Linux 上的 JDBC(ODBC、Python、Go 等接口类似)连接,确保 *libtaos.so* 在目录 */usr/local/taos/driver* 里,并且 */usr/local/taos/driver* 在系统库函数搜索路径 *LD_LIBRARY_PATH* 里
+
+8. 对于 Windows 上的 JDBC、ODBC、Python、Go 等连接,确保 *C:\TDengine\driver\taos.dll* 在你的系统库函数搜索目录里(建议将 *taos.dll* 放在目录 _C:\Windows\System32_)
+
+9. 如果仍不能排除连接故障
+
+   - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
+     检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port}`
+     检查服务器侧 TCP 端口连接是否工作:`nc -l {port}`
+     检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}`
+
+   - Windows 系统请使用 PowerShell 命令 `Test-NetConnection -ComputerName {fqdn} -Port {port}` 检测服务端端口是否可访问
+
+10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括 TCP 和 UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。
+
+### 6. 遇到错误 “Unexpected generic error in RPC” 或者 “Unable to resolve FQDN” 怎么办?
+
+产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查:
+
+1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
+2. 如果网络配置有 DNS server,请检查其是否正常工作
+3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否已配置,并具有正确的 IP 地址
+4. 如果网络配置正常,从客户端所在机器,你需要能 ping 通该连接的 FQDN,否则客户端是无法连接服务器的
+5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录下的 dnodeEps.json 是否符合当前配置的 EP,路径默认为 /var/lib/taos/dnode。正常情况下,建议更换新的数据目录,或者备份后删除以前的数据目录,这样可以避免该问题。
+6. 检查 /etc/hosts 和 /etc/hostname 中是否是预先配置的 FQDN
+
+### 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误?
+
+如果你确认语法正确,且使用的是 2.0 之前的版本,请检查 SQL 语句长度是否超过 64K。如果超过,也会返回这个错误。
+
+### 8. 是否支持 validation queries?
+
+TDengine 还没有一组专用的 validation queries,建议使用系统监测的数据库 “log” 来做,例如执行如下所示的轻量查询。
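+
+下面是一个可用作连接探活的轻量查询示意(TDengine 2.x 提供 server_status() 函数;此处的函数选择仅为示例,也可以换成对 log 库中任意小表的查询):
+
+```sql
+-- 返回 1 表示服务端正常
+SELECT server_status();
+```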
+
+### 9. 我可以删除或更新一条记录吗?
+
+TDengine 的删除功能只在 2.6.0.0 及以后的企业版中提供。
+
+从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持通过 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
+
+另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows(所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。如何选择这种覆盖关系,取决于在数据的后续使用和统计中,希望以先生成还是后生成的数据为准。
+
+此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。UPDATE 为 1 时的覆盖行为可参考下面的示例。
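+
+以下为一个最小示意(其中库名 demo_db、表名 t1 均为假设的示例名称):
+
+```sql
+-- 建库时指定 UPDATE 1,相同时间戳的后写入数据会覆盖先写入的数据
+CREATE DATABASE demo_db UPDATE 1;
+USE demo_db;
+CREATE TABLE t1 (ts TIMESTAMP, val INT);
+INSERT INTO t1 VALUES ('2022-01-01 00:00:00.000', 1);
+INSERT INTO t1 VALUES ('2022-01-01 00:00:00.000', 2);
+-- 查询结果中 val 为 2:后写入的数据覆盖了先写入的数据
+SELECT * FROM t1;
+```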
+
+### 10. 我怎么创建超过 1024 列的表?
+
+使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。)
+
+### 11. 最有效的写入数据的方法是什么?
+
+批量插入。每条写入语句既可以向一张表同时插入多条记录,也可以同时向多张表插入多条记录。
+
+### 12. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决?
+
+Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区已设置成中国(可以在 Control Panel 里设置),这时 cmd 中的 `taos` 客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse、IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下:
+
+```JAVA
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+Properties properties = new Properties();
+properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
+Connection conn = DriverManager.getConnection(url, properties);
+```
+
+### 13. Windows 系统下客户端无法正常显示中文字符?
+
+Windows 系统中一般采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8。在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
+
+【 v2.2.1.5 以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
+
+```
+locale C
+charset UTF-8
+```
+
+### 14. JDBC 报错:the executed SQL is not a DML or a DDL?
+
+请更新至最新的 JDBC 驱动,参考 [Java 连接器](../../reference/connector/java)。
+
+### 15. taos connect failed, reason: invalid timestamp
+
+常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式校准(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)。
+
+### 16. 表名显示不全
+
+由于 taos shell 在终端中的显示宽度有限,比较长的表名可能显示不全,如果按照显示不全的表名进行相关操作,会发生 Table does not exist 错误。解决方法是,修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth,或者直接输入命令 set max_binary_display_width 100,也可以在命令结尾使用 \G 参数来调整结果的显示方式。
+
+### 17. 如何进行数据迁移?
+
+TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动到机器 B 时,需要注意如下事项:
+
+  - 2.0.0.0 至 2.0.6.x 的版本,重新配置机器 B 的 hostname 为机器 A 的 hostname。
+  - 2.0.7.0 及以后的版本,到 /var/lib/taos/dnode 下,修改 dnodeEps.json 中 dnodeId 对应的 FQDN 后重启,并确保集群内所有机器的此文件完全相同。
+  - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具,或者自行开发应用导出、导入数据。
+
+### 18. 如何在命令行程序 taos 中临时调整日志级别
+
+为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令:
+
+```sql
+ALTER LOCAL flag_name flag_value;
+```
+
+其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置):
+
+  - flag_name 的取值可以是:debugFlag、cDebugFlag、tmrDebugFlag、uDebugFlag、rpcDebugFlag
+  - flag_value 的取值可以是:131(输出错误和警告日志)、135(输出错误、警告和调试日志)、143(输出错误、警告、调试和跟踪日志)
+
+```sql
+ALTER LOCAL RESETLOG;
+```
+
+其含义是,清空本机所有由客户端生成的日志文件。
+
+### 19. go 语言编写组件编译失败怎样解决?
+
+TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
+使用最新 develop 分支代码编译时,需要先执行 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
+
+目前默认的编译方式会自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 受限的问题,可以通过设置 go 环境变量来解决:
+
+```sh
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.cn,direct
+```
+
+如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,即使用 `cmake .. -DBUILD_HTTP=true` 重新编译,以启用原来内置的 httpd。
+
+### 20. 如何查询数据占用的存储空间大小?
+
+默认情况下,TDengine 的数据文件存储在 /var/lib/taos,日志文件存储在 /var/log/taos。
+
+若想查看所有数据文件占用的具体大小,可以执行 Shell 指令 `du -sh /var/lib/taos/vnode --exclude='wal'`。此处排除了 WAL 目录,因为在持续写入的情况下,该目录大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。
+
+若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;`,再通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看对应文件夹的大小。
+
+若仅仅想查看指定(超级)表的数据块分布及大小,可查看 [_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)。
+
+### 21. 客户端连接串如何保证高可用?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)。
+
+### 22. 时间戳的时区信息是怎样处理的?
+
+TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
+
+客户端在处理时间戳字符串时,会采取如下逻辑:
+
+1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
+2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
+3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
+4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
+
+### 23. TDengine 2.0 都会用到哪些网络端口?
+
+使用到的网络端口请看文档:[serverport](/reference/config/#serverport)。
+
+需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明的,如果修改了配置文件中的设置,那么列举的端口都会随之变化,管理员可以参考上述信息调整防火墙设置。
+
+### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?
+
+taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前,RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能,需要执行 `systemctl start taosadapter` 命令来启动 taosAdapter 服务。
+
+需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos;日志等级 logLevel 有 8 个等级,默认等级是 info,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml。
+
+有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)。
+
+### 25. 发生了 OOM 怎么办?
+
+OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP)不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致:一是剩余内存小于 vm.min_free_kbytes;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM。
+
+TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP。除此之外,查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
+
+### 26. 为何批量写入数据时,时间戳使用 NOW 函数拼接会导致数据丢失?
+
+首先需要强调一个概念:TDengine 作为一个时序数据库(Time-Series Database),首个时间戳字段起到主键的作用,内存索引的构建、磁盘数据的存储都与其密切相关,不能有重复的时间戳。
+
+NOW 函数(以及 NOW 关键字)返回客户端当前时间。当执行批量写入时,若首列时间戳给的值都是 NOW,在数据库默认毫秒的时间精度下是区分不开的(建库时可选择更高的时间精度),后续写入的重复时间戳将会丢失或被更新,处理重复时间戳的具体逻辑由建库时的 UPDATE 参数决定。
+
+### 27. 扩容集群后,DNode 状态为 Offline 怎么办?
+
+新的节点正常加入集群后,数据节点列表中会显示该节点处于 Ready 状态。若该节点状态为 Offline,可按照如下内容进行排查:
+
+1. 查看该节点 taosd 是否已启动、防火墙是否关闭;
+2. 确认新增节点的数据文件夹是否已清空;
+3. 检查所有节点的 /etc/hosts 域名解析是否完整、有效(需要有所有节点的解析,包括 arbitrator);
+4. 检查该节点的 firstEp、fqdn 参数是否正确配置。
+
+### 28. 能提供 TDengine 的建模实例吗?
+ +在社区支持的过程中,能发现很多新手小伙伴在部署 TDengine 后不知道如何进一步体验,我们的建议是跑一跑官网文档的语句。文档内容较多,为了方便新手小伙伴快速上手,我们将官网文档的示例模型浓缩、汇总了一下,希望尽可能快的让大家了解 TDengine 建模方法:[建模入门](https://github.com/taosdata/tdengine-modeling-and-querying-101/blob/main/cases/001-electricity-meter-monitoring.zh-hans.md) + +同时也欢迎社区的用户们为仓库 [tdengine-modeling-and-querying-101](https://github.com/taosdata/tdengine-modeling-and-querying-101) 提交 PR,展现 TDengine 在各行各业的建模实例。 + diff --git a/docs-cn/27-train-faq/02-video.mdx b/docs/zh/27-train-faq/02-video.mdx similarity index 100% rename from docs-cn/27-train-faq/02-video.mdx rename to docs/zh/27-train-faq/02-video.mdx diff --git a/docs/zh/27-train-faq/03-docker.md b/docs/zh/27-train-faq/03-docker.md new file mode 100644 index 0000000000000000000000000000000000000000..1a0285fe4a783fbb09aafe1df09892b3f604a5fa --- /dev/null +++ b/docs/zh/27-train-faq/03-docker.md @@ -0,0 +1,330 @@ +--- +title: 通过 Docker 快速体验 TDengine +--- + +虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 macOS 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从 2.0.14.0 版本开始,TDengine 提供的镜像已经可以同时支持 X86-64、X86、arm64、arm32 平台,像 NAS、树莓派、嵌入式开发板之类可以运行 docker 的非主流计算机也可以基于本文档轻松体验 TDengine。 + +下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 + +## 下载 Docker + +Docker 工具自身的下载请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。 + +安装完毕后可以在命令行终端查看 Docker 版本。如果版本号正常输出,则说明 Docker 环境已经安装成功。 + +```bash +$ docker -v +Docker version 20.10.3, build 48d30b5 +``` + +## 使用 Docker 在容器中运行 TDengine + +### 在 Docker 容器中运行 TDengine server + +```bash +$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine +526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd +``` + +这条命令,启动一个运行了 TDengine server 的 docker 容器,并且将容器的 6030 到 6049 端口映射到宿主机的 6030 到 6049 端口上。如果宿主机已经运行了 TDengine server 并占用了相同端口,需要映射容器的端口到不同的未使用端口段。(详情参见 [TDengine 2.0 端口说明](/train-faq/faq#port)。为了支持 TDengine 客户端操作 TDengine server 服务, TCP 和 UDP 端口都需要打开。 + +- **docker run**:通过 Docker 运行一个容器 +- **-d**:让容器在后台运行 +- **-p**:指定映射端口。注意:如果不是用端口映射,依然可以进入 Docker 容器内部使用 TDengine 服务或进行应用开发,只是不能对容器外部提供服务 +- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像 +- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器 + +进一步,还可以使用 docker run 命令启动运行 TDengine server 的 docker 容器,并使用 `--name` 命令行参数将容器命名为 `tdengine`,使用 `--hostname` 指定 hostname 为 `tdengine-server`,通过 `-v` 挂载本地目录到容器,实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 + +```bash +docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine +``` + +- **--name tdengine**:设置容器名称,我们可以通过容器名称来访问对应的容器 +- **--hostname=tdengine-server**:设置容器内 Linux 系统的 hostname,我们可以通过映射 hostname 和 IP 来解决容器 IP 可能变化的问题。 +- **-v**:设置宿主机文件目录映射到容器内目录,避免容器删除后数据丢失。 + +### 使用 docker ps 命令确认容器是否已经正确运行 + +```bash +docker ps +``` + +输出示例如下: + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS ··· +c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +``` + +- **docker ps**:列出所有正在运行状态的容器信息。 +- **CONTAINER ID**:容器 ID。 +- **IMAGE**:使用的镜像。 +- **COMMAND**:启动容器时运行的命令。 +- **CREATED**:容器创建时间。 +- **STATUS**:容器状态。UP 表示运行中。 + +### 通过 docker exec 命令,进入到 docker 容器中去做开发 + +```bash +$ docker exec -it tdengine /bin/bash +root@tdengine-server:~/TDengine-server-2.4.0.4# +``` + +- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 +- **-i**:进入交互模式。 +- **-t**:指定一个终端。 +- **tdengine**:容器名称,需要根据 docker ps 指令返回的值进行修改。 
+- **/bin/bash**:载入容器后运行 bash 来进行交互。 + +进入容器后,执行 taos shell 客户端程序。 + +```bash +root@tdengine-server:~/TDengine-server-2.4.0.4# taos + +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 + +在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](/taos-sql/)。 + +### 在宿主机访问 Docker 容器中的 TDengine server + +在使用了 -p 命令行参数映射了正确的端口启动了 TDengine Docker 容器后,就在宿主机使用 taos shell 命令即可访问运行在 Docker 容器中的 TDengine。 + +``` +$ taos + +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +也可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。 + +``` +curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql +``` + +输出示例如下: + +``` +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} +``` + +这条命令,通过 REST API 访问 TDengine server,这时连接的是本机的 6041 端口,可见连接成功。 + +TDengine REST API 详情请参考[官方文档](../../reference/rest-api/)。 + +### 使用 Docker 容器运行 TDengine server 和 taosAdapter + +在 TDengine 2.4.0.0 之后版本的 Docker 容器,开始提供一个独立运行的组件 taosAdapter,代替之前版本 TDengine 中 taosd 进程中内置的 http server。taosAdapter 支持通过 RESTful 接口对 TDengine server 的数据写入和查询能力,并提供和 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。在新版本 Docker 镜像中,默认启用了 taosAdapter,也可以使用 docker run 命令中设置 TAOS_DISABLE_ADAPTER=true 来禁用 taosAdapter;也可以在 docker run 命令中单独使用 taosAdapter,而不运行 taosd 。 + +注意:如果容器中运行 taosAdapter,需要根据需要映射其他端口,具体端口默认配置和修改方法请参考[taosAdapter 文档](/reference/taosadapter/)。 + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(taosd + taosAdapter): + +```bash +docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 +``` + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosAdapter,需要设置 firstEp 配置项 或 TAOS_FIRST_EP 环境变量): + +```bash +docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter +``` + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosd): + +```bash +docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4 +``` + +使用 curl 命令验证 RESTful 接口可以正常工作: + +```bash +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql +``` + +输出示例如下: + +``` 
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server + +1. 在宿主机命令行界面执行 taosBenchmark (曾命名为 taosdemo)写入数据到 Docker 容器中的 TDengine server + + ```bash + $ taosBenchmark + + taosBenchmark is simulating data generated by power equipments monitoring... + + host: 127.0.0.1:6030 + user: root + password: taosdata + configDir: + resultFile: ./output.txt + thread num of insert data: 10 + thread num of create table: 10 + top insert interval: 0 + number of records per req: 30000 + max sql length: 1048576 + database count: 1 + database[0]: + database[0] name: test + drop: yes + replica: 1 + precision: ms + super table count: 1 + super table[0]: + stbName: meters + autoCreateTable: no + childTblExists: no + childTblCount: 10000 + childTblPrefix: d + dataSource: rand + iface: taosc + insertRows: 10000 + interlaceRows: 0 + disorderRange: 1000 + disorderRatio: 0 + maxSqlLen: 1048576 + timeStampStep: 1 + startTimestamp: 2017-07-14 10:40:00.000 + sampleFormat: + sampleFile: + tagsFile: + columnCount: 3 + column[0]:FLOAT column[1]:INT column[2]:FLOAT + tagCount: 2 + tag[0]:INT tag[1]:BINARY(16) + + Press enter key to continue or Ctrl-C to stop + ``` + + 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDieo"。 + + 最后共插入 1 亿条记录。 + +2. 进入 TDengine 终端,查看 taosBenchmark 生成的数据。 + + - **进入命令行。** + + ```bash + $ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos + + Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 + Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + + taos> + ``` + + - **查看数据库。** + + ```bash + $ taos> show databases; + name | created_time | ntables | vgroups | ··· + test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· + log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· + + ``` + + - **查看超级表。** + + ```bash + $ taos> use test; + Database changed. 
+ + $ taos> show stables; + name | created_time | columns | tags | tables | + ============================================================================================ + meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | + Query OK, 1 row(s) in set (0.003259s) + + ``` + + - **查看表,限制输出十条。** + + ```bash + $ taos> select * from test.t0 limit 10; + + DB error: Table does not exist (0.002857s) + taos> select * from test.d0 limit 10; + ts | current | voltage | phase | + ====================================================================================== + 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | + 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | + 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | + 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | + 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | + 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | + 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | + 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | + 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | + 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | + Query OK, 10 row(s) in set (0.016791s) + + ``` + + - **查看 d0 表的标签值。** + + ```bash + $ taos> select groupid, location from test.d0; + groupid | location | + ================================= + 0 | California.SanDieo | + Query OK, 1 row(s) in set (0.003490s) + ``` + +### 应用示例:使用数据收集代理软件写入 TDengine + +taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StasD 写入数据,在宿主机执行命令如下: + +``` +echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 +``` + +然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容: + +``` +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | +Query OK, 2 row(s) in set (0.002112s) + +taos> use statsd; +Database changed. 
+ +taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | +Query OK, 1 row(s) in set (0.001160s) + +taos> select * from foo; + ts | value | metric_type | +======================================================================================= + 2021-12-28 09:21:48.840820836 | 1 | counter | +Query OK, 1 row(s) in set (0.001639s) + +taos> +``` + +可以看到模拟数据已经被写入到 TDengine 中。 + +## 停止正在 Docker 中运行的 TDengine 服务 + +```bash +docker stop tdengine +``` + +- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 diff --git a/docs-cn/27-train-faq/_category_.yml b/docs/zh/27-train-faq/_category_.yml similarity index 100% rename from docs-cn/27-train-faq/_category_.yml rename to docs/zh/27-train-faq/_category_.yml diff --git a/docs-cn/27-train-faq/index.md b/docs/zh/27-train-faq/index.md similarity index 100% rename from docs-cn/27-train-faq/index.md rename to docs/zh/27-train-faq/index.md diff --git a/docs/zh/30-release/01-2.6.md b/docs/zh/30-release/01-2.6.md new file mode 100644 index 0000000000000000000000000000000000000000..c7f46b110cde153de57d38468bcf885390c3a33b --- /dev/null +++ b/docs/zh/30-release/01-2.6.md @@ -0,0 +1,11 @@ +--- +title: 2.6 +--- + +[2.6.0.6](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.6) + +[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4) + +[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1) + +[2.6.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.0) diff --git a/docs/zh/30-release/02-2.4.md b/docs/zh/30-release/02-2.4.md new file mode 100644 index 0000000000000000000000000000000000000000..9eeb5a10b4b9e76d207d3fbcf498df5dd3256bce --- /dev/null +++ b/docs/zh/30-release/02-2.4.md @@ -0,0 +1,31 @@ +--- +title: 2.4 +--- + +[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30) + +[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26) + +[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25) + +[2.4.0.24](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.24) + +[2.4.0.20](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.20) + +[2.4.0.18](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.18) + +[2.4.0.16](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.16) + +[2.4.0.14](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.14) + +[2.4.0.12](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.12) + +[2.4.0.10](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.10) + +[2.4.0.7](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.7) + +[2.4.0.5](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.5) + +[2.4.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.4) + +[2.4.0.0](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.0) diff --git a/docs-cn/30-release/_category_.yml b/docs/zh/30-release/_category_.yml similarity index 100% rename from docs-cn/30-release/_category_.yml rename to docs/zh/30-release/_category_.yml diff --git a/docs-cn/30-release/index.md b/docs/zh/30-release/index.md similarity index 100% rename from docs-cn/30-release/index.md rename to docs/zh/30-release/index.md diff --git a/docs-en/02-intro/eco_system.webp b/docs/zh/eco_system.webp similarity index 100% rename from docs-en/02-intro/eco_system.webp rename to docs/zh/eco_system.webp diff --git 
a/examples/C#/C#checker/C#checker.cs b/examples/C#/C#checker/C#checker.cs index 7d0b6a50b673278ac6982a97de7eb31ce76761b6..f49fda88cdd8d298f2253bb8f47ccce58c3b0118 100644 --- a/examples/C#/C#checker/C#checker.cs +++ b/examples/C#/C#checker/C#checker.cs @@ -389,7 +389,7 @@ namespace TDengineDriver static void ExitProgram() { - System.Environment.Exit(0); + System.Environment.Exit(1); } public void cleanup() diff --git a/examples/C#/insertCn/lib/ResultSetUtils.cs b/examples/C#/insertCn/lib/ResultSetUtils.cs new file mode 100644 index 0000000000000000000000000000000000000000..7d299411ee68067fca9b8cc5fc8c38e53510fa5d --- /dev/null +++ b/examples/C#/insertCn/lib/ResultSetUtils.cs @@ -0,0 +1,43 @@ +using System; +using TDengineDriver; +using System.Runtime.InteropServices; +using System.Text; +using System.Collections.Generic; +namespace Test.UtilsTools.ResultSet +{ + public class ResultSet + { + private List resultMeta; + private List resultData; + // private bool isValidResult = false; + public ResultSet(IntPtr res) + { + + resultMeta = UtilsTools.GetResField(res); + resultData = UtilsTools.GetResData(res); + } + + public ResultSet(List metas, List datas) + { + resultMeta = metas; + resultData = datas; + } + + public List GetResultData() + { + return resultData; + } + + public List GetResultMeta() + { + return resultMeta; + } + + public int GetFieldsNum() + { + return resultMeta.Count; + } + } + + +} diff --git a/examples/C#/insertCn/lib/Utils.cs b/examples/C#/insertCn/lib/Utils.cs new file mode 100644 index 0000000000000000000000000000000000000000..6107ecab57869fbdd001988d54ba36930bb1fd0d --- /dev/null +++ b/examples/C#/insertCn/lib/Utils.cs @@ -0,0 +1,418 @@ +using System; +using TDengineDriver; +using System.Runtime.InteropServices; +using System.Text; +using System.Collections.Generic; +namespace Test.UtilsTools +{ + public class UtilsTools + { + + static string ip = "127.0.0.1"; + static string user = "root"; + static string password = "taosdata"; + static string db = ""; + static short port = 0; + //get a tdengine connection + public static IntPtr TDConnection() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, GetConfigPath()); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + IntPtr conn = TDengine.Connect(ip, user, password, db, port); + return conn; + } + //get taos.cfg file based on different os + public static string GetConfigPath() + { + string configDir = "" ; + if(OperatingSystem.IsOSPlatform("Windows")) + { + configDir = "C:/TDengine/cfg"; + } + else if(OperatingSystem.IsOSPlatform("Linux")) + { + configDir = "/etc/taos"; + } + else if(OperatingSystem.IsOSPlatform("macOS")) + { + configDir = "/etc/taos"; + } + return configDir; + } + + public static IntPtr ExecuteQuery(IntPtr conn, String sql) + { + IntPtr res = TDengine.Query(conn, sql); + if (!IsValidResult(res)) + { + Console.Write(sql.ToString() + " failure, "); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + return res; + } + + public static IntPtr ExecuteErrorQuery(IntPtr conn, String sql) + { + IntPtr res = TDengine.Query(conn, sql); + if (!IsValidResult(res)) + { + Console.Write(sql.ToString() + " failure, "); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + + } + return res; + } + + public static void ExecuteUpdate(IntPtr conn, String sql) + { + IntPtr res = TDengine.Query(conn, sql); + if (!IsValidResult(res)) + { + Console.Write(sql.ToString() + " failure, "); + 
ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + + } + TDengine.FreeResult(res); + } + + + public static bool IsValidResult(IntPtr res) + { + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + return false; + } + Console.WriteLine(""); + return false; + } + return true; + } + public static void CloseConnection(IntPtr conn) + { + if (conn != IntPtr.Zero) + { + if (TDengine.Close(conn) == 0) + { + Console.WriteLine("close connection sucess"); + } + else + { + Console.WriteLine("close Connection failed"); + } + } + TDengine.Cleanup(); + } + public static List GetResField(IntPtr res) + { + List metas = TDengine.FetchFields(res); + return metas; + } + public static void AssertEqual(string expectVal, string actualVal) + { + if (expectVal == actualVal) + { + Console.WriteLine("{0}=={1} pass", expectVal, actualVal); + } + else + { + Console.WriteLine("{0}=={1} failed", expectVal, actualVal); + ExitProgram(); + } + } + public static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(1); + } + public static List GetResData(IntPtr res) + { + List dataRaw = new List(); + if (!IsValidResult(res)) + { + ExitProgram(); + } + List metas = GetResField(res); + dataRaw = QueryRes(res, metas); + return dataRaw; + } + + public static TDengineMeta ConstructTDengineMeta(string name, string type) + { + + TDengineMeta _meta = new TDengineMeta(); + _meta.name = name; + char[] separators = new char[] { '(', ')' }; + string[] subs = type.Split(separators, StringSplitOptions.RemoveEmptyEntries); + + switch (subs[0].ToUpper()) + { + case "BOOL": + _meta.type = 1; + _meta.size = 1; + break; + case "TINYINT": + _meta.type = 2; + _meta.size = 1; + break; + case "SMALLINT": + _meta.type = 3; + _meta.size = 2; + break; + case "INT": + _meta.type = 4; + _meta.size = 4; + break; + case "BIGINT": + _meta.type = 5; + _meta.size = 8; + break; + case "TINYINT UNSIGNED": + _meta.type = 11; + _meta.size = 1; + break; + case "SMALLINT UNSIGNED": + _meta.type = 12; + _meta.size = 2; + break; + case "INT UNSIGNED": + _meta.type = 13; + _meta.size = 4; + break; + case "BIGINT UNSIGNED": + _meta.type = 14; + _meta.size = 8; + break; + case "FLOAT": + _meta.type = 6; + _meta.size = 4; + break; + case "DOUBLE": + _meta.type = 7; + _meta.size = 8; + break; + case "BINARY": + _meta.type = 8; + _meta.size = short.Parse(subs[1]); + break; + case "TIMESTAMP": + _meta.type = 9; + _meta.size = 8; + break; + case "NCHAR": + _meta.type = 10; + _meta.size = short.Parse(subs[1]); + break; + case "JSON": + _meta.type = 15; + _meta.size = 4096; + break; + default: + _meta.type = byte.MaxValue; + _meta.size = 0; + break; + } + return _meta; + } + + private static List QueryRes(IntPtr res, List metas) + { + IntPtr rowdata; + long queryRows = 0; + List dataRaw = new List(); + int fieldCount = metas.Count; + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + IntPtr colLengthPtr = TDengine.FetchLengths(res); + int[] colLengthArr = new int[fieldCount]; + Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount); + + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + if (data == IntPtr.Zero) + { + dataRaw.Add("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = 
Marshal.ReadByte(data) == 0 ? false : true; + dataRaw.Add(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + sbyte v2 = (sbyte)Marshal.ReadByte(data); + dataRaw.Add(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + dataRaw.Add(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + dataRaw.Add(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + dataRaw.Add(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + dataRaw.Add(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + dataRaw.Add(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + // string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]); + string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]); + dataRaw.Add(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + dataRaw.Add(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + // string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]); + string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]); + dataRaw.Add(v10); + break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v12 = Marshal.ReadByte(data); + dataRaw.Add(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v13 = (ushort)Marshal.ReadInt16(data); + dataRaw.Add(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v14 = (uint)Marshal.ReadInt32(data); + dataRaw.Add(v14); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v15 = (ulong)Marshal.ReadInt64(data); + dataRaw.Add(v15); + break; + default: + dataRaw.Add("unknown value"); + break; + } + } + + } + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + TDengine.FreeResult(res); + Console.WriteLine(""); + return dataRaw; + } + + // Generate insert sql for the with the coldata and tag data + public static string ConstructInsertSql(string table,string stable,List colData,List tagData,int numOfRows) + { + int numofFileds = colData.Count / numOfRows; + StringBuilder insertSql; + + if (stable == "") + { + insertSql = new StringBuilder($"insert into {table} values("); + } + else + { + insertSql = new StringBuilder($"insert into {table} using {stable} tags("); + + for (int j = 0; j < tagData.Count; j++) + { + if (tagData[j] is String) + { + insertSql.Append('\''); + insertSql.Append(tagData[j]); + insertSql.Append('\''); + } + else + { + insertSql.Append(tagData[j]); + } + if (j + 1 != tagData.Count) + { + insertSql.Append(','); + } + } + + insertSql.Append(")values("); + } + for (int i = 0; i < colData.Count; i++) + { + + if (colData[i] is String) + { + insertSql.Append('\''); + insertSql.Append(colData[i]); + insertSql.Append('\''); + } + else + { + insertSql.Append(colData[i]); + } + + if ((i + 1) % numofFileds == 0 && (i + 1) != colData.Count) + { + insertSql.Append(")("); + } + else if ((i + 1) == colData.Count) + { + insertSql.Append(')'); + } + else + { + insertSql.Append(','); + } + } + insertSql.Append(';'); + //Console.WriteLine(insertSql.ToString()); + + return insertSql.ToString(); + } + + public static List CombineColAndTagData(List colData,List tagData, int numOfRows) + { + var list = new List(); + for (int i = 0; i < 
colData.Count; i++) + { + list.Add(colData[i]); + if ((i + 1) % (colData.Count / numOfRows) == 0) + { + for (int j = 0; j < tagData.Count; j++) + { + list.Add(tagData[j]); + } + } + } + return list; + } + } +} diff --git a/examples/C#/jsonTag/JsonTag.cs b/examples/C#/jsonTag/JsonTag.cs index 453e54eabdc9a4ec61cdc2a061af69ed64753416..5c94df8b5a36bf20589250567e0352cfe7ef9b25 100644 --- a/examples/C#/jsonTag/JsonTag.cs +++ b/examples/C#/jsonTag/JsonTag.cs @@ -11,7 +11,7 @@ namespace Cases IntPtr conn = IntPtr.Zero; Console.WriteLine("===================JsonTagTest===================="); conn = conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0); - UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp_sample keep 3650"); + UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp keep 3650"); UtilsTools.ExecuteUpdate(conn, "use csharp"); JsonTagSample jsonTagSample = new JsonTagSample(); jsonTagSample.Test(conn); diff --git a/examples/C#/jsonTag/Util.cs b/examples/C#/jsonTag/Util.cs index 5138938df60532616e75b45d8a95597c322dfd1a..7446378fc75a4cfe49840f778961e92c37d7699d 100644 --- a/examples/C#/jsonTag/Util.cs +++ b/examples/C#/jsonTag/Util.cs @@ -217,10 +217,10 @@ namespace Utils } } } - public static void ExitProgram() + public static void ExitProgram(int i = 1) { TDengine.Cleanup(); - System.Environment.Exit(0); + System.Environment.Exit(i); } } } \ No newline at end of file diff --git a/examples/C#/jsonTag/jsonTag.csproj b/examples/C#/jsonTag/jsonTag.csproj index ed3af6e806f0321828742597d226011bfb4d5185..eb33d899ac803baadbcfc4f1ee4a4888ed6434ab 100644 --- a/examples/C#/jsonTag/jsonTag.csproj +++ b/examples/C#/jsonTag/jsonTag.csproj @@ -5,8 +5,8 @@ net5.0 - - + + diff --git a/examples/C#/schemaless/schemaless.csproj b/examples/C#/schemaless/schemaless.csproj index d132e34589525826d5b0ff0f0055156fad2d5a38..c2369f3e8eaf82188f6c55b7bb6cf8564eb9017b 100644 --- a/examples/C#/schemaless/schemaless.csproj +++ b/examples/C#/schemaless/schemaless.csproj @@ -5,8 +5,8 @@ net5.0 - - + + diff --git a/examples/C#/schemaless/schemalessSample.cs b/examples/C#/schemaless/schemalessSample.cs index f27ac352a6fc8a3fbbaf84966ae3b82e6036e91a..8d0b7f60d0dad60d382887e9c0661f72ca522c18 100644 --- a/examples/C#/schemaless/schemalessSample.cs +++ b/examples/C#/schemaless/schemalessSample.cs @@ -289,7 +289,7 @@ namespace TDengineDriver static void ExitProgram() { - System.Environment.Exit(0); + System.Environment.Exit(1); } public void cleanup() diff --git a/examples/C#/stmt/StmtDemo.cs b/examples/C#/stmt/StmtDemo.cs index fdd647fdb5f9c4bb528a2e99acc6975adf4c30a3..56a5aa20f3456524d9cca4f056d5510de23d4689 100644 --- a/examples/C#/stmt/StmtDemo.cs +++ b/examples/C#/stmt/StmtDemo.cs @@ -543,7 +543,7 @@ namespace TDengineDriver public static void ExitProgram() { TDengine.Cleanup(); - System.Environment.Exit(0); + System.Environment.Exit(1); } } } diff --git a/examples/c/makefile b/examples/c/makefile index 4d6cfc1f5f3e8d4d8b0a7ce88ce285c1b3259a5a..6f0ab8880aeb3f4be9b2596edd4e819914d67617 100644 --- a/examples/c/makefile +++ b/examples/c/makefile @@ -7,7 +7,6 @@ LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ - -I../../../deps/cJson/inc \ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ -fsanitize=address 
-fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment diff --git a/packaging/clean_env.bat b/packaging/clean_env.bat new file mode 100644 index 0000000000000000000000000000000000000000..069102cf7b1a33b3f1ecf089622f2f179c32c36e --- /dev/null +++ b/packaging/clean_env.bat @@ -0,0 +1,37 @@ +@echo off + +set CUR_DIR=%~dp0 +set SHELL_DIR=%cd% +set ENTERPRISE_DIR="%SHELL_DIR%\..\.." +set COMMUNITY_DIR="%SHELL_DIR%\.." +set TOOLS_DIR="%SHELL_DIR%\..\src\kit\taos-tools" + +cd %ENTERPRISE_DIR% +git checkout -- . +if exist "enterprise\src\plugins\taosainternal\taosadapter" ( + del /f "enterprise\src\plugins\taosainternal\taosadapter" +) +if exist "enterprise\src\plugins\taosainternal\upx.tar.xz" ( + del /f "enterprise\src\plugins\taosainternal\upx.tar.xz" +) + +cd %COMMUNITY_DIR% +git checkout -- . + +cd %TOOLS_DIR% +git checkout -- . +if exist "packaging\tools\install-khtools.sh" ( + del /f "packaging\tools\install-khtools.sh" +) +if exist "packaging\tools\uninstall-khtools.sh" ( + del /f "packaging/tools/uninstall-khtools.sh" +) + +if exist "packaging\tools\install-prodbtools.sh" ( + del /f "packaging\tools\install-prodbtools.sh" +) +if exist "packaging\tools\uninstall-prodbtools.sh" ( + del /f "packaging\tools\uninstall-prodbtools.sh" +) + +cd %CUR_DIR% \ No newline at end of file diff --git a/packaging/clean_env.sh b/packaging/clean_env.sh new file mode 100644 index 0000000000000000000000000000000000000000..51a0fe4eb218d2f5a1aaae097bf7f94e9f168599 --- /dev/null +++ b/packaging/clean_env.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +CUR_DIR=$(pwd) +SHELL_DIR=$(dirname $(readlink -f "$0")) +ENTERPRISE_DIR=$SHELL_DIR/../.. +COMMUNITY_DIR=$SHELL_DIR/.. +TOOLS_DIR=$COMMUNITY_DIR/src/kit/taos-tools + +cd $ENTERPRISE_DIR +git checkout -- . +if [[ -e enterprise/src/plugins/taosainternal/taosadapter ]]; then + rm -f enterprise/src/plugins/taosainternal/taosadapter +fi +if [[ -e enterprise/src/plugins/taosainternal/upx.tar.xz ]]; then + rm -f enterprise/src/plugins/taosainternal/upx.tar.xz +fi + +cd $COMMUNITY_DIR +git checkout -- . +if [[ -e src/plugins/taosadapter/taosadapter ]]; then + rm -f src/plugins/taosadapter/taosadapter +fi +if [[ -e src/plugins/taosadapter/upx.tar.xz ]]; then + rm -f src/plugins/taosadapter/upx.tar.xz +fi + +cd $TOOLS_DIR +git checkout -- . 
+ +rm -f $(find packaging/tools/ -name install-*tools.sh | grep -v taos) +rm -f $(find packaging/tools/ -name uninstall-*tools.sh | grep -v taos) + +rm -rf $COMMUNITY_DIR/debug/* +rm -rf $COMMUNITY_DIR/release/* +if [[ -e $COMMUNITY_DIR/rpms ]]; then + rm -rf $COMMUNITY_DIR/rpms +fi +if [[ -e $COMMUNITY_DIR/debs ]]; then + rm -rf $COMMUNITY_DIR/debs +fi + +cd $CUR_DIR diff --git a/packaging/deb/taosd b/packaging/deb/taosd index fe356ca6565c916086273e5669918b04065964cd..8f8ab2f1ea2840a1b9f791b314a60a72313b9553 100644 --- a/packaging/deb/taosd +++ b/packaging/deb/taosd @@ -1,20 +1,4 @@ #!/bin/bash -# -# Modified from original source: Elastic Search -# https://github.com/elasticsearch/elasticsearch -# Thank you to the Elastic Search authors -# -# chkconfig: 2345 99 01 -# -### BEGIN INIT INFO -# Provides: TDengine -# Required-Start: $local_fs $network $syslog -# Required-Stop: $local_fs $network $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Starts TDengine taosd -# Description: Starts TDengine taosd, a time-series database engine -### END INIT INFO set -e diff --git a/packaging/release.bat b/packaging/release.bat index c1cf7875a505852ce3f8c0b78029fedf481aed8f..6a25026f62d9b993672bdc8f9e8ec1482acdf982 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -6,9 +6,10 @@ cd %community_dir% git checkout -- . cd %community_dir%\packaging -:: %1 name %2 version +:: %1 name %2 version %3 cpuType if !%1==! GOTO USAGE if !%2==! GOTO USAGE +if !%3==! GOTO USAGE if %1 == taos GOTO TAOS if %1 == power GOTO POWER if %1 == tq GOTO TQ @@ -21,42 +22,66 @@ GOTO USAGE goto RELEASE :POWER +cd %internal_dir%\enterprise\packaging\oem call sed_power.bat %community_dir% +cd %community_dir%\packaging goto RELEASE :TQ +cd %internal_dir%\enterprise\packaging\oem call sed_tq.bat %community_dir% +cd %community_dir%\packaging goto RELEASE :PRO +cd %internal_dir%\enterprise\packaging\oem call sed_pro.bat %community_dir% +cd %community_dir%\packaging goto RELEASE :KH +cd %internal_dir%\enterprise\packaging\oem call sed_kh.bat %community_dir% +cd %community_dir%\packaging goto RELEASE :JH +cd %internal_dir%\enterprise\packaging\oem call sed_jh.bat %community_dir% +cd %community_dir%\packaging goto RELEASE :RELEASE -echo release windows-client-64 for %1, version: %2 -if not exist %internal_dir%\debug\ver-%2-64bit-%1 ( - md %internal_dir%\debug\ver-%2-64bit-%1 +echo release windows-client for %1, version: %2, cpyType: %3 +if not exist %internal_dir%\debug\ver-%2-%1-%3 ( + md %internal_dir%\debug\ver-%2-%1-%3 ) else ( - rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1 - md %internal_dir%\debug\ver-%2-64bit-%1 + rd /S /Q %internal_dir%\debug\ver-%2-%1-%3 + md %internal_dir%\debug\ver-%2-%1-%3 ) -cd %internal_dir%\debug\ver-%2-64bit-%1 +cd %internal_dir%\debug\ver-%2-%1-%3 + +if %3% == x64 GOTO X64 +if %3% == x86 GOTO X86 +GOTO USAGE + +:X86 +call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86 +cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x86 +GOTO MAKE_AND_INSTALL + +:X64 call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64 +GOTO MAKE_AND_INSTALL + +:MAKE_AND_INSTALL set CL=/MP4 nmake install goto EXIT0 :USAGE -echo Usage: release.bat $productName $version +echo Usage: release.bat $productName $version $cpuType goto EXIT0 :EXIT0 \ No newline at end of file diff --git a/packaging/release.sh b/packaging/release.sh index 
a96b3129992ea829e9d70fb11d41647760e480c8..0ad8d9b1bfaa09a4be51c8448c2494feff2cdbf7 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -101,13 +101,13 @@ echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} curr_dir=$(pwd) if [ "$osType" == "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/..)" +else script_dir=$(dirname $0) cd ${script_dir} script_dir="$(pwd)" top_dir=${script_dir}/.. -else - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/..)" fi csudo="" @@ -182,14 +182,10 @@ cd "${curr_dir}" # 2. cmake executable file compile_dir="${top_dir}/debug" if [ -d ${compile_dir} ]; then - ${csudo}rm -rf ${compile_dir} + rm -rf ${compile_dir} fi -if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${compile_dir} -else - mkdir -p ${compile_dir} -fi +mkdir -p ${compile_dir} cd ${compile_dir} if [[ "$allocator" == "jemalloc" ]]; then @@ -198,9 +194,11 @@ else allocator_macro="" fi +# 3. replace product info if [[ "$dbName" != "taos" ]]; then source ${enterprise_dir}/packaging/oem/sed_$dbName.sh replace_community_$dbName + replace_output_$dbName fi if [[ "$httpdBuild" == "true" ]]; then @@ -227,6 +225,7 @@ if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" = else if [[ "$dbName" != "taos" ]]; then replace_enterprise_$dbName + replace_output_$dbName fi cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} fi @@ -255,18 +254,18 @@ if [ "$osType" != "Darwin" ]; then echo "====do deb package for the ubuntu system====" output_dir="${top_dir}/debs" if [ -d ${output_dir} ]; then - ${csudo}rm -rf ${output_dir} + rm -rf ${output_dir} fi - ${csudo}mkdir -p ${output_dir} + mkdir -p ${output_dir} cd ${script_dir}/deb ${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} if [[ "$pagMode" == "full" ]]; then if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then cd ${top_dir}/src/kit/taos-tools/packaging/deb + taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}') [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}') ${csudo}./make-taos-tools-deb.sh ${top_dir} \ ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} fi @@ -280,18 +279,18 @@ if [ "$osType" != "Darwin" ]; then echo "====do rpm package for the centos system====" output_dir="${top_dir}/rpms" if [ -d ${output_dir} ]; then - ${csudo}rm -rf ${output_dir} + rm -rf ${output_dir} fi - ${csudo}mkdir -p ${output_dir} + mkdir -p ${output_dir} cd ${script_dir}/rpm ${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} if [[ "$pagMode" == "full" ]]; then if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then cd ${top_dir}/src/kit/taos-tools/packaging/rpm + taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g') [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g') ${csudo}./make-taos-tools-rpm.sh ${top_dir} \ ${compile_dir} ${output_dir} 
${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} fi diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index f07705ff442b65f1295431e59b48ef50a76cadc0..d74a962210318f59f4afc9a369f95503b342bbd2 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -105,6 +105,9 @@ elif echo $osinfo | grep -qwi "debian"; then elif echo $osinfo | grep -qwi "Kylin"; then # echo "This is Kylin system" os_type=1 +elif echo $osinfo | grep -qwi "Red"; then + # echo "This is Red Hat system" + os_type=1 elif echo $osinfo | grep -qwi "centos"; then # echo "This is centos system" os_type=2 @@ -196,7 +199,6 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* @@ -207,11 +209,10 @@ function install_bin() { [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : - [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : +# [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh index e3c63965d4beee31cea91d2f8fd84e3d2bdd00d3..31a9ce38ac2c8fe1998c73680fd48183f56c1077 100755 --- a/packaging/tools/install_arbi.sh +++ b/packaging/tools/install_arbi.sh @@ -7,6 +7,9 @@ set -e #set -x # -----------------------Variables definition--------------------- +productName="TDengine" +emailName="taosdata.com" + script_dir=$(dirname $(readlink -f "$0")) bin_link_dir="/usr/bin" @@ -73,17 +76,26 @@ elif echo $osinfo | grep -qwi "debian"; then elif echo $osinfo | grep -qwi "Kylin"; then # echo "This is Kylin system" os_type=1 +elif echo $osinfo | grep -qwi "Red"; then + # echo "This is Red Hat system" + os_type=1 elif echo $osinfo | grep -qwi "centos"; then # echo "This is centos system" os_type=2 elif echo $osinfo | grep -qwi "fedora"; then # echo "This is fedora system" os_type=2 +elif echo $osinfo | grep -qwi "Linx"; then + # echo "This is Linx system" + os_type=1 + service_mod=0 + initd_mod=0 + 
service_config_dir="/etc/systemd/system" else echo " osinfo: ${osinfo}" echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." + echo " please feel free to contact ${emailName} for support." os_type=1 fi @@ -241,7 +253,7 @@ function install_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'Description=${productName} arbitrator service' >> ${tarbitratord_service_config}" ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" ${csudo}bash -c "echo >> ${tarbitratord_service_config}" @@ -273,9 +285,9 @@ function install_service() { fi } -function update_TDengine() { +function update_Product() { # Start to update - echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}" + echo -e "${GREEN}Start to update ${productName}'s arbitrator ...${NC}" # Stop the service if running if pidof tarbitrator &>/dev/null; then if ((${service_mod} == 0)); then @@ -303,12 +315,12 @@ function update_TDengine() { echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" fi echo - echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}" + echo -e "\033[44;32;1m${productName}'s arbitrator is updated successfully!${NC}" } -function install_TDengine() { +function install_Product() { # Start to install - echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}" + echo -e "${GREEN}Start to install ${productName}'s arbitrator ...${NC}" install_main_path #install_header @@ -325,7 +337,7 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" fi - echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}" + echo -e "\033[44;32;1m${productName}'s arbitrator is installed successfully!${NC}" echo } @@ -333,7 +345,7 @@ function install_TDengine() { # Install server and client if [ -x ${bin_dir}/tarbitrator ]; then update_flag=1 - update_TDengine + update_Product else - install_TDengine + install_Product fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 28001fb769e5f3b6b59680c398ab683a287f1352..844ac16a26e17c6f6d9d39c30749326ef015ef74 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -18,22 +18,23 @@ clientName="taos" uninstallScript="rmtaos" configFile="taos.cfg" tarName="taos.tar.gz" +demoName="taosdemo" osType=Linux pagMode=full verMode=edge if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir=${dataDir} - log_dir=${logDir} + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir=${dataDir} + log_dir=${logDir} else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir=${dataDir} - log_dir=~/${productName}/log + script_dir=$(dirname $0) + cd ${script_dir} + script_dir="$(pwd)" + data_dir=${dataDir} + log_dir=~/${productName}/log fi log_link_dir="${installDir}/log" @@ -41,14 +42,14 @@ log_link_dir="${installDir}/log" cfg_install_dir=${configDir} if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - 
lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" fi #install main path @@ -65,8 +66,8 @@ GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " +if command -v sudo >/dev/null; then + csudo="sudo " fi update_flag=0 @@ -74,7 +75,7 @@ update_flag=0 function kill_client() { pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } @@ -96,186 +97,184 @@ function install_main_path() { function install_bin() { # Remove links - ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/${demoName} || : fi - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : - ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : + [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : } function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : } function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : + # Remove links + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* + ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + if [ "$osType" != "Darwin" ]; then + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 
${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + if [ -d "${lib64_link_dir}" ]; then + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi + else + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi - if [ "$osType" != "Darwin" ]; then - ${csudo}ldconfig - else - ${csudo}update_dyld_shared_cache - fi + if [ "$osType" != "Darwin" ]; then + ${csudo}ldconfig + else + ${csudo}update_dyld_shared_cache + fi } function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* + ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h + ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 
${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo}/usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo}/usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi } function install_config() { - if [ ! -f ${cfg_install_dir}/${configFile} ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi + if [ ! 
-f ${cfg_install_dir}/${configFile} ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} + ${csudo}chmod 644 ${cfg_install_dir}/* + fi - ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org - ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg + ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org + ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg } - function install_log() { - ${csudo}rm -rf ${log_dir} || : + ${csudo}rm -rf ${log_dir} || : - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log + if [ "$osType" != "Darwin" ]; then + ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} + fi + ${csudo}ln -s ${log_dir} ${install_main_dir}/log } function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ + ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ } function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi + if [ -d ${script_dir}/examples ]; then + ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi } -function update_TDengine() { - # Start to update - if [ ! -e ${tarName} ]; then - echo "File ${tarName} does not exist" - exit 1 - fi - tar -zxf ${tarName} +function update_Product() { + # Start to update + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} - echo -e "${GREEN}Start to update ${productName} client...${NC}" - # Stop the client shell if running - if pidof ${clientName} &> /dev/null; then - kill_client - sleep 1 - fi + echo -e "${GREEN}Start to update ${productName} client...${NC}" + # Stop the client shell if running + if pidof ${clientName} &>/dev/null; then + kill_client + sleep 1 + fi - install_main_path + install_main_path - install_log - install_header - install_lib - install_jemalloc - if [ "$verMode" == "cluster" ]; then - install_connector - fi - install_examples - install_bin - install_config + install_log + install_header + install_lib + install_jemalloc + if [ "$verMode" == "cluster" ]; then + install_connector + fi + install_examples + install_bin + install_config - echo - echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" + echo + echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName}) } -function install_TDengine() { +function install_Product() { # Start to install if [ ! 
-e ${tarName} ]; then echo "File ${tarName} does not exist" @@ -303,18 +302,17 @@ function install_TDengine() { rm -rf $(tar -tf ${tarName}) } - ## ==============================Main program starts from here============================ # Install or update the client # if the server is already installed, don't install the client - if [ -e ${bin_dir}/${serverName} ]; then - echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}" - exit 0 - fi +if [ -e ${bin_dir}/${serverName} ]; then + echo -e "\033[44;32;1m${productName} server is already installed, so there is no need to install the client!${NC}" + exit 0 +fi - if [ -x ${bin_dir}/${clientName} ]; then - update_flag=1 - update_TDengine - else - install_TDengine - fi +if [ -x ${bin_dir}/${clientName} ]; then + update_flag=1 + update_Product +else + install_Product +fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index dc5d9f1bc61a306568888899dc99f73a2b18308f..c40fe14e3a34354e03f2169fca7e9f0171c064ee 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -29,7 +29,11 @@ installDir="/usr/local/taos" productName="TDengine" emailName="taosdata.com" uninstallScript="rmtaos" +adapterName="taosadapter" +demoName="taosdemo" +benchmarkName="taosBenchmark" +# Dynamic directory if [ "$osType" != "Darwin" ]; then data_dir=${dataDir} log_dir=${logDir} @@ -125,14 +129,14 @@ if [ "$osType" != "Darwin" ]; then fi fi -function kill_taosadapter() { - pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') +function kill_adapter() { + pid=$(ps -ef | grep "${adapterName}" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi } -function kill_taosd() { +function kill_server() { ps -ef | grep ${serverName} pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then @@ -168,40 +172,36 @@ function install_bin() { # Remove links ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : + ${csudo}rm -f ${bin_link_dir}/${adapterName} || : + ${csudo}rm -f ${bin_link_dir}/${demoName} || : if [ "$osType" != "Darwin" ]; then ${csudo}rm -f ${bin_link_dir}/perfMonitor || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || : - [ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || : - [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || : - [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : - [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/${benchmarkName} ] && ${csudo}cp -r ${binary_dir}/build/bin/${benchmarkName} ${install_main_dir}/bin || : + [ -f ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${install_main_dir}/bin/${demoName} || : + # [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump
${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/${adapterName} ] && ${csudo}cp -r ${binary_dir}/build/bin/${adapterName} ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || : ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/run_taosd_and_taosadapter.sh ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin ${csudo}chmod 0555 ${install_main_dir}/bin/* #Make link [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : - [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : + # [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : else @@ -212,9 +212,9 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/${clientName} ] || [ -x ${install_main_2_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || ${csudo}ln -s ${install_main_2_dir}/bin/${clientName} || : [ -x ${install_main_dir}/bin/${serverName} ] || [ -x ${install_main_2_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || ${csudo}ln -s ${install_main_2_dir}/bin/${serverName} || : - [ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo}ln -s ${install_main_2_dir}/bin/taosadapter || : - [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo 
${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/${adapterName} ] || [ -x ${install_main_2_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || ${csudo}ln -s ${install_main_2_dir}/bin/${adapterName} || : + # [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/${demoName} ] || [ -x ${install_main_2_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || ln -s ${install_main_2_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || : fi } @@ -380,23 +380,23 @@ function install_config() { fi } -function install_taosadapter_config() { - if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then +function install_adapter_config() { + if [ ! -f "${cfg_install_dir}/${adapterName}.toml" ]; then ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && - ${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/taosadapter.toml ] && - ${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml - [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && - ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ - ${cfg_install_dir}/taosadapter.toml.${verNumber} - [ -f ${cfg_install_dir}/taosadapter.toml ] && - ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml \ - ${install_main_dir}/cfg/taosadapter.toml + [ -f ${binary_dir}/test/cfg/${adapterName}.toml ] && + ${csudo}cp ${binary_dir}/test/cfg/${adapterName}.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/${adapterName}.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml + [ -f ${binary_dir}/test/cfg/${adapterName}.toml ] && + ${csudo}cp -f ${binary_dir}/test/cfg/${adapterName}.toml \ + ${cfg_install_dir}/${adapterName}.toml.${verNumber} + [ -f ${cfg_install_dir}/${adapterName}.toml ] && + ${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml \ + ${install_main_dir}/cfg/${adapterName}.toml else - if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then - ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ - ${cfg_install_dir}/taosadapter.toml.${verNumber} + if [ -f "${binary_dir}/test/cfg/${adapterName}.toml" ]; then + ${csudo}cp -f ${binary_dir}/test/cfg/${adapterName}.toml \ + ${cfg_install_dir}/${adapterName}.toml.${verNumber} fi fi } @@ -529,10 +529,10 @@ function install_service_on_systemd() { ${csudo}systemctl enable ${serverName} } -function install_taosadapter_service() { +function install_adapter_service() { if ((${service_mod} == 0)); then - [ -f ${binary_dir}/test/cfg/taosadapter.service ] && - ${csudo}cp ${binary_dir}/test/cfg/taosadapter.service \ + [ -f ${binary_dir}/test/cfg/${adapterName}.service ] && + ${csudo}cp ${binary_dir}/test/cfg/${adapterName}.service \ ${service_config_dir}/ || : ${csudo}systemctl daemon-reload fi @@ -544,11 +544,11 @@ function install_service() { elif ((${service_mod} == 1)); then install_service_on_sysvinit else - kill_taosd + kill_server fi } -function update_TDengine() { +function update_Product() { echo -e "${GREEN}Start to update ${productName}...${NC}" # Stop the service if running @@ -558,8 +558,8 @@ function update_TDengine() { elif ((${service_mod} == 1)); then ${csudo}service ${serverName} stop || : else - kill_taosadapter - kill_taosd + kill_adapter + kill_server fi sleep 1 fi @@ -574,23 +574,23 
@@ function update_TDengine() { install_bin install_service - install_taosadapter_service + install_adapter_service install_config - install_taosadapter_config + install_adapter_config echo echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + echo -e "${GREEN_DARK}To configure ${adapterName} (if installed) ${NC}: edit ${configDir}/${adapterName}.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${adapterName} (if installed)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" fi @@ -599,7 +599,7 @@ function update_TDengine() { echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" } -function install_TDengine() { +function install_Product() { # Start to install echo -e "${GREEN}Start to install ${productName}...${NC}" @@ -614,23 +614,23 @@ function install_TDengine() { install_bin install_service - install_taosadapter_service + install_adapter_service install_config - install_taosadapter_config + install_adapter_config # Ask whether to start the service echo echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + echo -e "${GREEN_DARK}To configure ${adapterName} (if installed) ${NC}: edit ${configDir}/${adapterName}.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${adapterName} (if installed)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" fi @@ -644,14 +644,14 @@ echo source directory: $1 echo binary directory: $2 if [ "$osType" != "Darwin" ]; then if [ -x ${bin_dir}/${clientName} ]; then - update_TDengine + update_Product else - install_TDengine + install_Product fi else if [ -x ${bin_dir}/${clientName} ] || [ -x ${bin_2_dir}/${clientName} ]; then - update_TDengine + update_Product else - install_TDengine + install_Product fi fi diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh index 6dacfdd90d4499ac1f79c1eb31f9c786e5c862df..4991dcb9343a54cd39ded48445eff312459c0318 100755 --- a/packaging/tools/makearbi.sh +++ b/packaging/tools/makearbi.sh @@ -36,13 +36,11 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh" install_files="${script_dir}/install_arbi.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord # make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi.sh || : -#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 89865ae54f7b42b56c4ad4e85f51e4f84e0a720d..1b5a3f159dc3272129d77ac27616716d6223dc1e 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -44,7 +44,6 @@ else fi # Directories and files. - if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/${clientName} @@ -182,26 +181,27 @@ if [[ $productName == "TDengine" ]]; then fi fi fi + # Copy driver mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver # Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector || : - fi - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" - fi - cp -r ${connector_dir}/python ${install_dir}/connector - cp -r ${connector_dir}/nodejs ${install_dir}/connector -fi +#connector_dir="${code_dir}/connector" +#mkdir -p ${install_dir}/connector +# +#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then +# if [ "$osType" != "Darwin" ]; then +# cp ${build_dir}/lib/*.jar ${install_dir}/connector || : +# fi +# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then +# cp -r ${connector_dir}/go ${install_dir}/connector +# else +# echo "WARNING: go connector not found, please check if want to use it!" +# fi +# cp -r ${connector_dir}/python ${install_dir}/connector +# cp -r ${connector_dir}/nodejs ${install_dir}/connector +#fi # Copy release note # cp ${script_dir}/release_note ${install_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 139749e4e64bd2f7c4983915274c8c2b879ad775..8c45b0a7d12485d3e95554f7d2223790366f3401 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -91,7 +91,6 @@ else ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ - ${script_dir}/run_taosd_and_taosadapter.sh \ ${script_dir}/startPre.sh \ ${script_dir}/taosd-dump-cfg.gdb" fi @@ -158,7 +157,6 @@ if [ $adapterName != "taosadapter" ]; then sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service # !!! 
do not change taosadapter here mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName} - mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb fi diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index f886d08df0b127c2dcec494b220104e39e4bce13..cb20085125571d4037c264ab713ce4678874de0c 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -16,12 +16,16 @@ serverName="taosd" clientName="taos" uninstallScript="rmtaos" productName="TDengine" +adapterName="taosadapter" +benchmarkName="taosBenchmark" +dumpName="taosdump" +demoName="taosdemo" #install main path install_main_dir=${installDir} -data_link_dir=${installDir}/data -log_link_dir=${installDir}/log -cfg_link_dir=${installDir}/cfg +data_link_dir="${installDir}/data" +log_link_dir="${installDir}/log" +cfg_link_dir="${installDir}/cfg" bin_link_dir="/usr/bin" local_bin_link_dir="/usr/local/bin" lib_link_dir="/usr/lib" @@ -31,7 +35,7 @@ install_nginxd_dir="/usr/local/nginxd" service_config_dir="/etc/systemd/system" taos_service_name=${serverName} -taosadapter_service_name="taosadapter" +taosadapter_service_name=${adapterName} tarbitrator_service_name="tarbitratord" nginx_service_name="nginxd" csudo="" @@ -59,22 +63,8 @@ else service_mod=2 fi -function kill_taosadapter() { - pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_taosd() { - pid=$(ps -ef | grep ${serverName} | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi @@ -84,14 +74,13 @@ function clean_bin() { # Remove link ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : + ${csudo}rm -f ${bin_link_dir}/${adapterName} || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName} || : + ${csudo}rm -f ${bin_link_dir}/${demoName} || : + ${csudo}rm -f ${bin_link_dir}/${dumpName} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : } @@ -133,9 +122,9 @@ function clean_service_on_systemd() { ${csudo}systemctl disable ${taos_service_name} &>/dev/null || echo &>/dev/null ${csudo}rm -f ${taosd_service_config} - taosadapter_service_config="${service_config_dir}/taosadapter.service" + taosadapter_service_config="${service_config_dir}/${adapterName}.service" if systemctl is-active --quiet ${taosadapter_service_name}; then - echo "${productName} taosAdapter is running, stopping it..." + echo "${productName} ${adapterName} is running, stopping it..."
${csudo}systemctl stop ${taosadapter_service_name} &>/dev/null || echo &>/dev/null fi ${csudo}systemctl disable ${taosadapter_service_name} &>/dev/null || echo &>/dev/null @@ -210,9 +199,9 @@ function clean_service() { elif ((${service_mod} == 1)); then clean_service_on_sysvinit else - kill_taosadapter - kill_taosd - kill_tarbitrator + kill_process ${adapterName} + kill_process ${serverName} + kill_process "tarbitrator" fi } diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index f2cbccb45f738c058236e5625a86fc40c161f488..701bfde2733cd300e42e5c0f63a52e7138de777d 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -23,47 +23,46 @@ lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " +if command -v sudo >/dev/null; then + csudo="sudo " fi function kill_client() { if [ -n "$(pidof ${clientName})" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $(pidof ${clientName}) || : fi } function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/${clientName} || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : - ${csudo}rm -f ${bin_link_dir}/set_core || : + # Remove link + ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/set_core || : } function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : + # Remove link + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo}rm -rf ${v15_java_app_dir} || : } function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : + # Remove link + ${csudo}rm -f ${inc_link_dir}/taos.h || : + ${csudo}rm -f ${inc_link_dir}/taosdef.h || : + ${csudo}rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : + # Remove link + ${csudo}rm -f ${cfg_link_dir}/* || : } function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : + # Remove link + ${csudo}rm -rf ${log_link_dir} || : } # Stop client.
@@ -82,4 +81,4 @@ clean_config ${csudo}rm -rf ${install_main_dir} echo -e "${GREEN}TDengine client is removed successfully!${NC}" -echo +echo diff --git a/packaging/tools/run_taosd_and_taosadapter.sh b/packaging/tools/run_taosd_and_taosadapter.sh deleted file mode 100755 index 9ab9eb484a4a5bbc4e3d3994d97b61e0f4bd328d..0000000000000000000000000000000000000000 --- a/packaging/tools/run_taosd_and_taosadapter.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter & -taosd diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 2ada7b32e8ad74de1a3436139cc59481d674667c..a3db44c3826e823f38a4c1a5533a713bf58bc927 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -387,6 +387,7 @@ typedef struct SSqlObj { SSqlRes res; SSubqueryState subState; + pthread_mutex_t mtxSubs; // avoid double access to pSubs after failure struct SSqlObj **pSubs; struct SSqlObj *rootObj; @@ -397,6 +398,10 @@ typedef struct SSqlObj { int32_t retryReason; // previous error code struct SSqlObj *prev, *next; int64_t self; + + // connection keep-alive + int64_t lastAlive; + void * pPrevContext; } SSqlObj; typedef struct SSqlStream { diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index df1b98478c2244b15b0170e310565d8a6cddc314..b0900513e999a1d9f5be182f5aa242794307541e 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -46,6 +46,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para pSql->fetchFp = fp; pSql->rootObj = pSql; + pthread_mutex_init(&pSql->mtxSubs, NULL); + registerSqlObj(pSql); pSql->sqlstr = calloc(1, sqlLen + 1); @@ -312,8 +314,12 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) { return; } - assert(pSql->res.code != TSDB_CODE_SUCCESS); - if (tsShortcutFlag) { + // probe send error, but the result was returned asynchronously by the server + if(pSql->res.code == TSDB_CODE_SUCCESS) { + return ; + } + + if (tsShortcutFlag && (pSql->res.code == TSDB_CODE_RPC_SHORTCUT)) { tscDebug("0x%" PRIx64 " async result callback, code:%s", pSql->self, tstrerror(pSql->res.code)); pSql->res.code = TSDB_CODE_SUCCESS; } else { diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index b8d47022b4d00cf4025904fee91ee3dba232a59c..82f08fc81da997cc1b938d8bf1d21b316ffb912c 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -33,12 +33,12 @@ typedef struct SCompareParam { int32_t groupOrderType; } SCompareParam; -static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) { +static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t idx, char **buf) { int32_t ret = 0; size_t size = taosArrayGetSize(columnIndexList); if (size > 0) { - ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC); + ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, idx, buf, TSDB_ORDER_ASC); } // if ret == 0, means the result belongs to the same group @@ -563,9 +563,9 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc int32_t size = (int32_t) taosArrayGetSize(pColumnList); for(int32_t i = 0; i < size; ++i) { - SColIndex* index = taosArrayGet(pColumnList, i); - SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index->colIndex); - assert(index->colId == pColInfo->info.colId); + SColIndex* idx = taosArrayGet(pColumnList, i); + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx->colIndex); + assert(idx->colId ==
pColInfo->info.colId); memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes); } @@ -603,7 +603,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_ for (int32_t j = 0; j < numOfExpr; ++j) { int32_t functionId = pCtx[j].functionId; - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) { continue; } @@ -625,7 +625,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_ static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) { for(int32_t j = 0; j < numOfExpr; ++j) { int32_t functionId = pCtx[j].functionId; - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) { continue; } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 81d658d964dc2a404e04f363751e0a17ebe6e46f..cda3b0a50af7014f1cd2df728cdb40b23cc11ba1 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -152,7 +152,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, int32_t typeColLength, int32_t noteColLength) { int32_t rowLen = 0; - SColumnIndex index = {0}; + SColumnIndex idx = {0, 0}; pSql->cmd.numOfCols = numOfCols; @@ -163,7 +163,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Field", sizeof(f.name)); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); @@ -173,7 +173,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Type", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), -1000, typeColLength, false); rowLen += typeColLength + VARSTR_HEADER_SIZE; @@ -183,7 +183,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Length", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_INT, sizeof(int32_t), -1000, sizeof(int32_t), false); rowLen += sizeof(int32_t); @@ -193,7 +193,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, tstrncpy(f.name, "Note", sizeof(f.name)); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, 
TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), -1000, noteColLength, false); rowLen += noteColLength + VARSTR_HEADER_SIZE; @@ -415,7 +415,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) { static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const char *ddl) { int32_t rowLen = 0; int16_t ddlLen = (int16_t)strlen(ddl); - SColumnIndex index = {0}; + SColumnIndex idx = {0}; pSql->cmd.numOfCols = 2; SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); @@ -433,7 +433,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); rowLen += f.bytes; @@ -446,13 +446,14 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false); rowLen += ddlLen + VARSTR_HEADER_SIZE; return rowLen; } + static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) { SSqlRes *pRes = &pSql->res; @@ -473,6 +474,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c STR_WITH_MAXSIZE_TO_VARSTR(dst, ddl, pField->bytes); return 0; } + static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) { SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result); @@ -480,6 +482,7 @@ static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char * tscFieldInfoUpdateOffset(pQueryInfo); return tscSCreateSetValueToResObj(pSql, rowLen, str, result); } + int32_t tscRebuildCreateTableStatement(void *param,char *result) { SCreateBuilder *builder = (SCreateBuilder *)param; int32_t code = TSDB_CODE_SUCCESS; @@ -533,8 +536,8 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { memset(buf, 0, sizeof(buf)); int32_t* lengths = taos_fetch_lengths(pSql); int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf); - if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); + if (0 == ret && 0 == strcmp(buf, builder->buf)) { + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE `%s`", buf); for (int i = 1; i < num_fields; i++) { for (int j = 0; showColumns[j][0] != NULL; j++) { if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j][0], strlen(showColumns[j][0]))) { diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index ad209839bb24b7a34fc761a4111997db4076b4de..38aee8a6787d5ae99b1d98ec0b2fadd42208ac5a 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -61,8 +61,8 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColIn return TSDB_CODE_SUCCESS; } -int 
tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { - int32_t index = 0; +int tsParseTime(SStrToken *pToken, int64_t *pTime, char **next, char *err, int16_t timePrec) { + int32_t idx = 0; SStrToken sToken; int64_t interval; int64_t useconds = 0; @@ -80,8 +80,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 useconds = taosStr2int64(pToken->z); } else { // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm); - if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z); + if (taosParseTime(pToken->z, pTime, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(err, "invalid timestamp format", pToken->z); } return TSDB_CODE_SUCCESS; @@ -91,7 +91,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 if (isspace(pToken->z[k])) continue; if (pToken->z[k] == ',') { *next = pTokenEnd; - *time = useconds; + *pTime = useconds; return 0; } @@ -103,17 +103,17 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 * e.g., now+12a, now-5h */ SStrToken valueToken; - index = 0; - sToken = tStrGetToken(pTokenEnd, &index, false); - pTokenEnd += index; + idx = 0; + sToken = tStrGetToken(pTokenEnd, &idx, false); + pTokenEnd += idx; if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) { - index = 0; - valueToken = tStrGetToken(pTokenEnd, &index, false); - pTokenEnd += index; + idx = 0; + valueToken = tStrGetToken(pTokenEnd, &idx, false); + pTokenEnd += idx; if (valueToken.n < 2) { - return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z); + return tscInvalidOperationMsg(err, "value expected in timestamp", sToken.z); } char unit = 0; @@ -130,7 +130,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 *next = pTokenEnd; } - *time = useconds; + *pTime = useconds; return TSDB_CODE_SUCCESS; } @@ -433,7 +433,7 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { - int32_t index = 0; + int32_t idx = 0; SStrToken sToken = {0}; char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header @@ -455,9 +455,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i SSchema *pSchema = &schema[colIndex]; // get colId here - index = 0; - sToken = tStrGetToken(*str, &index, true); - *str += index; + idx = 0; + sToken = tStrGetToken(*str, &idx, true); + *str += idx; if (sToken.type == TK_QUESTION) { if (!isParseBindParam) { @@ -564,7 +564,7 @@ int32_t boundIdxCompar(const void *lhs, const void *rhs) { int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam, int32_t* numOfRows, char *tmpTokenBuf) { - int32_t index = 0; + int32_t idx = 0; int32_t code = 0; (*numOfRows) = 0; @@ -584,11 +584,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn pDataBlock->rowBuilder.rowSize = extendedRowSize; while (1) { - index = 0; - sToken = tStrGetToken(*str, &index, false); + idx = 0; + sToken = tStrGetToken(*str, &idx, false); if (sToken.n == 0 || sToken.type != TK_LP) break; - *str += index; + *str += idx; if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= 
pDataBlock->nAllocSize) { int32_t tSize; code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize); @@ -609,13 +609,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn pDataBlock->size += len; - index = 0; - sToken = tStrGetToken(*str, &index, false); + idx = 0; + sToken = tStrGetToken(*str, &idx, false); if (sToken.n == 0 || sToken.type != TK_RP) { return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str); } - *str += index; + *str += idx; (*numOfRows)++; } @@ -876,7 +876,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken, bool *dbInc static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) { - int32_t index = 0; + int32_t idx = 0; SStrToken sToken = {0}; SStrToken tableToken = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -891,14 +891,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC char *sql = *sqlstr; // get the token of specified table - index = 0; - tableToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + tableToken = tStrGetToken(sql, &idx, false); + sql += idx; // skip possibly exists column list - index = 0; - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; int32_t numOfColList = 0; @@ -907,8 +907,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC *boundColumn = &sToken.z[0]; while (1) { - index = 0; - sToken = tStrGetToken(sql, &index, false); + idx = 0; + sToken = tStrGetToken(sql, &idx, false); if (sToken.type == TK_ILLEGAL) { return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); @@ -918,12 +918,12 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC break; } - sql += index; + sql += idx; ++numOfColList; } - sToken = tStrGetToken(sql, &index, false); - sql += index; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; } if (numOfColList == 0 && (*boundColumn) != NULL) { @@ -933,9 +933,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX); if (sToken.type == TK_USING) { // create table if not exists according to the super table - index = 0; - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; if (sToken.type == TK_ILLEGAL) { return tscSQLSyntaxErrMsg(pCmd->payload, NULL, sql); @@ -980,8 +980,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC SParsedDataColInfo spd = {0}; tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta)); - index = 0; - sToken = tStrGetToken(sql, &index, false); + idx = 0; + sToken = tStrGetToken(sql, &idx, false); if (sToken.type != TK_TAGS && sToken.type != TK_LP) { tscDestroyBoundColumnInfo(&spd); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z); @@ -1002,16 +1002,16 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC sql = end; - index = 0; // keywords of "TAGS" - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; // keywords of "TAGS" + sToken = tStrGetToken(sql, &idx, false); + sql += idx; } else { - sql += index; + sql += idx; } - index = 0; - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; if (sToken.type != TK_LP) { 
tscDestroyBoundColumnInfo(&spd); @@ -1027,9 +1027,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC for (int i = 0; i < spd.numOfBound; ++i) { SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]]; - index = 0; - sToken = tStrGetToken(sql, &index, true); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, true); + sql += idx; if (TK_ILLEGAL == sToken.type) { tdDestroyKVRowBuilder(&kvRowBuilder); @@ -1101,9 +1101,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC free(row); pInsertParam->tagData.data = pTag; - index = 0; - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; if (sToken.n == 0 || sToken.type != TK_RP) { return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z); } @@ -1112,9 +1112,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2) * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val); * */ - index = 0; - sToken = tStrGetToken(sql, &index, false); - sql += index; + idx = 0; + sToken = tStrGetToken(sql, &idx, false); + sql += idx; int numOfColsAfterTags = 0; if (sToken.type == TK_LP) { if (*boundColumn != NULL) { @@ -1124,18 +1124,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC } while (1) { - index = 0; - sToken = tStrGetToken(sql, &index, false); + idx = 0; + sToken = tStrGetToken(sql, &idx, false); if (sToken.type == TK_RP) { break; } - if (sToken.n == 0 || sToken.type == TK_SEMI || index == 0) { + if (sToken.n == 0 || sToken.type == TK_SEMI || idx == 0) { return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql); } - sql += index; + sql += idx; ++numOfColsAfterTags; } @@ -1143,7 +1143,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; } - sToken = tStrGetToken(sql, &index, false); + sToken = tStrGetToken(sql, &idx, false); } sql = sToken.z; @@ -1213,9 +1213,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t code = TSDB_CODE_SUCCESS; - int32_t index = 0; - SStrToken sToken = tStrGetToken(str, &index, false); - str += index; + int32_t idx = 0; + SStrToken sToken = tStrGetToken(str, &idx, false); + str += idx; if (sToken.type != TK_LP) { code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z); @@ -1225,9 +1225,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat bool isOrdered = true; int32_t lastColIdx = -1; // last column found while (1) { - index = 0; - sToken = tStrGetToken(str, &index, false); - str += index; + idx = 0; + sToken = tStrGetToken(str, &idx, false); + str += idx; char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character backstick(`) strncpy(tmpTokenBuf, sToken.z, sToken.n); @@ -1404,8 +1404,8 @@ int tsParseInsertSql(SSqlObj *pSql) { tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList); while (1) { - int32_t index = 0; - SStrToken sToken = tStrGetToken(str, &index, false); + int32_t idx = 0; + SStrToken sToken = tStrGetToken(str, &idx, false); // no data in the sql string anymore. 
if (sToken.n == 0) { @@ -1469,9 +1469,9 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - index = 0; - sToken = tStrGetToken(str, &index, false); - str += index; + idx = 0; + sToken = tStrGetToken(str, &idx, false); + str += idx; if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) { code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z); @@ -1484,13 +1484,13 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - index = 0; - sToken = tStrGetToken(str, &index, false); + idx = 0; + sToken = tStrGetToken(str, &idx, false); if (sToken.type != TK_STRING && sToken.type != TK_ID) { code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z); goto _clean; } - str += index; + str += idx; if (sToken.n == 0) { code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z); goto _clean; @@ -1590,7 +1590,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) { return TSDB_CODE_TSC_NO_WRITE_AUTH; } - int32_t index = 0; + int32_t idx = 0; SSqlCmd *pCmd = &pSql->cmd; pCmd->count = 0; @@ -1600,12 +1600,12 @@ int tsInsertInitialCheck(SSqlObj *pSql) { SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT); - SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false); + SStrToken sToken = tStrGetToken(pSql->sqlstr, &idx, false); if (sToken.type != TK_INSERT && sToken.type != TK_IMPORT) { return tscSQLSyntaxErrMsg(pInsertParam->msg, NULL, sToken.z); } - sToken = tStrGetToken(pSql->sqlstr, &index, false); + sToken = tStrGetToken(pSql->sqlstr, &idx, false); if (sToken.type != TK_INTO) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z); } diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 5a7de5fd5aa63f32b9a639b186b0f73c439166fe..a5673378f5646479ab53d638e5ff74dee9a4c879 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -930,7 +930,7 @@ _cleanup: free(colKVs); if (r == fromIndex) { - tscError("buffer can not fit one line"); + tscDebug("buffer can not fit one line"); *cTableSqlLen = 0; } else { *cTableSqlLen = totalLen; @@ -1028,7 +1028,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) { tscError("SML:0x%"PRIx64" Apply points failed. exceeds max sql insert batches", info->id); - code = TSDB_CODE_TSC_OUT_OF_MEMORY; + code = TSDB_CODE_TSC_TOO_MANY_SML_LINES; goto cleanup; } @@ -1047,7 +1047,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi tscDebug("SML:0x%"PRIx64" sql: %s" , info->id, batch->sql); if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) { tscError("SML:0x%"PRIx64" Apply points failed. 
exceeds max sql insert batches", info->id); - code = TSDB_CODE_TSC_OUT_OF_MEMORY; + code = TSDB_CODE_TSC_TOO_MANY_SML_LINES; goto cleanup; } bool batchesExecuted[MAX_SML_SQL_INSERT_BATCHES] = {false}; @@ -1966,14 +1966,14 @@ int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, return TSDB_CODE_SUCCESS; } -static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) { +static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **idx, SSmlLinesInfo* info) { const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; int len = 0; char key[] = "ts"; char *value = NULL; - start = cur = *index; + start = cur = *idx; *pTS = calloc(1, sizeof(TAOS_SML_KV)); while(*cur != '\0') { @@ -2013,8 +2013,8 @@ bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) { return false; } -static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) { - const char *cur = *index; +static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) { + const char *cur = *idx; char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write int16_t len = 0; @@ -2048,16 +2048,17 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash memcpy(pKV->key, key, len + 1); addEscapeCharToString(pKV->key, len); tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); - *index = cur + 1; + *idx = cur + 1; return TSDB_CODE_SUCCESS; } -static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, +static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **idx, bool *is_last_kv, SSmlLinesInfo* info, bool isTag) { const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; char *value = NULL; + int32_t bufSize = TSDB_FUNC_BUF_SIZE; int16_t len = 0; bool kv_done = false; @@ -2077,7 +2078,12 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, val_rqoute } val_state; - start = cur = *index; + value = malloc(bufSize); + if (value == NULL) { + ret = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto error; + } + start = cur = *idx; tag_state = tag_common; val_state = val_common; @@ -2095,22 +2101,20 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, back_slash = false; cur++; - len++; break; } if (*cur == '"') { - if (cur == *index) { + if (cur == *idx) { tag_state = tag_lqoute; } cur += 1; - len += 1; break; } else if (*cur == 'L') { - line_len = strlen(*index); + line_len = strlen(*idx); /* common character at the end */ - if (cur + 1 >= *index + line_len) { + if (cur + 1 >= *idx + line_len) { *is_last_kv = true; kv_done = true; break; @@ -2118,11 +2122,10 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, if (*(cur + 1) == '"') { /* string starts here */ - if (cur + 1 == *index + 1) { + if (cur + 1 == *idx + 1) { tag_state = tag_lqoute; } cur += 2; - len += 2; break; } } @@ -2131,8 +2134,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, case '\\': back_slash = true; cur++; - len++; - break; + continue; case ',': kv_done = true; break; @@ -2146,7 +2148,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, default: cur++; - len++; } break; @@ -2160,7 +2161,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, back_slash = false; cur++; - len++; break; } else if (double_quote == true) { if (*cur != ' ' && *cur != ',' && *cur != '\0') { @@ -2182,13 +2182,11 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, case '\\': back_slash = true; cur++; - 
len++; - break; + continue; case '"': double_quote = true; cur++; - len++; break; case '\0': @@ -2199,7 +2197,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, default: cur++; - len++; } break; @@ -2217,14 +2214,13 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, goto error; } - back_slash = false; - cur++; - len++; + back_slash = false; + cur++; break; } if (*cur == '"') { - if (cur == *index) { + if (cur == *idx) { val_state = val_lqoute; } else { if (*(cur - 1) != '\\') { @@ -2235,13 +2231,12 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, } cur += 1; - len += 1; break; } else if (*cur == 'L') { - line_len = strlen(*index); + line_len = strlen(*idx); /* common character at the end */ - if (cur + 1 >= *index + line_len) { + if (cur + 1 >= *idx + line_len) { *is_last_kv = true; kv_done = true; break; @@ -2249,15 +2244,13 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, if (*(cur + 1) == '"') { /* string starts here */ - if (cur + 1 == *index + 1) { + if (cur + 1 == *idx + 1) { val_state = val_lqoute; cur += 2; - len += 2; } else { /* MUST at the end of string */ - if (cur + 2 >= *index + line_len) { + if (cur + 2 >= *idx + line_len) { cur += 2; - len += 2; *is_last_kv = true; kv_done = true; } else { @@ -2271,7 +2264,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, } cur += 2; - len += 2; kv_done = true; } } @@ -2284,8 +2276,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, case '\\': back_slash = true; cur++; - len++; - break; + continue; case ',': kv_done = true; @@ -2300,7 +2291,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, default: cur++; - len++; } break; @@ -2311,10 +2301,11 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ret = TSDB_CODE_TSC_LINE_SYNTAX_ERROR; goto error; } - + if (*cur == '"') { + start++; + } back_slash = false; cur++; - len++; break; } else if (double_quote == true) { if (*cur != ' ' && *cur != ',' && *cur != '\0') { @@ -2336,13 +2327,11 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, case '\\': back_slash = true; cur++; - len++; - break; + continue; case '"': double_quote = true; cur++; - len++; break; case '\0': @@ -2353,7 +2342,6 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, default: cur++; - len++; } break; @@ -2362,42 +2350,54 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, } } + if (start < cur) { + if (bufSize <= len + (cur - start)) { + bufSize *= 2; + char *tmp = realloc(value, bufSize); + if (tmp == NULL) { + ret = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto error; + } + value = tmp; + } + memcpy(value + len, start, cur - start); // [start, cur) + len += cur - start; + start = cur; + } + if (kv_done == true) { break; } } if (len == 0 || ret != TSDB_CODE_SUCCESS) { - free(pKV->key); - pKV->key = NULL; - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + ret = TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + goto error; } - value = calloc(len + 1, 1); - memcpy(value, start, len); value[len] = '\0'; if (!convertSmlValueType(pKV, value, len, info, isTag)) { tscError("SML:0x%"PRIx64" Failed to convert sml value string(%s) to any type", info->id, value); - free(value); ret = TSDB_CODE_TSC_INVALID_VALUE; goto error; } free(value); - *index = (*cur == '\0') ? cur : cur + 1; + *idx = (*cur == '\0') ? 
cur : cur + 1; return ret; error: - //free previous alocated key field + //free previous allocated buffer and key field + free(value); free(pKV->key); pKV->key = NULL; return ret; } -static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index, +static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **idx, uint8_t *has_tags, SSmlLinesInfo* info) { - const char *cur = *index; + const char *cur = *idx; int16_t len = 0; pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1); @@ -2441,7 +2441,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; } addEscapeCharToString(pSml->stableName, len); - *index = cur + 1; + *idx = cur + 1; tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len); return TSDB_CODE_SUCCESS; @@ -2464,10 +2464,10 @@ int32_t isValidChildTableName(const char *pTbName, int16_t len, SSmlLinesInfo* i static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, - const char **index, bool isField, + const char **idx, bool isField, TAOS_SML_DATA_POINT* smlData, SHashObj *pHash, SSmlLinesInfo* info) { - const char *cur = *index; + const char *cur = *idx; int32_t ret = TSDB_CODE_SUCCESS; TAOS_SML_KV *pkv; bool is_last_kv = false; @@ -2555,7 +2555,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, error: return ret; done: - *index = cur; + *idx = cur; return ret; } @@ -2575,13 +2575,13 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t } int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) { - const char* index = sql; + const char* idx = sql; int32_t ret = TSDB_CODE_SUCCESS; uint8_t has_tags = 0; TAOS_SML_KV *timestamp = NULL; SHashObj *keyHashTable = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); - ret = parseSmlMeasurement(smlData, &index, &has_tags, info); + ret = parseSmlMeasurement(smlData, &idx, &has_tags, info); if (ret) { tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id); taosHashCleanup(keyHashTable); @@ -2591,7 +2591,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf //Parse Tags if (has_tags) { - ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info); + ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &idx, false, smlData, keyHashTable, info); if (ret) { tscError("SML:0x%"PRIx64" Unable to parse tag", info->id); taosHashCleanup(keyHashTable); @@ -2601,17 +2601,23 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum); //Parse fields - ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info); + ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &idx, true, smlData, keyHashTable, info); if (ret) { tscError("SML:0x%"PRIx64" Unable to parse field", info->id); taosHashCleanup(keyHashTable); return ret; } tscDebug("SML:0x%"PRIx64" Parse fields finished, num of fields:%d", info->id, smlData->fieldNum); + if (smlData->fieldNum == 0) { + tscDebug("SML:0x%"PRIx64" Parse fields error, no field in line", info->id); + taosHashCleanup(keyHashTable); + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + taosHashCleanup(keyHashTable); //Parse timestamp - ret = parseSmlTimeStamp(&timestamp, &index, info); + ret = parseSmlTimeStamp(&timestamp,
&idx, info); if (ret) { tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id); return ret; diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index 4b2738e567d7535bba170d390200b73cf794a4f2..525bfa4bd3ac1cdbb43d68ef4fa3697bd9b20ac3 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -33,8 +33,8 @@ static uint64_t genUID() { return id; } -static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) { - const char *cur = *index; +static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **idx, SSmlLinesInfo* info) { + const char *cur = *idx; uint16_t len = 0; pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1); @@ -76,13 +76,13 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, } addEscapeCharToString(pSml->stableName, len); - *index = cur + 1; + *idx = cur + 1; tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len); return TSDB_CODE_SUCCESS; } -static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) { +static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **idx, SSmlLinesInfo* info) { //Timestamp must be the first KV to parse assert(*num_kvs == 0); @@ -92,7 +92,7 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char char key[] = OTD_TIMESTAMP_COLUMN_NAME; char *value = NULL; - start = cur = *index; + start = cur = *idx; //allocate fields for timestamp and value *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); @@ -130,12 +130,12 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char addEscapeCharToString((*pTS)->key, (int32_t)strlen(key)); *num_kvs += 1; - *index = cur + 1; + *idx = cur + 1; return ret; } -static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) { +static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **idx, SSmlLinesInfo* info) { //skip timestamp TAOS_SML_KV *pVal = *pKVs + 1; const char *start, *cur; @@ -145,7 +145,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch char key[] = OTD_METRIC_VALUE_COLUMN_NAME; char *value = NULL; - start = cur = *index; + start = cur = *idx; //if metric value is string if (*cur == '"') { @@ -201,12 +201,12 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key)); *num_kvs += 1; - *index = cur + 1; + *idx = cur + 1; return ret; } -static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) { - const char *cur = *index; +static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) { + const char *cur = *idx; char key[TSDB_COL_NAME_LEN]; uint16_t len = 0; @@ -244,17 +244,17 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj memcpy(pKV->key, key, len + 1); addEscapeCharToString(pKV->key, len); //tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); - *index = cur + 1; + *idx = cur + 1; return TSDB_CODE_SUCCESS; } -static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, +static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **idx, bool *is_last_kv, SSmlLinesInfo* info) { const char *start, *cur; char *value = NULL; 
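[Editor's sketch] The parseSmlValue rework above drops the per-character len++ bookkeeping and instead copies each pending [start, cur) span into a heap buffer that starts at TSDB_FUNC_BUF_SIZE and grows by doubling. A sketch of that accumulation pattern with a hypothetical helper name (the loop-until-it-fits growth here is a defensive variant of the hunk's single doubling per copy):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    // Append the half-open span [start, end) to *buf, doubling capacity as needed.
    static int appendSpan(char **buf, int32_t *len, int32_t *cap,
                          const char *start, const char *end) {
        int32_t need = (int32_t)(end - start);
        while (*cap <= *len + need) {       // "<=" leaves room for a trailing '\0'
            int32_t newCap = *cap * 2;
            char *tmp = realloc(*buf, newCap);
            if (tmp == NULL) {
                return -1;                  // caller maps this to TSDB_CODE_TSC_OUT_OF_MEMORY
            }
            *buf = tmp;
            *cap = newCap;
        }
        memcpy(*buf + *len, start, (size_t)need);
        *len += need;
        return 0;
    }
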
uint16_t len = 0; - start = cur = *index; + start = cur = *idx; while (1) { // whitespace or '\0' identifies a value @@ -290,14 +290,14 @@ static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, } tfree(value); - *index = (*cur == '\0') ? cur : cur + 1; + *idx = (*cur == '\0') ? cur : cur + 1; return TSDB_CODE_SUCCESS; } static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, - const char **index, char **childTableName, + const char **idx, char **childTableName, SHashObj *pHash, SSmlLinesInfo* info) { - const char *cur = *index; + const char *cur = *idx; int32_t ret = TSDB_CODE_SUCCESS; TAOS_SML_KV *pkv; bool is_last_kv = false; @@ -357,11 +357,11 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, } static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) { - const char* index = line; + const char* idx = line; int32_t ret = TSDB_CODE_SUCCESS; //Parse metric - ret = parseTelnetMetric(smlData, &index, info); + ret = parseTelnetMetric(smlData, &idx, info); if (ret) { tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id); return ret; @@ -369,7 +369,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id); //Parse timestamp - ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info); + ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &idx, info); if (ret) { tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id); return ret; @@ -377,7 +377,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id); //Parse value - ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info); + ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &idx, info); if (ret) { tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id); return ret; @@ -386,7 +386,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData //Parse tagKVs SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); - ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info); + ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &idx, &smlData->childTableName, keyHashTable, info); if (ret) { tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id); taosHashCleanup(keyHashTable); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 454f15829874a51a38428f3ffb420e4704e2151b..665efa4c6dbca0437540ee1dd9875267bc2d8b72 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -121,11 +121,11 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_ return TSDB_CODE_SUCCESS; } -static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { +static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) { SNormalStmt* normal = &stmt->normal; for (uint16_t i = 0; i < normal->numParams; ++i) { - TAOS_BIND* tb = bind + i; + TAOS_BIND* tb = pBind + i; tVariant* var = normal->params + i; tVariantDestroy(var); @@ -383,8 +383,8 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) { //////////////////////////////////////////////////////////////////////////////// // functions for insertion statement preparation -static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* 
param, TAOS_BIND* bind, int32_t colNum) { - if (bind->is_null != NULL && *(bind->is_null)) { +static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* pBind, int32_t colNum) { + if (pBind->is_null != NULL && *(pBind->is_null)) { setNull(data + param->offset, param->type, param->bytes); return TSDB_CODE_SUCCESS; } @@ -772,7 +772,7 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam } #endif - if (bind->buffer_type != param->type) { + if (pBind->buffer_type != param->type) { tscError("column type mismatch"); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -782,39 +782,39 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer; + *(uint8_t *)(data + param->offset) = *(uint8_t *)pBind->buffer; break; case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer; + *(uint16_t *)(data + param->offset) = *(uint16_t *)pBind->buffer; break; case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_FLOAT: - *(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer; + *(uint32_t *)(data + param->offset) = *(uint32_t *)pBind->buffer; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_TIMESTAMP: - *(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer; + *(uint64_t *)(data + param->offset) = *(uint64_t *)pBind->buffer; break; case TSDB_DATA_TYPE_BINARY: - if ((*bind->length) > (uintptr_t)param->bytes) { + if ((*pBind->length) > (uintptr_t)param->bytes) { tscError("column length is too big"); return TSDB_CODE_TSC_INVALID_VALUE; } - size = (short)*bind->length; - STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size); + size = (short)*pBind->length; + STR_WITH_SIZE_TO_VARSTR(data + param->offset, pBind->buffer, size); return TSDB_CODE_SUCCESS; case TSDB_DATA_TYPE_NCHAR: { int32_t output = 0; - if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + if (!taosMbsToUcs4(pBind->buffer, *pBind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { tscError("convert nchar failed"); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -889,27 +889,27 @@ static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, ST } -static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) { - if (bind->buffer_type != param->type || !isValidDataType(param->type)) { +static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* pBind, int32_t rowNum) { + if (pBind->buffer_type != param->type || !isValidDataType(param->type)) { tscError("column mismatch or invalid"); return TSDB_CODE_TSC_INVALID_VALUE; } - if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) { + if (IS_VAR_DATA_TYPE(param->type) && pBind->length == NULL) { tscError("BINARY/NCHAR no length"); return TSDB_CODE_TSC_INVALID_VALUE; } - for (int i = 0; i < bind->num; ++i) { + for (int i = 0; i < pBind->num; ++i) { char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i); - if (bind->is_null != NULL && bind->is_null[i]) { + if (pBind->is_null != NULL && pBind->is_null[i]) { setNull(data + param->offset, param->type, param->bytes); 
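[Editor's sketch] In these tscPrepare.c hunks the bind parameter is renamed to pBind (side-stepping the socket bind() identifier) while the logic is untouched: doBindBatchParam iterates pBind->num rows, uses buffer_length as the stride into buffer, consults is_null per row, and reads length[] only for BINARY/NCHAR. A hedged caller-side sketch of one column's batch bind, using only the TAOS_MULTI_BIND fields these hunks touch (values are illustrative; the API call is commented out since statement setup is out of scope here):

    int32_t vals[3]   = {10, 20, 30};
    char    isNull[3] = {0, 1, 0};          // row 1 is NULL: setNull() is used instead of a copy
    TAOS_MULTI_BIND b = {0};
    b.buffer_type   = TSDB_DATA_TYPE_INT;   // must equal param->type or doBindBatchParam rejects it
    b.buffer        = vals;
    b.buffer_length = sizeof(int32_t);      // stride between consecutive row values
    b.length        = NULL;                 // required only for BINARY/NCHAR columns
    b.is_null       = isNull;
    b.num           = 3;                    // rows in this batch; validated against (0, INT16_MAX]
    // taos_stmt_bind_param_batch(stmt, &b);
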
continue; } if (!IS_VAR_DATA_TYPE(param->type)) { - memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes); + memcpy(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, tDataTypes[param->type].bytes); if (param->offset == 0) { if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { @@ -918,21 +918,21 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU } } } else if (param->type == TSDB_DATA_TYPE_BINARY) { - if (bind->length[i] > (uintptr_t)param->bytes) { - tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); + if (pBind->length[i] > (uintptr_t)param->bytes) { + tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]); return TSDB_CODE_TSC_INVALID_VALUE; } - int16_t bsize = (short)bind->length[i]; - STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize); + int16_t bsize = (short)pBind->length[i]; + STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, bsize); } else if (param->type == TSDB_DATA_TYPE_NCHAR) { - if (bind->length[i] > (uintptr_t)param->bytes) { - tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); + if (pBind->length[i] > (uintptr_t)param->bytes) { + tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]); return TSDB_CODE_TSC_INVALID_VALUE; } int32_t output = 0; - if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { - tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i)); + if (!taosMbsToUcs4((char *)pBind->buffer + pBind->buffer_length * i, pBind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)pBind->buffer + pBind->buffer_length * i)); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -943,7 +943,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU return TSDB_CODE_SUCCESS; } -static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { +static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; @@ -995,7 +995,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { SParamInfo* param = &pBlock->params[j]; - int code = doBindParam(pBlock, data, param, &bind[param->idx], 1); + int code = doBindParam(pBlock, data, param, &pBind[param->idx], 1); if (code != TSDB_CODE_SUCCESS) { tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); @@ -1006,10 +1006,10 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { } -static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) { +static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* pBind, int colIdx) { SSqlCmd* pCmd = &stmt->pSql->cmd; STscStmt* pStmt = (STscStmt*)stmt; - int rowNum = bind->num; + int rowNum = pBind->num; STableDataBlocks* pBlock = NULL; @@ 
-1063,12 +1063,12 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c if (colIdx == -1) { for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { SParamInfo* param = &pBlock->params[j]; - if (bind[param->idx].num != rowNum) { - tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num); + if (pBind[param->idx].num != rowNum) { + tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, pBind[param->idx].num); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch"); } - int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); + int code = doBindBatchParam(pBlock, param, &pBind[param->idx], pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); @@ -1079,7 +1079,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c } else { SParamInfo* param = &pBlock->params[colIdx]; - int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize); + int code = doBindBatchParam(pBlock, param, pBind, pCmd->batchSize); if (code != TSDB_CODE_SUCCESS) { tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); @@ -1312,8 +1312,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { return ret; } - int32_t index = 0; - SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + int32_t idx = 0; + SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n == 0) { tscError("table is is expected, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql); @@ -1333,7 +1333,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { pStmt->mtb.tagSet = true; - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) { return TSDB_CODE_SUCCESS; } @@ -1343,14 +1343,14 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql); } - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) { tscError("invalid token, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql); } pStmt->mtb.stbname = sToken; - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) { tscError("invalid token, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? 
sToken.z : pCmd->insertParam.sql); @@ -1361,9 +1361,9 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { if (sToken.type == TK_LP) { pStmt->mtb.tagColSet = true; pStmt->mtb.tagCols = sToken; - int32_t tagColsStart = index; + int32_t tagColsStart = idx; while (1) { - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.type == TK_ILLEGAL) { return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); } @@ -1378,16 +1378,16 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { tscError("tag column list expected, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql); } - pStmt->mtb.tagCols.n = index - tagColsStart + 1; + pStmt->mtb.tagCols.n = idx - tagColsStart + 1; - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0 || sToken.type != TK_TAGS) { tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql); } } - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0 || sToken.type != TK_LP) { tscError("( expected, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql); @@ -1398,7 +1398,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { int32_t loopCont = 1; while (loopCont) { - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0) { tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql); @@ -1429,7 +1429,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql); } - sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); + sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false); if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) { tscError("sql error, sql:%s", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? 
sToken.z : pCmd->insertParam.sql); @@ -1944,7 +1944,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { STMT_RET(TSDB_CODE_SUCCESS); } -int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { +int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* pBind) { STscStmt* pStmt = (STscStmt*)stmt; STMT_CHECK @@ -1965,18 +1965,18 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid); - STMT_RET(insertStmtBindParam(pStmt, bind)); + STMT_RET(insertStmtBindParam(pStmt, pBind)); } else { - STMT_RET(normalStmtBindParam(pStmt, bind)); + STMT_RET(normalStmtBindParam(pStmt, pBind)); } } -int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { +int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind) { STscStmt* pStmt = (STscStmt*)stmt; STMT_CHECK - if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) { + if (pBind == NULL || pBind->num <= 0 || pBind->num > INT16_MAX) { tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param")); } @@ -2000,21 +2000,21 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { pStmt->last = STMT_BIND; - STMT_RET(insertStmtBindParamBatch(pStmt, bind, -1)); + STMT_RET(insertStmtBindParamBatch(pStmt, pBind, -1)); } -int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) { +int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind, int colIdx) { STscStmt* pStmt = (STscStmt*)stmt; STMT_CHECK - if (bind == NULL) { + if (pBind == NULL) { tscError("0x%" PRIx64 " invalid parameter: bind is NULL", pStmt->pSql->self); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param: bind is NULL")); } - if (bind->num <= 0 || bind->num > INT16_MAX) { + if (pBind->num <= 0 || pBind->num > INT16_MAX) { char errMsg[128]; - sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", bind->num, INT16_MAX); + sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", pBind->num, INT16_MAX); tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg)); } @@ -2045,7 +2045,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in pStmt->last = STMT_BIND_COL; - STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx)); + STMT_RET(insertStmtBindParamBatch(pStmt, pBind, colIdx)); } int taos_stmt_add_batch(TAOS_STMT* stmt) { diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 8dfb0b7d67f988d694f462fafd6e78ea969d7c5d..7de9ef762a29c47bf56eb63630be2e45cf269b39 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -283,18 +283,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { // } pthread_mutex_lock(&pSql->subState.mutex); if (pSql->pSubs != NULL && pSql->subState.states != NULL) { - for (int32_t i = 0; i < pQdesc->numOfSub; ++i) { + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { // because subState maybe free on anytime by any thread, check validate from here - if(pSql->subState.numOfSub != pQdesc->numOfSub || - pSql->pSubs == NULL || - pSql->subState.states == NULL) { - tscError(" QUERY-HEART STscObj=%p subState maybe free. 
numOfSub=%d pSubs=%p states=%p", - pObj, pSql->subState.numOfSub, pSql->pSubs, pSql->subState.states); - pQdesc->numOfSub = 0; - // break for - break; - } - SSqlObj *psub = pSql->pSubs[i]; int64_t self = (psub != NULL)? psub->self : 0; @@ -307,6 +297,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { p += len; } } + pQdesc->numOfSub = pSql->subState.numOfSub; pthread_mutex_unlock(&pSql->subState.mutex); } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 3d806bdac5758016aef05786510a443de56c6120..193676cd93303f035cda24a2d944e85fbff91ee1 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -47,11 +47,11 @@ #define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey)) -// -1 is tbname column index, so here use the -2 as the initial value +// -1 is tbname column idx, so here use the -2 as the initial value #define COLUMN_INDEX_INITIAL_VAL (-2) #define COLUMN_INDEX_INITIALIZER \ { COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL } -#define COLUMN_INDEX_VALID(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_MIN_VALID_COLUMN_INDEX)) +#define COLUMN_INDEX_VALID(idx) (((idx).tableIndex >= 0) && ((idx).columnIndex >= TSDB_MIN_VALID_COLUMN_INDEX)) #define TBNAME_LIST_SEP "," typedef struct SColumnList { // todo refactor @@ -335,21 +335,21 @@ static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) { } static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision) { - int64_t time = 0; + int64_t t = 0; stringProcess(pVar->pz, pVar->nLen); char* seg = strnchr(pVar->pz, '-', pVar->nLen, false); if (seg != NULL) { - if (taosParseTime(pVar->pz, &time, pVar->nLen, precision, tsDaylight) != TSDB_CODE_SUCCESS) { + if (taosParseTime(pVar->pz, &t, pVar->nLen, precision, tsDaylight) != TSDB_CODE_SUCCESS) { return -1; } } else { - if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) { + if (tVariantDump(pVar, (char*)&t, TSDB_DATA_TYPE_BIGINT, true)) { return -1; } } tVariantDestroy(pVar); - tVariantCreateFromBinary(pVar, (char*)&time, 0, TSDB_DATA_TYPE_BIGINT); + tVariantCreateFromBinary(pVar, (char*)&t, 0, TSDB_DATA_TYPE_BIGINT); return 0; } static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) { @@ -606,8 +606,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { case TSDB_SQL_DROP_TABLE: case TSDB_SQL_DROP_USER: case TSDB_SQL_DROP_ACCT: - case TSDB_SQL_DROP_DNODE: - case TSDB_SQL_DROP_DB: { + case TSDB_SQL_DROP_DNODE: { const char* msg2 = "invalid name"; const char* msg3 = "param name too long"; @@ -626,14 +625,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } } - if (pInfo->type == TSDB_SQL_DROP_DB) { - assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1); - code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName); - if (code != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - - } else if (pInfo->type == TSDB_SQL_DROP_TABLE) { + if (pInfo->type == TSDB_SQL_DROP_TABLE) { assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1); code = tscSetTableFullName(&pTableMetaInfo->name, &sTblToken, pSql, dbIncluded); @@ -656,11 +648,12 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { break; } + case TSDB_SQL_DROP_DB: case TSDB_SQL_USE_DB: { const char* msg = "invalid db name"; SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0); - if (tscValidateName(pToken, false, NULL) != TSDB_CODE_SUCCESS) { + if 
(tscValidateName(pToken, true, NULL) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); } @@ -707,7 +700,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char buf[TSDB_DB_NAME_LEN] = {0}; SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf)); - if (tscValidateName(&token, false, NULL) != TSDB_CODE_SUCCESS) { + if (tscValidateName(&token, true, NULL) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -816,7 +809,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0); - if (tscValidateName(pToken, false, NULL) != TSDB_CODE_SUCCESS) { + if (tscValidateName(pToken, true, NULL) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -1162,8 +1155,8 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name)); } - SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, 0); + SColumnIndex idx = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &idx, &s, TSDB_COL_NORMAL, 0); return TSDB_CODE_SUCCESS; } @@ -1310,17 +1303,17 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(col, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; int32_t numOfCols = tscGetNumOfColumns(pTableMeta); - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); - } else if (index.columnIndex >= numOfCols) { + } else if (idx.columnIndex >= numOfCols) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } @@ -1329,7 +1322,7 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex)); } - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex); if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE || pSchema->type == TSDB_DATA_TYPE_NCHAR || pSchema->type == TSDB_DATA_TYPE_BINARY) { @@ -1352,8 +1345,8 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS } } - tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema); - SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; + tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, pTableMeta->id.uid, pSchema); + SColIndex colIndex = { .colIndex = idx.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; 
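[Editor's sketch] The tscBuildQueryStreamDesc hunk above replaces the per-iteration revalidation of subState with a simpler invariant: the walk happens entirely under subState.mutex, iterates pSql->subState.numOfSub, and publishes that count into pQdesc only after the loop completes. The locking shape, reduced to its skeleton with field names as in the hunk:

    pthread_mutex_lock(&pSql->subState.mutex);
    if (pSql->pSubs != NULL && pSql->subState.states != NULL) {
        for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
            SSqlObj *psub = pSql->pSubs[i];              // stable while the mutex is held
            int64_t  self = (psub != NULL) ? psub->self : 0;
            // ... serialize one sub-query descriptor using `self` ...
        }
        pQdesc->numOfSub = pSql->subState.numOfSub;      // publish the count actually walked
    }
    pthread_mutex_unlock(&pSql->subState.mutex);
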
taosArrayPush(pGroupExpr->columnInfo, &colIndex); pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; pQueryInfo->stateWindow = true; @@ -1393,11 +1386,11 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(col, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -1639,7 +1632,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { } // field name must be unique - if (has(pFieldList, i + 1, pField->name) == true) { + if (has(pFieldList, i, pField->name) == true) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -1698,7 +1691,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC return false; } - if (has(pTagsList, i + 1, p->name) == true) { + if (has(pTagsList, i, p->name) == true) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -1727,8 +1720,8 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC // field name must be unique for (int32_t i = 0; i < numOfTags; ++i) { TAOS_FIELD* p = taosArrayGet(pTagsList, i); - - if (has(pFieldList, 0, p->name) == true) { + size_t numOfCols = taosArrayGetSize(pFieldList); + if (has(pFieldList, numOfCols, p->name) == true) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -1864,7 +1857,7 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { // field name must be unique for (int32_t i = 0; i < numOfTags + numOfCols; ++i) { - if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) { + if (strncmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) { //return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names"); } @@ -1874,9 +1867,8 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } /* is contained in pFieldList or not */ -static bool has(SArray* pFieldList, int32_t startIdx, const char* name) { - size_t numOfCols = taosArrayGetSize(pFieldList); - for (int32_t j = startIdx; j < numOfCols; ++j) { +static bool has(SArray* pFieldList, int32_t endIdx, const char* name) { + for (int32_t j = 0; j < endIdx; ++j) { TAOS_FIELD* field = taosArrayGet(pFieldList, j); if (strncmp(name, field->name, sizeof(field->name) - 1) == 0) return true; } @@ -1955,8 +1947,8 @@ static int32_t handleScalarTypeExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32 return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = {.tableIndex = tableIndex}; - SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_SCALAR_EXPR, &index, pNode->resultType, pNode->resultBytes, + SColumnIndex idx = {.tableIndex = tableIndex}; + SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_SCALAR_EXPR, &idx, pNode->resultType, pNode->resultBytes, getNewResColId(pCmd), 0, false); // set the colId to the result column id 
pExpr->base.colInfo.colId = pExpr->base.resColId; @@ -2138,9 +2130,9 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, PRIMARYKEY_TIMESTAMP_COL_INDEX); // add the timestamp column into the output columns - SColumnIndex index = {0}; // primary timestamp column info + SColumnIndex idx = {0}; // primary timestamp column info int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); - tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &idx, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false; @@ -2277,10 +2269,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS hasDistinct = (pItem->distinct == true); distIdx = hasDistinct ? i : -1; } - if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){ - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); - } - if(pItem->aliasName != NULL && strcasecmp(pItem->aliasName, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == 0){ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); } @@ -2406,16 +2394,16 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, colIndex); int16_t functionId = (int16_t)((colIndex >= numOfCols) ? TSDB_FUNC_TAGPRJ : TSDB_FUNC_PRJ); - SColumnIndex index = {.tableIndex = tableIndex,}; + SColumnIndex idx = {.tableIndex = tableIndex,}; if (functionId == TSDB_FUNC_TAGPRJ) { - index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema); + idx.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta); + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pSchema); } else { - index.columnIndex = colIndex; + idx.columnIndex = colIndex; } - return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0, + return tscExprAppend(pQueryInfo, functionId, &idx, pSchema->type, pSchema->bytes, colId, 0, (functionId == TSDB_FUNC_TAGPRJ)); } @@ -2488,41 +2476,41 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t if (tokenId == TK_ALL) { // project on all fields TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY); - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } // all meters columns are required - if (index.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required. + if (idx.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required. 
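[Editor's sketch] The has() rework above inverts the duplicate-name scan: instead of each field searching the entries after itself (startIdx = i + 1), it is now checked against the prefix before it (endIdx = i), and validateTagParams passes the full column count when checking tag names against existing columns. The resulting shape, sketched with a hypothetical name to avoid restating the hunk verbatim:

    // true if `name` collides with any of the first endIdx entries of pFieldList
    static bool nameSeenBefore(SArray *pFieldList, int32_t endIdx, const char *name) {
        for (int32_t j = 0; j < endIdx; ++j) {
            TAOS_FIELD *field = taosArrayGet(pFieldList, j);
            if (strncmp(name, field->name, sizeof(field->name) - 1) == 0) {
                return true;
            }
        }
        return false;
    }
    // usage: field i is a duplicate iff nameSeenBefore(pFieldList, i, pField->name)
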
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { - index.tableIndex = i; - int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd); + idx.tableIndex = i; + int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &idx, startPos, pCmd); startPos += inc; } } else { - doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd); + doAddProjectionExprAndResultFields(pQueryInfo, &idx, startPos, pCmd); } // add the primary timestamp column even though it is not required by user - STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[index.tableIndex]->pTableMeta; + STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[idx.tableIndex]->pTableMeta; if (pTableMeta->tableType != TSDB_TEMP_TABLE) { tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid); } } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT || tokenId == TK_BOOL) { // simple column projection query - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; // user-specified constant value as a new result column - index.columnIndex = (pQueryInfo->udColumnId--); - index.tableIndex = 0; + idx.columnIndex = (pQueryInfo->udColumnId--); + idx.tableIndex = 0; SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->exprToken, pItem->aliasName); - SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC, + SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &idx, &colSchema, TSDB_COL_UDC, getNewResColId(pCmd)); tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pItem->pNode->value); }else if (tokenId == TK_ID || tokenId == TK_ARROW) { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; SStrToken* pToken = NULL; if (tokenId == TK_ARROW){ @@ -2542,35 +2530,35 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t pToken = &pItem->pNode->columnName; } - if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } //for tbname and other pseudo columns - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TSWIN_COL(index.columnIndex)) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TSWIN_COL(idx.columnIndex)) { if (outerQuery) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); bool existed = false; SSchema* pSchema = pTableMetaInfo->pTableMeta->schema; for (int32_t i = 0; i < numOfCols; ++i) { if ((strncasecmp(pSchema[i].name, TSQL_TBNAME_L, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) || + idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_TSWIN_START, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX) || + idx.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_TSWIN_STOP, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX) || + idx.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_TSWIN_DURATION, 
tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX) || + idx.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_QUERY_START, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_QUERY_START_COLUMN_INDEX) || + idx.columnIndex == TSDB_QUERY_START_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_QUERY_STOP, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_QUERY_STOP_COLUMN_INDEX) || + idx.columnIndex == TSDB_QUERY_STOP_COLUMN_INDEX) || (strncasecmp(pSchema[i].name, TSQL_QUERY_DURATION, tListLen(pSchema[i].name)) == 0 && - index.columnIndex == TSDB_QUERY_DURATION_COLUMN_INDEX)) { + idx.columnIndex == TSDB_QUERY_DURATION_COLUMN_INDEX)) { existed = true; - index.columnIndex = i; + idx.columnIndex = i; break; } } @@ -2579,47 +2567,47 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - SSchema colSchema = pSchema[index.columnIndex]; + SSchema colSchema = pSchema[idx.columnIndex]; char name[TSDB_COL_NAME_LEN] = {0}; getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1); tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN); - /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, + /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &idx, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); } else { SSchema colSchema; int16_t functionId, colType; - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { colSchema = *tGetTbnameColumnSchema(); functionId = TSDB_FUNC_TAGPRJ; colType = TSDB_COL_TAG; } else { - if (!timeWindowQuery && (index.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX || - index.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX || - index.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX)) { + if (!timeWindowQuery && (idx.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX || + idx.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX || + idx.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } - colSchema = *tGetTimeWindowColumnSchema(index.columnIndex); - functionId = getTimeWindowFunctionID(index.columnIndex); + colSchema = *tGetTimeWindowColumnSchema(idx.columnIndex); + functionId = getTimeWindowFunctionID(idx.columnIndex); colType = TSDB_COL_NORMAL; } char name[TSDB_COL_NAME_LEN] = {0}; getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1); tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN); - /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, functionId, &index, &colSchema, + /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, functionId, &idx, &colSchema, colType, getNewResColId(pCmd)); } pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } else { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + if (idx.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + SSchema* pSchema = 
tscGetTableColumnSchema(pTableMeta, idx.columnIndex); if (tokenId == TK_ARROW && pSchema->type != TSDB_DATA_TYPE_JSON) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } @@ -2627,12 +2615,12 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - addProjectQueryCol(pQueryInfo, startPos, &index, pItem, getNewResColId(pCmd)); + addProjectQueryCol(pQueryInfo, startPos, &idx, pItem, getNewResColId(pCmd)); pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } // add the primary timestamp column even though it is not required by user - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); if (!UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); } @@ -2697,32 +2685,33 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS return TSDB_CODE_SUCCESS; } -void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrToken* pToken, bool multiCols) { +void setResultColName(char* name, bool finalResult, tSqlExprItem* pItem, int32_t functionId, SStrToken* pToken, + bool multiCols) { if (pItem->aliasName != NULL) { tstrncpy(name, pItem->aliasName, TSDB_COL_NAME_LEN); } else { - char uname[TSDB_COL_NAME_LEN] = {0}; + char colName[TSDB_COL_NAME_LEN] = {0}; int32_t len = MIN(pToken->n + 1, TSDB_COL_NAME_LEN); - tstrncpy(uname, pToken->z, len); + tstrncpy(colName, pToken->z, len); - if (tsKeepOriginalColumnName) { // keep the original column name - tstrncpy(name, uname, TSDB_COL_NAME_LEN); + if (finalResult && tsKeepOriginalColumnName) { // keep the original column name + tstrncpy(name, colName, TSDB_COL_NAME_LEN); } else if (multiCols) { if (!TSDB_FUNC_IS_SCALAR(functionId)) { int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1; char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0}; - snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname); + snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, colName); tstrncpy(name, tmp, TSDB_COL_NAME_LEN); } else { - int32_t index = TSDB_FUNC_SCALAR_INDEX(functionId); - int32_t size = TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1; - char tmp[TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1] = {0}; - snprintf(tmp, size, "%s(%s)", aScalarFunctions[index].name, uname); + int32_t idx = TSDB_FUNC_SCALAR_INDEX(functionId); + int32_t size = TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[idx].name) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[idx].name) + 2 + 1] = {0}; + snprintf(tmp, size, "%s(%s)", aScalarFunctions[idx].name, colName); tstrncpy(name, tmp, TSDB_COL_NAME_LEN); } - } else { // use the user-input result column name + } else { // use the user-input result column name len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN); tstrncpy(name, pItem->pNode->exprToken.z, len); } @@ -2804,7 +2793,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } SExprInfo* pExpr = NULL; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; if (pItem->pNode->Expr.paramList != NULL) { tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0); @@ -2819,47 +2808,47 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // check if 
the table name is valid or not SStrToken tmpToken = pParamElem->pNode->columnName; - if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + if (getTableIndexByName(&tmpToken, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + idx = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; - pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false); } else { // count the number of tables created from the super table - if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); // count on a tag is equivalent to count(tbname) bool isTag = false; - if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) || - index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - index.columnIndex = TSDB_TBNAME_COLUMN_INDEX; + if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) || + idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + idx.columnIndex = TSDB_TBNAME_COLUMN_INDEX; isTag = true; } int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; - pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, isTag); } } else { // count(*) is equivalent to count(primary_timestamp_key) - index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + idx = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes; - pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); - SColumnList list = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList list = createColumnList(1, idx.tableIndex, idx.columnIndex); if (finalResult) { int32_t numOfOutput = tscNumOfFields(pQueryInfo); insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->base.aliasName, @@ -2873,7 +2862,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } // the timestamp may always be needed - if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { + if (idx.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); } @@ -2887,6 +2876,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_TWA: case TSDB_FUNC_MIN: case TSDB_FUNC_MAX:
+ case TSDB_FUNC_MIN_ROW: + case TSDB_FUNC_MAX_ROW: case TSDB_FUNC_DIFF: case TSDB_FUNC_DERIVATIVE: case TSDB_FUNC_CSUM: @@ -2919,18 +2910,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); // elapsed only can be applied to primary key if (functionId == TSDB_FUNC_ELAPSED) { - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || + if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); } @@ -2956,13 +2947,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); // functions can not be applied to tags - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || - (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX || + (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } // 2. 
check if sql function can be applied on this column data type - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); if (functionId == TSDB_FUNC_MODE && pColumnSchema->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pColumnSchema->type == TSDB_DATA_TYPE_TIMESTAMP){ @@ -2973,6 +2964,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); + } else if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } int16_t resultType = 0; @@ -2986,7 +2979,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // set the first column ts for diff query if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) { - SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; + SColumnIndex indexTS = {.tableIndex = idx.tableIndex, .columnIndex = 0}; SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName)); @@ -2997,7 +2990,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) { - SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; + SColumnIndex indexTS = {.tableIndex = idx.tableIndex, .columnIndex = 0}; SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName)); @@ -3006,15 +2999,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); - pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &index, pSchema->type, + pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &idx, pSchema->type, pSchema->bytes, getNewResColId(pCmd), 0, false); tstrncpy(pExpr->base.aliasName, pParamElem->pNode->columnName.z, pParamElem->pNode->columnName.n+1); - ids = createColumnList(1, index.tableIndex, index.columnIndex); + ids = createColumnList(1, idx.tableIndex, idx.columnIndex); insertResultField(pQueryInfo, colIndex + 1, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType, pExpr->base.aliasName, pExpr); } - SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters @@ -3129,7 +3122,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } - SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex); memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); 
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); @@ -3179,54 +3172,55 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; if (pParamElem->pNode->tokenId == TK_ALL) { // select table.* SStrToken tmpToken = pParamElem->pNode->columnName; - if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + if (getTableIndexByName(&tmpToken, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); char name[TSDB_COL_NAME_LEN] = {0}; for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) { - index.columnIndex = j; + idx.columnIndex = j; SStrToken t = {.z = pSchema[j].name, .n = (uint32_t)strnlen(pSchema[j].name, TSDB_COL_NAME_LEN)}; - setResultColName(name, pItem, cvtFunc.originFuncId, &t, true); + setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &t, true); - if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, finalResult, + if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &idx, finalResult, pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } } } else { - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); if (pParamElem->pNode->columnName.z == NULL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } // functions can not be applied to tags - if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) { + if ((idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (idx.columnIndex < 0)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } char name[TSDB_COL_NAME_LEN] = {0}; - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); bool multiColOutput = taosArrayGetSize(pItem->pNode->Expr.paramList) > 1; - setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName, multiColOutput); + setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName, + multiColOutput); - if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult, + if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &idx, finalResult, pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -3246,13 +3240,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) { - SColumnIndex index = {.tableIndex = j, .columnIndex = i}; + 
SColumnIndex idx = {.tableIndex = j, .columnIndex = i}; char name[TSDB_COL_NAME_LEN] = {0}; SStrToken t = {.z = pSchema[i].name, .n = (uint32_t)strnlen(pSchema[i].name, TSDB_COL_NAME_LEN)}; - setResultColName(name, pItem, cvtFunc.originFuncId, &t, true); + setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &t, true); - if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index, + if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[idx.columnIndex], cvtFunc, name, colIndex, &idx, finalResult, pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -3294,26 +3288,26 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); - if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && + if (idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && (functionId == TSDB_FUNC_UNIQUE || functionId == TSDB_FUNC_TAIL)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg29); } // functions can not be applied to tags - if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { + if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -3362,7 +3356,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); colIndex += 1; // the first column is ts - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), interResult, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); @@ -3408,12 +3402,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // todo REFACTOR // set the first column ts for top/bottom query int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? 
TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS; - SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + SColumnIndex index1 = {idx.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; - SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); + SColumnList ids = createColumnList(1, idx.tableIndex, TS_COLUMN_INDEX); insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[tsFuncId].name, pExpr); @@ -3421,7 +3415,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType, &resultSize, &interResult, 0, false, pUdfInfo); - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), interResult, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } else { @@ -3437,19 +3431,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } // todo REFACTOR // set the first column ts for top/bottom query - SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + SColumnIndex index1 = {idx.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; - SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); + SColumnList ids = createColumnList(1, idx.tableIndex, TS_COLUMN_INDEX); insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType, &resultSize, &interResult, 0, false, pUdfInfo); - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), interResult, false); if (functionId == TSDB_FUNC_TAIL){ int64_t offset = 0; @@ -3476,7 +3470,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); // todo refactor: tscColumnListInsert part - SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex); if (finalResult) { insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr); @@ -3502,46 +3496,46 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->Expr.paramList, 0); tSqlExpr* pParam = pParamItem->pNode; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if 
(getColumnIndexByName(&pParam->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); // functions cannot be applied to normal columns int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex < numOfCols && idx.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (index.columnIndex > 0) { - index.columnIndex -= numOfCols; + if (idx.columnIndex > 0) { + idx.columnIndex -= numOfCols; } // 2. validate the column type int16_t colType = 0; - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { colType = TSDB_DATA_TYPE_BINARY; } else { - colType = pSchema[index.columnIndex].type; + colType = pSchema[idx.columnIndex].type; } if (colType == TSDB_DATA_TYPE_BOOL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid, - &pSchema[index.columnIndex]); + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMetaInfo->pTableMeta->id.uid, + &pSchema[idx.columnIndex]); SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); SSchema s = {0}; - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { s = *tGetTbnameColumnSchema(); } else { - s = pTagSchema[index.columnIndex]; + s = pTagSchema[idx.columnIndex]; } int32_t bytes = 0; @@ -3555,7 +3549,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col s.bytes = bytes; TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); - tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG, getNewResColId(pCmd)); + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &idx, &s, TSDB_COL_TAG, getNewResColId(pCmd)); return TSDB_CODE_SUCCESS; } @@ -3566,11 +3560,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = { + SColumnIndex idx = { .tableIndex = 0, .columnIndex = 0, }; - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); int32_t inter = 0; int16_t resType = 0; @@ -3581,10 +3575,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY, .bytes = bytes}; SExprInfo* pExpr = - tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, resType, bytes, getNewResColId(pCmd), bytes, 0); + tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &idx, resType, bytes, getNewResColId(pCmd), bytes, 0); tstrncpy(pExpr->base.aliasName, s.name, sizeof(pExpr->base.aliasName)); - SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex); insertResultField(pQueryInfo, 0, &ids, bytes, s.type, s.name, pExpr); pExpr->base.numOfParams = 1; @@ -3605,18 +3599,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); if (!IS_NUMERIC_TYPE(pSchema->type)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -3778,7 +3772,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col getResultDataInfo(pSchema->type, pSchema->bytes, functionId, counter, &resultType, &resultSize, &interResult, 0, false, pUdfInfo); SExprInfo* pExpr = NULL; - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, + pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), interResult, false); numOutput = numBins - 1; tscExprAddParams(&pExpr->base, (char*)&numOutput, TSDB_DATA_TYPE_INT, sizeof(int32_t)); @@ -3806,7 +3800,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); // todo refactor: tscColumnListInsert part - SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex); if (finalResult) { insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr); @@ -3836,20 +3830,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); // functions can not be applied to tags - if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { + if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -3859,21 +3853,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col getResultDataInfo(TSDB_DATA_TYPE_INT, 4, functionId, 
0, &resType, &bytes, &inter, 0, false, pUdfInfo); SExprInfo* pExpr = - tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, getNewResColId(pCmd), inter, false); + tscExprAppend(pQueryInfo, functionId, &idx, resType, bytes, getNewResColId(pCmd), inter, false); memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; - SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); + SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex); if (finalResult) { insertResultField(pQueryInfo, colIndex, &ids, pUdfInfo->resBytes, pUdfInfo->resType, pExpr->base.aliasName, pExpr); } else { for (int32_t i = 0; i < ids.num; ++i) { - tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, pSchema); + tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, uid, pSchema); } } tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); @@ -3890,9 +3884,9 @@ static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t col SColumnList columnList = {0}; columnList.num = num; - int32_t index = num - 1; - columnList.ids[index].tableIndex = tableIndex; - columnList.ids[index].columnIndex = columnIndex; + int32_t idx = num - 1; + columnList.ids[idx].tableIndex = tableIndex; + columnList.ids[idx].columnIndex = columnIndex; return columnList; } @@ -3946,8 +3940,8 @@ static bool isTimeWindowToken(SStrToken* token, int16_t *columnIndex) { } } -static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) { - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index)->pTableMeta; +static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t idx, SStrToken* pToken) { + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, idx)->pTableMeta; int32_t numOfCols = tscGetNumOfColumns(pTableMeta) + tscGetNumOfTags(pTableMeta); SSchema* pSchema = tscGetTableSchema(pTableMeta); @@ -3978,10 +3972,6 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum const char* msg0 = "ambiguous column name"; const char* msg1 = "invalid column name"; - if (pToken->n == 0) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - int16_t tsWinColumnIndex; if (isTablenameToken(pToken)) { pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX; @@ -3991,7 +3981,7 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum } else if (isTimeWindowToken(pToken, &tsWinColumnIndex)) { pIndex->columnIndex = tsWinColumnIndex; } else { - // not specify the table name, try to locate the table index by column name + // not specify the table name, try to locate the table idx by column name if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) { for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { int16_t colIndex = doGetColumnIndex(pQueryInfo, i, pToken); @@ -4005,7 +3995,7 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum } } } - } else { // table index is valid, get the column index + } else { // table idx is valid, get the column idx int16_t colIndex = doGetColumnIndex(pQueryInfo, pIndex->tableIndex, pToken); if (colIndex != COLUMN_INDEX_INITIAL_VAL) { pIndex->columnIndex = colIndex; @@ -4067,6 +4057,12 @@ int32_t 
getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIn } int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) { + const char* msg0 = "invalid column name"; + + if (pToken->n == 0) { + return invalidOperationMsg(msg, msg0); + } + if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -4110,7 +4106,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pDbPrefixToken->n <= 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - if (tscValidateName(pDbPrefixToken, false, NULL) != TSDB_CODE_SUCCESS) { + if (tscValidateName(pDbPrefixToken, true, NULL) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -4545,24 +4541,24 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd } } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&token, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&token, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } if (tableIndex == COLUMN_INDEX_INITIAL_VAL) { - tableIndex = index.tableIndex; - } else if (tableIndex != index.tableIndex) { + tableIndex = idx.tableIndex; + } else if (tableIndex != idx.tableIndex) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { pSchema = tGetTbnameColumnSchema(); } else { - pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex); } if (pSchema->type == TSDB_DATA_TYPE_JSON && !pItem->isJsonExp){ @@ -4573,15 +4569,15 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd } int32_t numOfCols = tscGetNumOfColumns(pTableMeta); - bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols); + bool groupTag = (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX || idx.columnIndex >= numOfCols); if (groupTag) { if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - int32_t relIndex = index.columnIndex; - if (index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { + int32_t relIndex = idx.columnIndex; + if (idx.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { relIndex -= numOfCols; } @@ -4597,17 +4593,17 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd taosArrayPush(pGroupExpr->columnInfo, &colIndex); - index.columnIndex = relIndex; - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema); + idx.columnIndex = relIndex; + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pSchema); } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by if (pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, 
pSchema); + tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, pTableMeta->id.uid, pSchema); - SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; + SColIndex colIndex = { .colIndex = idx.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name)); taosArrayPush(pGroupExpr->columnInfo, &colIndex); @@ -4939,9 +4935,9 @@ static int32_t checkColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return checkColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId); } else { // handle leaf node - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - addAllColumn(pCmd, pQueryInfo, pExpr, pExpr->tokenId, &index); - return checkColumnFilterInfo(pCmd, pQueryInfo, &index, pExpr, relOptr); + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + addAllColumn(pCmd, pQueryInfo, pExpr, pExpr->tokenId, &idx); + return checkColumnFilterInfo(pCmd, pQueryInfo, &idx, pExpr, relOptr); } return TSDB_CODE_SUCCESS; } @@ -4975,17 +4971,17 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS pRight = pRight->pLeft; } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); - assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); + assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); - SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex]; if (*leftNode == NULL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -4999,9 +4995,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) { - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1); + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pTagSchema1); atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1); if (pTableMetaInfo->joinTagNum > 1) { @@ -5010,19 +5006,19 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS } } - int16_t leftIdx = index.tableIndex; + int16_t leftIdx = idx.tableIndex; - index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + idx = (SColumnIndex)COLUMN_INDEX_INITIALIZER; + if 
(getColumnIndexByName(&pRight->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); - assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); + assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); - SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex]; if (*rightNode == NULL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -5035,10 +5031,10 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta); + idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMeta); if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) { - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2); + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pTagSchema2); atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1); if (pTableMetaInfo->joinTagNum > 1) { @@ -5047,7 +5043,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS } } - int16_t rightIdx = index.tableIndex; + int16_t rightIdx = idx.tableIndex; if (pTagSchema1->type != pTagSchema2->type) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); @@ -5300,14 +5296,14 @@ static int32_t validateSQLExprItem(SSqlCmd* pCmd, tSqlExpr* pExpr, return ret; } } else if (pExpr->type == SQL_NODE_TABLE_COLUMN) { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - pList->ids[pList->num++] = index; + pList->ids[pList->num++] = idx; *type = SQLEXPR_TYPE_SCALAR; } else if (pExpr->type == SQL_NODE_DATA_TYPE) { if (pExpr->dataType.type < 0 || pExpr->dataType.bytes <= 0) { @@ -5503,17 +5499,17 @@ static int32_t setNormalExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, int32_t p } -static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { +static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) { const char* msg = "only support is [not] null"; tSqlExpr* pRight = pExpr->pRight; SSchema* pSchema = tscGetTableSchema(pTableMeta); - if (pRight->tokenId == TK_NULL && pSchema[index].type != TSDB_DATA_TYPE_JSON && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) { + if (pRight->tokenId == TK_NULL && pSchema[idx].type != TSDB_DATA_TYPE_JSON && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) { return invalidOperationMsg(msgBuf, msg); } if (pRight->tokenId == TK_STRING) { - if 
(IS_VAR_DATA_TYPE(pSchema[index].type) || pSchema[index].type == TSDB_DATA_TYPE_JSON) { + if (IS_VAR_DATA_TYPE(pSchema[idx].type) || pSchema[idx].type == TSDB_DATA_TYPE_JSON) { return TSDB_CODE_SUCCESS; } @@ -5536,7 +5532,7 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t } // check for like expression -static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { +static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) { const char* msg1 = "wildcard string should be less than %d characters"; const char* msg2 = "illegal column type for like"; @@ -5551,7 +5547,7 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t } SSchema* pSchema = tscGetTableSchema(pTableMeta); - if ((pLeft->tokenId != TK_ARROW) && (!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { + if ((pLeft->tokenId != TK_ARROW) && (!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[idx].type)) { return invalidOperationMsg(msgBuf, msg2); } } @@ -5611,7 +5607,7 @@ static int32_t validateJsonTagExpr(tSqlExpr* pExpr, char* msgBuf) { } // check for match expression -static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { +static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) { const char* msg1 = "regular expression string should be less than %d characters"; const char* msg3 = "invalid regular expression"; @@ -5686,7 +5682,7 @@ void convertWhereStringCharset(tSqlExpr* pRight){ free(newData); } -static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SColumnIndex* index) { +static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SColumnIndex* idx) { const char* msg2 = "illegal column name"; int32_t ret = TSDB_CODE_SUCCESS; if (pExpr == NULL) { @@ -5695,11 +5691,11 @@ static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (isComparisonOperator(pExpr)) { return TSDB_CODE_TSC_INVALID_OPERATION; } - ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pLeft, index); + ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pLeft, idx); if( ret != TSDB_CODE_SUCCESS) { return ret; } - ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pRight, index); + ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pRight, idx); return ret; } @@ -5711,7 +5707,7 @@ static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS } if (colName) { - if (getColumnIndexByName(colName, pQueryInfo, index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(colName, pQueryInfo, idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } @@ -5739,51 +5735,51 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql int32_t ret = TSDB_CODE_SUCCESS; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; if (!tSqlExprIsParentOfLeaf(*pExpr)) { - ret = handleColumnInQueryCond(pCmd, pQueryInfo, pLeft, &index); + ret = handleColumnInQueryCond(pCmd, pQueryInfo, pLeft, &idx); if (ret != TSDB_CODE_SUCCESS) { return ret; } - ret = handleColumnInQueryCond(pCmd, pQueryInfo, pRight, &index); + ret = handleColumnInQueryCond(pCmd, pQueryInfo, pRight, &idx); if (ret != TSDB_CODE_SUCCESS) { return ret; } 
} else { - if (getColumnIndexByName(colName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(colName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } - *tbIdx = index.tableIndex; + *tbIdx = idx.tableIndex; - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex); // delete where-condition check: the column must be ts or a tag if (delData) { if (!((pSchema->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) || - index.columnIndex >= tscGetNumOfColumns(pTableMeta) || - index.columnIndex == TSDB_TBNAME_COLUMN_INDEX)) { + idx.columnIndex >= tscGetNumOfColumns(pTableMeta) || + idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } } // validate the null expression - int32_t code = validateNullExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + int32_t code = validateNullExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd)); if (code != TSDB_CODE_SUCCESS) { return code; } // validate the like expression - code = validateLikeExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + code = validateLikeExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd)); if (code != TSDB_CODE_SUCCESS) { return code; } // validate the match expression - code = validateMatchExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + code = validateMatchExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd)); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5792,8 +5788,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql convertWhereStringCharset(pRight); } - if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range - if (!tSqlExprIsParentOfLeaf(*pExpr) || !validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { + if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range + if (!tSqlExprIsParentOfLeaf(*pExpr) || !validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &idx)) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -5802,8 +5798,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY); pCondExpr->tsJoin = true; - assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); - SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); + SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex]; if (*leftNode == NULL) { *leftNode = calloc(1, sizeof(SJoinNode)); if (*leftNode == NULL) { @@ -5811,17 +5807,17 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } } - int16_t leftIdx = index.tableIndex; + int16_t leftIdx = idx.tableIndex; - if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if
(getColumnIndexByName(&pRight->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) { + if (idx.tableIndex < 0 || idx.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex]; if (*rightNode == NULL) { *rightNode = calloc(1, sizeof(SJoinNode)); if (*rightNode == NULL) { @@ -5829,7 +5825,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } } - int16_t rightIdx = index.tableIndex; + int16_t rightIdx = idx.tableIndex; if ((*leftNode)->tsJoin == NULL) { (*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t)); @@ -5865,7 +5861,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } *pExpr = NULL; // remove this expression - } else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + } else if (idx.columnIndex >= tscGetNumOfColumns(pTableMeta) || idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // query on tags, check for tag query condition if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -5880,7 +5876,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } if (joinQuery && pRight != NULL && (pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW)) { // join on tag columns for stable query - if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { + if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &idx)) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -6057,15 +6053,15 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo* tSqlExpr* pLeft = (*pExpr)->pLeft; if (pLeft->tokenId == TK_ARROW || pLeft->tokenId == TK_ID) { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; if(pLeft->tokenId == TK_ARROW) { pLeft = pLeft->pLeft; } - if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return; } - if (index.tableIndex != tableIndex) { + if (idx.tableIndex != tableIndex) { return; } } @@ -6191,12 +6187,12 @@ static int32_t convertTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return code; } } else { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); tSqlExpr* pRight = pExpr->pRight; @@ -6264,27 +6260,27 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { STableMetaInfo* 
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->ColName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) { tscError("%p: invalid column name (left)", pQueryInfo); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]); + tscColumnListInsert(pTableMetaInfo->tagColList, &idx, &pSchema[idx.columnIndex]); - if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->ColName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) { tscError("%p: invalid column name (right)", pQueryInfo); } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]); + tscColumnListInsert(pTableMetaInfo->tagColList, &idx, &pSchema[idx.columnIndex]); } } */ @@ -6425,10 +6421,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE size_t num = taosArrayGetSize(colList); for(int32_t j = 0; j < num; ++j) { SColIndex* pIndex = taosArrayGet(colList, j); - SColumnIndex index = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols}; + SColumnIndex idx = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols}; SSchema* s = tscGetTableSchema(pTableMetaInfo->pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid, + tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMetaInfo->pTableMeta->id.uid, &s[pIndex->colIndex]); } @@ -7067,7 +7063,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq columnName.z = pVar->pz; } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; bool udf = false; if (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) { @@ -7083,7 +7079,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query - if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsgBuf, msg1); } @@ -7091,8 +7087,8 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq bool orderByTS = false; bool orderByGroupbyCol = false; - if (index.columnIndex >= 
tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1 - int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1 + int32_t relTagIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { @@ -7116,7 +7112,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq orderByTags = true; } } - } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname + } else if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { return invalidOperationMsg(pMsgBuf, msg4); @@ -7125,13 +7121,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (TSDB_TBNAME_COLUMN_INDEX == pColIndex->colIndex) { orderByTags = true; } - }else if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts + }else if (idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts orderByTS = true; }else{ // order by normal column SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { SColIndex* pColIndex = taosArrayGet(columnInfo, 0); - if (pColIndex->colIndex == index.columnIndex) { + if (pColIndex->colIndex == idx.columnIndex) { orderByGroupbyCol = true; } } @@ -7145,7 +7141,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (tscIsDiffDerivLikeQuery(pQueryInfo)) { return invalidOperationMsg(pMsgBuf, msg12); } - //pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + //pQueryInfo->groupbyExpr.orderIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; } else if (orderByGroupbyCol) { @@ -7164,12 +7160,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, pos); - if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.colInfo.colIndex != idx.columnIndex && idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg5); } pQueryInfo->order.order = pItem->sortOrder; - pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId; } else { if (udf) { return invalidOperationMsg(pMsgBuf, msg11); @@ -7195,27 +7191,27 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq cname.type = pVar->nType; cname.z = pVar->pz; } - if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&cname, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsgBuf, msg1); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg6); } pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table - if (getColumnIndexByName(&columnName, pQueryInfo, 
&index, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&columnName, pQueryInfo, &idx, pMsgBuf) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsgBuf, msg1); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)){ + if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)){ bool validOrder = false; SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { SColIndex* pColIndex = taosArrayGet(columnInfo, 0); - validOrder = (pColIndex->colIndex == index.columnIndex); + validOrder = (pColIndex->colIndex == idx.columnIndex); } if (!validOrder) { @@ -7227,7 +7223,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { /*SColIndex* pColIndex = taosArrayGet(columnInfo, 0); - if (pColIndex->colIndex != index.columnIndex) { + if (pColIndex->colIndex != idx.columnIndex) { return invalidOperationMsg(pMsgBuf, msg8); }*/ } else { @@ -7238,12 +7234,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, pos); - if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pExpr->base.colInfo.colIndex != idx.columnIndex && idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg5); } } pQueryInfo->order.order = pItem->sortOrder; - pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId; }else{ pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -7257,11 +7253,11 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // inner subquery. 
assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); - if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&columnName, pQueryInfo, &idx, pMsgBuf) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsgBuf, msg1); } - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(pMsgBuf, msg1); } @@ -7273,7 +7269,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq bool found = false; for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); - if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) { + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[idx.columnIndex].colId) { found = true; break; } @@ -7281,7 +7277,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (!found) { int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); - tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &idx, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false; @@ -7291,7 +7287,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } pQueryInfo->order.order = pItem->sortOrder; - pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId; } return TSDB_CODE_SUCCESS; @@ -7404,17 +7400,20 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(pMsg, msg9); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { + return invalidOperationMsg(pMsg, msg17); + } SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen}; - if (getColumnIndexByName(&name, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&name, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } int32_t numOfCols = tscGetNumOfColumns(pTableMeta); - if (index.columnIndex < numOfCols) { + if (idx.columnIndex < numOfCols) { return invalidOperationMsg(pMsg, msg10); - } else if (index.columnIndex == numOfCols) { + } else if (idx.columnIndex == numOfCols) { return invalidOperationMsg(pMsg, msg11); } @@ -7478,6 +7477,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { int16_t numOfTags = tscGetNumOfTags(pTableMeta); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + if (item->pVar.nType != TSDB_DATA_TYPE_BINARY) { + return invalidOperationMsg(pMsg, msg17); + } SStrToken name = {.z = item->pVar.pz, .n = item->pVar.nLen}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -7623,6 +7625,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { + return invalidOperationMsg(pMsg, msg17); + } 
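// A minimal sketch of the guard pattern added above (and in the two other
// places in this function), under the assumption that tVariant is a tagged
// union whose pz/nLen members are only valid when nType is
// TSDB_DATA_TYPE_BINARY; for numeric variants those members were never set,
// so the SStrToken built below would point at garbage. The helper
// tokenFromVariant is illustrative only, not part of the patch:
//
//   static int32_t tokenFromVariant(const tVariant *v, SStrToken *tok) {
//     if (v->nType != TSDB_DATA_TYPE_BINARY) {
//       return TSDB_CODE_TSC_INVALID_OPERATION;  // reject non-string input
//     }
//     tok->z = v->pz;    // safe: the string members are initialized
//     tok->n = v->nLen;
//     return TSDB_CODE_SUCCESS;
//   }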
SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { @@ -7778,10 +7783,20 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { bool isProjectionFunction = false; + bool minMaxRowExists = false; const char* msg1 = "functions not compatible with interval"; // multi-output set/ todo refactor size_t size = taosArrayGetSize(pQueryInfo->exprList); + + for (int32_t k = 0; k < size; ++k) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, k); + + if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) { + minMaxRowExists = true; + break; + } + } for (int32_t k = 0; k < size; ++k) { SExprInfo* pExpr = tscExprGet(pQueryInfo, k); @@ -7802,7 +7817,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu } // projection query on primary timestamp, the selectivity function needs to be present. - if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (minMaxRowExists || (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { bool hasSelectivity = false; for (int32_t j = 0; j < size; ++j) { SExprInfo* pEx = tscExprGet(pQueryInfo, j); @@ -8023,7 +8038,9 @@ int32_t validateColumnName(char* name) { return validateColumnName(token.z); } else if (token.type == TK_ID) { stringProcess(name, token.n); - return TSDB_CODE_SUCCESS; + if (strlen(name) == 0) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } } else { if (isNumber(&token)) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -8252,20 +8269,20 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau SSchema* pTagSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, colId); int16_t colIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, colId); - SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex}; + SColumnIndex idx = {.tableIndex = 0, .columnIndex = colIndex}; char* name = pTagSchema->name; int16_t type = pTagSchema->type; int16_t bytes = pTagSchema->bytes; - pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(&pSql->cmd), bytes, true); + pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &idx, type, bytes, getNewResColId(&pSql->cmd), bytes, true); pExpr->base.colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list SColumnList ids = {0}; insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr); - int32_t relIndex = index.columnIndex; + int32_t relIndex = idx.columnIndex; pExpr->base.colInfo.colIndex = relIndex; SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); @@ -8304,13 +8321,14 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlC pInfo->visible = false; } -static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) { +static void doUpdateSqlFunctionForColTagPrj(SQueryInfo* pQueryInfo) { int32_t tagLength = 0; size_t size = taosArrayGetSize(pQueryInfo->exprList); -//todo is 0?? + //todo is 0?? 
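// Both validateFunctionsInIntervalOrGroupbyQuery above and this function use
// a two-pass shape for the new min_row/max_row support: a first scan only
// detects whether TSDB_FUNC_MIN_ROW or TSDB_FUNC_MAX_ROW is present, and a
// second scan then rewrites or re-validates the remaining expressions. A
// hedged sketch of that shape (exprCount, funcIdAt and demoteToDummy are
// illustrative stand-ins for the taosArrayGetSize/tscExprGet accessors used
// in the real code):
//
//   bool minMaxRow = false;
//   for (int32_t k = 0; k < exprCount; ++k) {        // pass 1: detect
//     int32_t f = funcIdAt(k);
//     if (f == TSDB_FUNC_MIN_ROW || f == TSDB_FUNC_MAX_ROW) {
//       minMaxRow = true;
//       break;
//     }
//   }
//   for (int32_t k = 0; k < exprCount; ++k) {        // pass 2: adjust others
//     if (minMaxRow && funcIdAt(k) == TSDB_FUNC_PRJ) {
//       demoteToDummy(k);  // e.g. TSDB_FUNC_COL_DUMMY, as done below
//     }
//   }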
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); + bool minMaxRowExists = false; for (int32_t i = 0; i < size; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); @@ -8320,6 +8338,20 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) { } else if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { pExpr->base.functionId = TSDB_FUNC_TS_DUMMY; // ts_select ts,top(col,2) tagLength += pExpr->base.resBytes; + } else if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) { + minMaxRowExists = true; + } + } + + if (minMaxRowExists) { + for (int32_t i = 0; i < size; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) { + continue; + } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) { + pExpr->base.functionId = TSDB_FUNC_COL_DUMMY; + tagLength += pExpr->base.resBytes; + } } } @@ -8331,8 +8363,9 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) { continue; } - if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY) && - !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) { + if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY && + pExpr->base.functionId != TSDB_FUNC_COL_DUMMY) + && !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) { SSchema* pColSchema = &pSchema[pExpr->base.colInfo.colIndex]; getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->base.functionId, (int32_t)pExpr->base.param[0].i64, &pExpr->base.resType, &pExpr->base.resBytes, &pExpr->base.interBytes, tagLength, isSTable, NULL); @@ -8448,7 +8481,15 @@ static bool check_expr_in_groupby_colum(SGroupbyExpr* pGroupbyExpr, SExprInfo* p return false; for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols ; ++k) { pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k); - if (!strcmp(pIndex->name,&pExpr->base.colInfo.name[1])){ // notes:first char is dot, skip one char. + + // find last dot + char * name = strrchr(pExpr->base.colInfo.name, '.'); + if(name) + name += 1; + else + name = pExpr->base.colInfo.name; + + if (!strcmp(pIndex->name, name)){ return true; } } @@ -8461,10 +8502,14 @@ static bool check_expr_in_groupby_colum(SGroupbyExpr* pGroupbyExpr, SExprInfo* p * 2. if selectivity function and tagprj function both exist, there should be only * one selectivity function exists. 
*/ -static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { +static int32_t checkUpdateColTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { const char* msg1 = "only one selectivity function allowed in presence of tags function"; const char* msg2 = "aggregation function should not be mixed up with projection"; + const char* msg3 = "min_row should not be mixed up with max_row"; + const char* msg4 = "only one selectivity function allowed in presence of min_row or max_row function"; + bool minRowExists = false; + bool maxRowExists = false; bool tagTsColExists = false; int16_t numOfScalar = 0; int16_t numOfSelectivity = 0; @@ -8481,9 +8526,17 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { tagTsColExists = true; // selectivity + ts/tag column break; } + } else if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW) { + minRowExists = true; + } else if (pExpr->base.functionId == TSDB_FUNC_MAX_ROW) { + maxRowExists = true; } } + if (minRowExists && maxRowExists) { + return invalidOperationMsg(msg, msg3); + } + for (int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i); @@ -8522,7 +8575,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { } } - if (tagTsColExists) { // check if the selectivity function exists + if (tagTsColExists || minRowExists || maxRowExists) { // check if the selectivity function exists // When the tag projection function on tag column that is not in the group by clause, aggregation function and // selectivity function exist in select clause is not allowed. if (numOfAggregation > 0) { @@ -8533,7 +8586,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { * if numOfSelectivity equals to 0, it is a super table projection query */ if (numOfSelectivity == 1) { - doUpdateSqlFunctionForTagPrj(pQueryInfo); + doUpdateSqlFunctionForColTagPrj(pQueryInfo); int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo); if (code != TSDB_CODE_SUCCESS) { return code; @@ -8559,11 +8612,17 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { (functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) { // do nothing } else { - return invalidOperationMsg(msg, msg1); + if (tagTsColExists) { + return invalidOperationMsg(msg, msg1); + } + + if (minRowExists || maxRowExists) { + return invalidOperationMsg(msg, msg4); + } } } - doUpdateSqlFunctionForTagPrj(pQueryInfo); + doUpdateSqlFunctionForColTagPrj(pQueryInfo); int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo); if (code != TSDB_CODE_SUCCESS) { return code; @@ -8628,8 +8687,8 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo int32_t pos = tscGetFirstInvisibleFieldPos(pQueryInfo); - SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true); + SColumnIndex idx = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; + SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &idx, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true); // NOTE: tag column does not add to source column list SColumnList ids = createColumnList(1, 0, pColIndex->colIndex); insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, pColIndex->name, pExpr); @@ -8787,7 +8846,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* 
pQueryInfo, char* } } - if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) { + if (checkUpdateColTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -8802,7 +8861,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* return TSDB_CODE_SUCCESS; } else { - return checkUpdateTagPrjFunctions(pQueryInfo, msg); + return checkUpdateColTagPrjFunctions(pQueryInfo, msg); } } @@ -8922,20 +8981,20 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq {"client_version()", 16}, {"current_user()", 14}}; - int32_t index = -1; + int32_t idx = -1; if (server_status == true) { - index = 2; + idx = 2; } else { for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { if (strncasecmp(functionsInfo[i].name, pExpr->exprToken.z, functionsInfo[i].len) == 0 && functionsInfo[i].len == pExpr->exprToken.n) { - index = i; + idx = i; break; } } } - switch (index) { + switch (idx) { case 0: pQueryInfo->command = TSDB_SQL_CURRENT_DB;break; case 1: @@ -8954,7 +9013,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pCmd), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false); tSqlExprItem* item = taosArrayGet(pExprList, 0); - const char* name = (item->aliasName != NULL)? item->aliasName:functionsInfo[index].name; + const char* name = (item->aliasName != NULL)? item->aliasName:functionsInfo[idx].name; tstrncpy(pExpr1->base.aliasName, name, tListLen(pExpr1->base.aliasName)); return TSDB_CODE_SUCCESS; @@ -9624,10 +9683,12 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect pParam = taosArrayGet(pSqlExpr->Expr.paramList, 0); SStrToken* pToken = &pParam->pNode->columnName; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex); } else { schema = (SSchema) {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE}; } @@ -9647,15 +9708,15 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect // // if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it, // -// SColumnIndex index = COLUMN_INDEX_INITIALIZER; +// SColumnIndex idx = COLUMN_INDEX_INITIALIZER; // int32_t functionId = pSqlExpr->functionId; // if (pSqlExpr->Expr.paramList == NULL) { -// index.columnIndex = 0; -// index.tableIndex = 0; +// idx.columnIndex = 0; +// idx.tableIndex = 0; // } else { // tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->Expr.paramList, 0); // SStrToken* pToken = &pParamElem->pNode->columnName; -// getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)); +// getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)); // } // // size_t numOfNodeInSel = tscNumOfExprs(pQueryInfo); @@ -9666,7 +9727,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect // continue; // } 
// -// if (pExpr1->base.colInfo.colIndex != index.columnIndex) { +// if (pExpr1->base.colInfo.colIndex != idx.columnIndex) { // continue; // } // @@ -9855,16 +9916,16 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode } if (pExpr1->tokenId == TK_ID) { - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) { + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - if (index.columnIndex <= 0 || - index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + if (idx.columnIndex <= 0 || + idx.columnIndex >= tscGetNumOfColumns(pTableMeta)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } } @@ -10326,8 +10387,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { return meta; } -static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pSql, SQueryInfo* pQueryInfo, char* msgBuf) { - SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, index); +static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t idx, SSqlObj* pSql, SQueryInfo* pQueryInfo, char* msgBuf) { + SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, idx); // union all is not supported currently SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0); @@ -10784,15 +10845,15 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS } return ret; } else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SColumnIndex idx = COLUMN_INDEX_INITIALIZER; - int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)); + int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)); if (ret != TSDB_CODE_SUCCESS) { return ret; } - pQueryInfo->curTableIdx = index.tableIndex; - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta; + pQueryInfo->curTableIdx = idx.tableIndex; + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, idx.tableIndex)->pTableMeta; int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); *pExpr = calloc(1, sizeof(tExprNode)); @@ -10801,14 +10862,14 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS SSchema* pSchema = NULL; - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { pSchema = (*pExpr)->pSchema; strcpy(pSchema->name, tGetTbnameColumnSchema()->name); pSchema->type = tGetTbnameColumnSchema()->type; pSchema->colId = tGetTbnameColumnSchema()->colId; pSchema->bytes = tGetTbnameColumnSchema()->bytes; } else { - pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex); *(*pExpr)->pSchema = *pSchema; } @@ -10816,8 +10877,8 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS SColIndex colIndex = {0}; tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name)); colIndex.colId = 
pSchema->colId;
-      colIndex.colIndex = index.columnIndex;
-      colIndex.flag = (index.columnIndex >= numOfColumns) ? 1 : 0;
+      colIndex.colIndex = idx.columnIndex;
+      colIndex.flag = (idx.columnIndex >= numOfColumns) ? 1 : 0;
       taosArrayPush(pCols, &colIndex);
     }
@@ -10960,17 +11021,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
     *pExpr = calloc(1, sizeof(tExprNode));
     (*pExpr)->nodeType = TSQL_NODE_COL;
     (*pExpr)->pSchema = calloc(1, sizeof(SSchema));
-    if (tsKeepOriginalColumnName && // TD-14196, tsKeepOriginalColumnName params makes logic special
-        (pSqlExpr->functionId == TSDB_FUNC_FIRST ||
-         pSqlExpr->functionId == TSDB_FUNC_LAST ||
-         pSqlExpr->functionId == TSDB_FUNC_SPREAD ||
-         pSqlExpr->functionId == TSDB_FUNC_LAST_ROW ||
-         pSqlExpr->functionId == TSDB_FUNC_INTERP)) {
-      tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->Expr.paramList, 0);
-      strncpy((*pExpr)->pSchema->name, pParamElem->pNode->columnName.z, pParamElem->pNode->columnName.n);
-    }else{
-      strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n);
-    }
+    strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n);
 
     // set the input column data byte and type.
     size_t size = taosArrayGetSize(pQueryInfo->exprList);
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index e42f73fb327b7a0c85741fd1edbcdc21845c5488..92a6f6e149306a7449c823311aaa92061974d39d 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -281,6 +281,114 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
   }
 }
 
+// return true if no probe is needed or the probe connection msg was sent to the server ok; false means the query should be killed
+bool sendProbeConnMsg(SSqlObj* pSql, int64_t stime, bool *pReqOver) {
+  if(stime == 0) {
+    // not started, no need to probe
+    tscInfo("PROBE 0x%" PRIx64 " not started, no need to probe.", pSql->self);
+    return true;
+  }
+
+  int64_t start = MAX(stime, pSql->lastAlive);
+  int32_t diff = (int32_t)(taosGetTimestampMs() - start);
+  if (diff < tsProbeSeconds * 1000) {
+    // execution time is still short, no need to probe yet
+    tscInfo("PROBE 0x%" PRIx64 " probe time not reached. cfg timeout=%ds, no need to probe. lastAlive=%" PRId64 " stime=%" PRId64, \
+            pSql->self, tsProbeSeconds, pSql->lastAlive, pSql->stime);
+    return true;
+  }
+
+  if (diff > tsProbeKillSeconds * 1000) {
+    // need to kill the query
+    tscInfo("PROBE 0x%" PRIx64 " kill query by probe because kill time reached. time=%ds cfg timeout=%ds lastAlive=%" PRId64 " stime=%" PRId64, \
+            pSql->self, diff/1000, tsProbeKillSeconds, pSql->lastAlive, pSql->stime);
+
+    return false;
+  }
+
+  if (pSql->pPrevContext == NULL) {
+    // the last connect info was saved incompletely, so we cannot probe
+    tscInfo("PROBE 0x%" PRIx64 " last connect info saved incompletely. prev context is null", pSql->self);
+    return true;
+  }
+
+  if(pSql->rpcRid == -1) {
+    // canceled, or the server already responded ok, so no need to probe
+    tscInfo("PROBE 0x%" PRIx64 " rpcRid is -1, response ok. no need to probe.", pSql->self);
+    return true;
+  }
+
+  bool ret = rpcSendProbe(pSql->rpcRid, pSql->pPrevContext, pReqOver);
+  if (!(*pReqOver))
+    tscInfo("PROBE 0x%" PRIx64 " send probe msg, ret=%d rpcRid=0x%" PRIx64, pSql->self, ret, pSql->rpcRid);
+  return ret;
+}
+
+// check for queries with broken links and kill them
+void checkBrokenQueries(STscObj *pTscObj) {
+  tscDebug("PROBE checkBrokenQueries pTscObj=%p pTscObj->rid=0x%" PRIx64, pTscObj, pTscObj->rid);
+  SSqlObj *pSql = pTscObj->sqlList;
+  while (pSql) {
+    // guard against a sqlobj that was not correctly removed from the sql list
+    if (pSql->sqlstr == NULL) {
+      pSql = pSql->next;
+      continue;
+    }
+
+    bool kill = false;
+    bool reqOver = false;
+    int32_t numOfSub = pSql->subState.numOfSub;
+    tscInfo("PROBE 0x%" PRIx64 " start checking sql alive, numOfSub=%d sql=%s stime=%" PRId64 " alive=%" PRId64 " rpcRid=0x%" PRIx64 \
+        ,pSql->self, numOfSub, pSql->sqlstr == NULL ? "" : pSql->sqlstr, pSql->stime, pSql->lastAlive, pSql->rpcRid);
+    if (numOfSub == 0) {
+      // no sub sql
+      if(!sendProbeConnMsg(pSql, pSql->stime, &reqOver)) {
+        // need to kill
+        tscInfo("PROBE 0x%" PRIx64 " need to break link. rpcRid=0x%" PRIx64, pSql->self, pSql->rpcRid);
+        kill = true;
+      }
+
+      if (reqOver) {
+        // the current request has finished, so update the alive time to now
+        pSql->lastAlive = taosGetTimestampMs();
+      }
+    } else {
+      // lock subs
+      pthread_mutex_lock(&pSql->subState.mutex);
+      if (pSql->pSubs) {
+        // have sub sql
+        for (int i = 0; i < pSql->subState.numOfSub; i++) {
+          SSqlObj *pSubSql = pSql->pSubs[i];
+          if(pSubSql) {
+            tscInfo("PROBE 0x%" PRIx64 " sub sql app is 0x%" PRIx64, pSql->self, pSubSql->self);
+            if(!sendProbeConnMsg(pSubSql, pSql->stime, &reqOver)) {
+              // need to kill
+              tscInfo("PROBE 0x%" PRIx64 " i=%d sub app=0x%" PRIx64 " need to break link. rpcRid=0x%" PRIx64, pSql->self, i, pSubSql->self, pSubSql->rpcRid);
+              kill = true;
+              break;
+            }
+          }
+
+          if (reqOver) {
+            // the current request has finished, so update the alive time to now
+            pSubSql->lastAlive = taosGetTimestampMs();
+          }
+        }
+      }
+      // unlock
+      pthread_mutex_unlock(&pSql->subState.mutex);
+    }
+
+    // kill query
+    if(kill) {
+      taos_stop_query(pSql);
+    }
+
+    // move to the next one
+    pSql = pSql->next;
+  }
+}
+
 void tscProcessActivityTimer(void *handle, void *tmrId) {
   int64_t rid = (int64_t) handle;
   STscObj *pObj = taosAcquireRef(tscRefId, rid);
@@ -296,6 +404,19 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
 
   assert(pHB->self == pObj->hbrid);
 
+  // check for queries that are already dead
+  static int activityCnt = 0;
+  if (++activityCnt > tsProbeInterval) { // 1.5s * 40 = 60s interval between query-alive checks
+    activityCnt = 0;
+
+    // run the check only if there are queries executing
+    if(pObj->sqlList) {
+      // have queries executing
+      checkBrokenQueries(pObj);
+    }
+  }
+
+  // send our own connection and query heartbeat
   pHB->retry = 0;
   int32_t code = tscBuildAndSendRequest(pHB, NULL);
   taosReleaseRef(tscObjRef, pObj->hbrid);
@@ -335,11 +456,18 @@ int tscSendMsgToServer(SSqlObj *pSql) {
 
   if ((rpcMsg.msgType == TSDB_MSG_TYPE_SUBMIT) && (tsShortcutFlag & TSDB_SHORTCUT_RB_RPC_SEND_SUBMIT)) {
     rpcFreeCont(rpcMsg.pCont);
-    return TSDB_CODE_FAILED;
+    return TSDB_CODE_RPC_SHORTCUT;
  }
-  rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid);
-  return TSDB_CODE_SUCCESS;
+
+  if(rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid) != BOOL_FALSE) {
+    if(pSql->cmd.command == TSDB_SQL_SELECT)
+      rpcSaveSendInfo(pSql->rpcRid, &pSql->pPrevContext);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  tscError("0x%"PRIx64" rpc send data failed.
msg=%s", pSql->self, taosMsg[pSql->cmd.msgType]); + return TSDB_CODE_TSC_SEND_DATA_FAILED; } // handle three situation @@ -413,10 +541,20 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { assert(pSql->self == handle); + // check msgtype + if(rpcMsg->msgType == TSDB_MSG_TYPE_PROBE_CONN_RSP) { + pSql->lastAlive = taosGetTimestampMs(); + tscInfo("PROBE 0x%" PRIx64 " recv probe msg response. rpcRid=0x%" PRIx64, pSql->self, pSql->rpcRid); + rpcFreeCont(rpcMsg->pCont); + return ; + } + STscObj *pObj = pSql->pTscObj; SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; + + pSql->rpcRid = -1; if (pObj->signature != pObj) { tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature); @@ -748,13 +886,13 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab int32_t vgId = -1; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - int32_t index = pTableMetaInfo->vgroupIndex; - assert(index >= 0); + int32_t idx = pTableMetaInfo->vgroupIndex; + assert(idx >= 0); SVgroupMsg* pVgroupInfo = NULL; if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { - assert(index < pTableMetaInfo->vgroupList->numOfVgroups); - pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; + assert(idx < pTableMetaInfo->vgroupList->numOfVgroups); + pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[idx]; } else { tscError("0x%"PRIx64" No vgroup info found", pSql->self); @@ -764,7 +902,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab vgId = pVgroupInfo->vgId; tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo); - tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, index, pTableMetaInfo->vgroupList->numOfVgroups); + tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, idx, pTableMetaInfo->vgroupList->numOfVgroups); } else { vgId = pTableMeta->vgId; @@ -786,11 +924,11 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab pQueryMsg->numOfTables = htonl(1); // set the number of tables pMsg += sizeof(STableIdInfo); } else { // it is a subquery of the super table query, this EP info is acquired from vgroupInfo - int32_t index = pTableMetaInfo->vgroupIndex; + int32_t idx = pTableMetaInfo->vgroupIndex; int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); - assert(index >= 0 && index < numOfVgroups); + assert(idx >= 0 && idx < numOfVgroups); - SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index); + SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, idx); // set the vgroup info tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo); @@ -800,7 +938,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables tscDebug("0x%"PRIx64" query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql->self, - pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups); + pTableIdList->vgInfo.vgId, numOfTables, idx, numOfVgroups); // serialize each table id info for(int32_t i = 0; i < numOfTables; ++i) { @@ -1113,7 +1251,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); if (pQueryInfo->tsBuf != NULL) { - // note: here used the index instead of actual vnode id. + // note: here used the idx instead of actual vnode id. 
int32_t vnodeIndex = pTableMetaInfo->vgroupIndex; code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks); if (code != TSDB_CODE_SUCCESS) { @@ -1258,10 +1396,10 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } static bool tscIsAlterCommand(char* sqlstr) { - int32_t index = 0; + int32_t idx = 0; do { - SStrToken t0 = tStrGetToken(sqlstr, &index, false); + SStrToken t0 = tStrGetToken(sqlstr, &idx, false); if (t0.type != TK_LP) { return t0.type == TK_ALTER; } @@ -2643,18 +2781,18 @@ int tscProcessShowRsp(SSqlObj *pSql) { SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; - SColumnIndex index = {0}; + SColumnIndex idx = {0}; pSchema = pMetaMsg->schema; uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) { - index.columnIndex = i; + idx.columnIndex = i; tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema); TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes); SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); - pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, + pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pCmd), pTableSchema[i].bytes, false); } @@ -3251,7 +3389,9 @@ int tscRenewTableMeta(SSqlObj *pSql) { pSql->rootObj->retryReason = pSql->retryReason; SSqlObj *rootSql = pSql->rootObj; + pthread_mutex_lock(&rootSql->mtxSubs); tscFreeSubobj(rootSql); + pthread_mutex_unlock(&rootSql->mtxSubs); tfree(rootSql->pSubs); tscResetSqlCmd(&rootSql->cmd, true, rootSql->self); @@ -3481,4 +3621,4 @@ void tscInitMsgsFp() { tscKeepConn[TSDB_SQL_SELECT] = 1; tscKeepConn[TSDB_SQL_FETCH] = 1; tscKeepConn[TSDB_SQL_HB] = 1; -} \ No newline at end of file +} diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index e44f88189fb5f7c353228bc26cabee40cf38aff7..b3d18f50114cd88e75e559323fed8c7d6f934289 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -149,7 +149,6 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* goto fail; } - strtolower(pSql->sqlstr, pSql->sqlstr); pRes->qId = 0; pRes->numOfRows = 1; pCmd->resColumnId = TSDB_RES_COL_ID; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 3fadc7abaf0aa2ae96e87cae77c6ce1a2b3df742..1805c22a9d77181fd5d5e2b3f31a25186741ac74 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -30,7 +30,7 @@ typedef struct SInsertSupporter { SSqlObj* pSql; - int32_t index; + int32_t idx; } SInsertSupporter; static void freeJoinSubqueryObj(SSqlObj* pSql); @@ -84,14 +84,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) { for (int i = 0; i < subState->numOfSub; i++) { SSqlObj* pSub = pParentSql->pSubs[i]; if (0 == subState->states[i]) { - tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished yet", pParentSql->self, pSub->self, i); + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d NOT finished yet", pParentSql->self, pSub->self, i); done = false; break; } else { if (pSub != NULL) { - tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i); + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d finished", pParentSql->self, pSub->self, i); } else { - tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i); + tscDebug("0x%"PRIx64" subquery:%p, idx: %d finished", pParentSql->self, pSub, i); } } } @@ -101,13 
+101,12 @@ static bool allSubqueryDone(SSqlObj *pParentSql) { bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { SSubqueryState *subState = &pParentSql->subState; - assert(idx < subState->numOfSub); pthread_mutex_lock(&subState->mutex); - - tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx); - subState->states[idx] = 1; - + if (idx < subState->numOfSub) { + tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx); + subState->states[idx] = 1; + } bool done = allSubqueryDone(pParentSql); if (!done) { tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, pSql, idx, pParentSql->subState.numOfSub); @@ -383,7 +382,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { // todo handle failed to create sub query -SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { +SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t idx) { SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter)); if (pSupporter == NULL) { return NULL; @@ -391,7 +390,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { pSupporter->pObj = pSql->self; - pSupporter->subqueryIndex = index; + pSupporter->subqueryIndex = idx; SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval)); @@ -403,7 +402,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { pSupporter->numOfFillVal = pQueryInfo->numOfFillVal; } - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, index); + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, idx); pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid; assert (pSupporter->uid != 0); @@ -614,7 +613,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { * during the timestamp intersection. */ pSupporter->limit = pQueryInfo->limit; - SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; + SColumnIndex idx = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0); SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); @@ -626,7 +625,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t functionId = tscIsProjectionQuery(pQueryInfo)? 
TSDB_FUNC_PRJ : TSDB_FUNC_TS; - tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd)); + tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &idx, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd)); tscPrintSelNodeList(pNew, 0); tscFieldInfoUpdateOffset(pQueryInfo); @@ -836,8 +835,8 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; - SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + SColumnIndex idx = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; + SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &idx, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); // set the tags value for ts_comp function if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -1280,7 +1279,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // todo retry if other subqueries are not failed assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); - tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); + tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); pParentSql->res.code = numOfRows; if (quitAllSubquery(pSql, pParentSql, pSupporter)) { @@ -1336,7 +1335,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pTableMetaInfo->vgroupIndex += 1; assert(pTableMetaInfo->vgroupIndex < totalVgroups); - tscDebug("0x%"PRIx64" tid_tag from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d", + tscDebug("0x%"PRIx64" tid_tag from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d", pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num); pCmd->command = TSDB_SQL_SELECT; @@ -1447,7 +1446,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // todo retry if other subqueries are not failed yet assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); - tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); + tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); pParentSql->res.code = numOfRows; if (quitAllSubquery(pSql, pParentSql, pSupporter)){ @@ -1525,7 +1524,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pTableMetaInfo->vgroupIndex += 1; assert(pTableMetaInfo->vgroupIndex < totalVgroups); - tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64, + tscDebug("0x%"PRIx64" results from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. 
current numOfRes:%" PRId64, pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfClauseTotal); @@ -1610,7 +1609,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR assert(numOfRows == taos_errno(pSql)); pParentSql->res.code = numOfRows; - tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows)); + tscError("0x%"PRIx64" retrieve failed, idx:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows)); tscAsyncResultOnError(pParentSql); goto _return; @@ -1670,7 +1669,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR pParentSql->res.precision = pRes1->precision; if (pRes1->row > 0 && pRes1->numOfRows > 0) { - tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); assert(pRes1->row < pRes1->numOfRows || (pRes1->row == pRes1->numOfRows && pRes1->completed)); } else { @@ -1678,7 +1677,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR pRes1->numOfClauseTotal += pRes1->numOfRows; } - tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self, + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64, pParentSql->self, pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); } } @@ -1879,7 +1878,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { } } -// all subqueries return, set the result output index +// all subqueries return, set the result output idx void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; @@ -2567,7 +2566,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo); - int32_t index = 0; + int32_t idx = 0; for(int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { @@ -2576,7 +2575,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); - SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); p->base.resColId = pExpr->base.resColId; // update the result column id } else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) { taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId); @@ -2585,7 +2584,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)}; tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name)); - SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); p->base.resColId = pExpr->base.resColId; // update the result column 
id } else if (pExpr->base.functionId == TSDB_FUNC_TAG) { pSup->tagLen += pExpr->base.resBytes; @@ -2598,7 +2597,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { schema = tGetTbnameColumnSchema(); } - SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd)); + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd)); if (schema->type == TSDB_DATA_TYPE_JSON){ p->base.numOfParams = pExpr->base.numOfParams; tVariantAssign(&p->base.param[0], &pExpr->base.param[0]); @@ -2616,7 +2615,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); //doLimitOutputNormalColOfGroupby - SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); p->base.numOfParams = 1; p->base.param[0].i64 = 1; p->base.param[0].nType = TSDB_DATA_TYPE_INT; @@ -2658,7 +2657,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, " "numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s", pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type, - tscNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); + tscNumOfExprs(pNewQueryInfo), idx+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); pSql->pSubs = calloc(1, POINTER_BYTES); if (pSql->pSubs == NULL) { @@ -3281,7 +3280,7 @@ SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSq assert(trsupport->subqueryIndex < pSql->subState.numOfSub); - // launch subquery for each vnode, so the subquery index equals to the vgroupIndex. + // launch subquery for each vnode, so the subquery idx equals to the vgroupIndex. STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index); pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex; @@ -3296,7 +3295,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { // the param may be null, since it may be done by other query threads. and the asyncOnError may enter in this // function while kill query by a user. if (param == NULL) { - assert(code != TSDB_CODE_SUCCESS); + if(code != TSDB_CODE_SUCCESS) + tscError("tscRetrieveDataRes param is NULL, error code=%d", code); return; } @@ -3391,6 +3391,10 @@ static void doFreeInsertSupporter(SSqlObj* pSqlObj) { } static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) { + if(param == NULL) { + tscError("callback multiVnodeInsertFinalize param is NULL. 
tres=%p numOfRows=%d", tres, numOfRows); + return ; + } SInsertSupporter *pSupporter = (SInsertSupporter *)param; SSqlObj* pParentObj = pSupporter->pSql; @@ -3411,7 +3415,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } } - if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) { + if (!subAndCheckDone(tres, pParentObj, pSupporter->idx)) { // concurrency problem, other thread already release pParentObj //tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub); return; @@ -3495,9 +3499,9 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param; - assert(pSupporter->index < pSupporter->pSql->subState.numOfSub); + assert(pSupporter->idx < pSupporter->pSql->subState.numOfSub); - STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->index); + STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->idx); int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); if ((pRes->code = code)!= TSDB_CODE_SUCCESS) { @@ -3524,7 +3528,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSub = pSql->pSubs[i]; SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter)); - pSup->index = i; + pSup->idx = i; pSup->pSql = pSql; pSub->param = pSup; @@ -3572,7 +3576,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { } pSupporter->pSql = pSql; - pSupporter->index = numOfSub; + pSupporter->idx = numOfSub; SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT); if (pNew == NULL) { @@ -3763,19 +3767,19 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) { SScalarExprSupport*pSupport = (SScalarExprSupport*) param; - int32_t index = -1; + int32_t idx = -1; SExprInfo* pExpr = NULL; for (int32_t i = 0; i < pSupport->numOfCols; ++i) { pExpr = taosArrayGetP(pSupport->exprList, i); if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) { - index = i; + idx = i; break; } } - assert(index >= 0 && index < pSupport->numOfCols); - return pSupport->data[index] + pSupport->offset * pExpr->base.resBytes; + assert(idx >= 0 && idx < pSupport->numOfCols); + return pSupport->data[idx] + pSupport->offset * pExpr->base.resBytes; } TAOS_ROW doSetResultRowData(SSqlObj *pSql) { @@ -3815,7 +3819,7 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) { j += 1; } - pRes->row++; // index increase one-step + pRes->row++; // idx increase one-step return pRes->tsrow; } @@ -3959,7 +3963,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr pthread_mutex_init(&pQInfo->lock, NULL); tsem_init(&pQInfo->ready, 0, 0); - int32_t index = 0; + int32_t idx = 0; for(int32_t i = 0; i < numOfGroups; ++i) { SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); @@ -3976,7 +3980,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr STableKeyInfo* info = taosArrayGet(pa, j); window.skey = info->lastKey; - void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); + void* buf = (char*) pQInfo->pBuf + idx * sizeof(STableQueryInfo); STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf); if (item == NULL) { goto _cleanup; 
@@ -3987,7 +3991,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr STableId id = {.tid = 0, .uid = 0}; taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES); - index += 1; + idx += 1; } } diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index d5369e38f0eb0a64a375d4a30fc05173c6a6aafd..944b85e996db3364181d9a2ce4132e827cc3f406 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,7 +47,7 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process. static void *tscCheckDiskUsageTmr; void *tscRpcCache; // cache to keep rpc obj int32_t tscNumOfThreads = 1; // num of rpc threads -char tscLogFileName[12] = "taoslog"; +char tscLogFileName[] = "taoslog"; int tscLogFileNum = 10; static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently @@ -87,24 +87,24 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry return 0; } - SRpcInit rpcInit; - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.localPort = 0; - rpcInit.label = "TSC"; - rpcInit.numOfThreads = tscNumOfThreads; - rpcInit.cfp = tscProcessMsgFromServer; - rpcInit.sessions = tsMaxConnections; - rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.user = (char *)user; - rpcInit.idleTime = tsShellActivityTimer * 1000; - rpcInit.ckey = "key"; - rpcInit.spi = 1; - rpcInit.secret = (char *)secretEncrypt; + SRpcInit rpcInitial; + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.localPort = 0; + rpcInitial.label = "TSC"; + rpcInitial.numOfThreads = tscNumOfThreads; + rpcInitial.cfp = tscProcessMsgFromServer; + rpcInitial.sessions = tsMaxConnections; + rpcInitial.connType = TAOS_CONN_CLIENT; + rpcInitial.user = (char *)user; + rpcInitial.idleTime = tsShellActivityTimer * 1000; + rpcInitial.ckey = "key"; + rpcInitial.spi = 1; + rpcInitial.secret = (char *)secretEncrypt; SRpcObj rpcObj; memset(&rpcObj, 0, sizeof(rpcObj)); tstrncpy(rpcObj.key, key, sizeof(rpcObj.key)); - rpcObj.pDnodeConn = rpcOpen(&rpcInit); + rpcObj.pDnodeConn = rpcOpen(&rpcInitial); if (rpcObj.pDnodeConn == NULL) { pthread_mutex_unlock(&rpcObjMutex); tscError("failed to init connection to server"); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 11b51f97bd4b6b0278d6a7825a15bfe80f60649a..6c03aeefd789825ddabe1cf1a27bf1f5f28995ea 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1760,6 +1760,10 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSubobj(pSql); + if (pSql && (pSql == pSql->rootObj)) { + pthread_mutex_destroy(&pSql->mtxSubs); + } + pSql->signature = NULL; pSql->fp = NULL; tfree(pSql->sqlstr); @@ -2305,10 +2309,10 @@ void tscCloseTscObj(void *param) { } bool tscIsInsertData(char* sqlstr) { - int32_t index = 0; + int32_t idx = 0; do { - SStrToken t0 = tStrGetToken(sqlstr, &index, false); + SStrToken t0 = tStrGetToken(sqlstr, &idx, false); if (t0.type != TK_LP) { return t0.type == TK_INSERT || t0.type == TK_IMPORT; } @@ -2378,12 +2382,12 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { return taosArrayPush(pFieldInfo->internalField, &info); } -SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { +SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t idx, TAOS_FIELD* field) { pFieldInfo->numOfOutput++; struct SInternalField info = { .pExpr = NULL, .visible = true }; info.field = *field; - return taosArrayInsert(pFieldInfo->internalField, index, &info); + return 
taosArrayInsert(pFieldInfo->internalField, idx, &info); } void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { @@ -2398,18 +2402,18 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { } } -SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index) { - assert(index < pFieldInfo->numOfOutput); - return TARRAY_GET_ELEM(pFieldInfo->internalField, index); +SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t idx) { + assert(idx < pFieldInfo->numOfOutput); + return TARRAY_GET_ELEM(pFieldInfo->internalField, idx); } -TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { - assert(index < pFieldInfo->numOfOutput); - return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field; +TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t idx) { + assert(idx < pFieldInfo->numOfOutput); + return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, idx))->field; } -int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { - SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index); +int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t idx) { + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, idx); assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL); return pInfo->pExpr->base.offset; @@ -2635,16 +2639,16 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo return pExpr; } -SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int32_t interSize, bool isTagCol) { int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList); - if (index == num) { + if (idx == num) { return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); - taosArrayInsert(pQueryInfo->exprList, index, &pExpr); + taosArrayInsert(pQueryInfo->exprList, idx, &pExpr); return pExpr; } @@ -2656,10 +2660,10 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde return pExpr; } -SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, +SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, int16_t srcColumnIndex, int16_t type, int32_t size) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SExprInfo* pExpr = tscExprGet(pQueryInfo, index); + SExprInfo* pExpr = tscExprGet(pQueryInfo, idx); if (pExpr == NULL) { return NULL; } @@ -2676,8 +2680,8 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function return pExpr; } -bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) { - if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[index])) { +bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t idx) { + if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[idx])) { return false; } @@ -2725,8 +2729,8 @@ void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t byt assert(pExpr->numOfParams <= 3); } -SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) { - return 
taosArrayGetP(pQueryInfo->exprList, index); +SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t idx) { + return taosArrayGetP(pQueryInfo->exprList, idx); } /* @@ -3014,6 +3018,10 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) // single token, validate it if (len == pToken->n) { + if (taosIsKeyWordToken(pToken->z, (int32_t) pToken->n)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + return validateQuoteToken(pToken, escapeEnabled, NULL); } else { sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true); @@ -3068,6 +3076,12 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) } } + if (escapeEnabled && pToken->type == TK_ID) { + if (pToken->z[0] == TS_BACKQUOTE_CHAR) { + pToken->n = stringProcess(pToken->z, pToken->n); + firstPartQuote = true; + } + } int32_t firstPartLen = pToken->n; pToken->z = sep + 1; @@ -3291,8 +3305,8 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - int16_t index = pExpr->base.colInfo.colIndex; - pColInfo[i].type = (index != -1) ? pTagSchema[index].type : TSDB_DATA_TYPE_BINARY; + int16_t idx = pExpr->base.colInfo.colIndex; + pColInfo[i].type = (idx != -1) ? pTagSchema[idx].type : TSDB_DATA_TYPE_BINARY; } else { pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type; } @@ -3375,7 +3389,7 @@ SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd) { return pQueryInfo; } -STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) { +STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* idx) { int32_t k = -1; for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { @@ -3385,8 +3399,8 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i } } - if (index != NULL) { - *index = k; + if (idx != NULL) { + *idx = k; } assert(k != -1); @@ -3609,19 +3623,19 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) { taosArrayDestroy(&pVgroupTables); } -void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { - assert(pVgroupTable != NULL && index >= 0); +void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t idx) { + assert(pVgroupTable != NULL && idx >= 0); size_t size = taosArrayGetSize(pVgroupTable); - assert(size > index); + assert(size > idx); - SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, idx); // for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { // tfree(pInfo->vgInfo.epAddr[j].fqdn); // } taosArrayDestroy(&pInfo->itemList); - taosArrayRemove(pVgroupTable, index); + taosArrayRemove(pVgroupTable, idx); } void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { @@ -4096,15 +4110,15 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) { SSqlObj* pParentSql = ps->pParentSql; SSqlObj* pSql = tres; - int32_t index = ps->subqueryIndex; - bool ret = subAndCheckDone(pSql, pParentSql, index); + int32_t idx = ps->subqueryIndex; + bool ret = subAndCheckDone(pSql, pParentSql, idx); // TODO refactor tfree(ps); pSql->param = NULL; if (!ret) { - tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx); return; } @@ -4125,13 +4139,13 @@ 
static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { if (pSql->res.code != TSDB_CODE_SUCCESS) { SSqlObj* pParentSql = ps->pParentSql; - int32_t index = ps->subqueryIndex; - bool ret = subAndCheckDone(pSql, pParentSql, index); + int32_t idx = ps->subqueryIndex; + bool ret = subAndCheckDone(pSql, pParentSql, idx); tscFreeRetrieveSup(&pSql->param); if (!ret) { - tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); + tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx); return; } @@ -4236,7 +4250,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { tscAddIntoSqlList(pSql); } - if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + // upstream may be freed before retry + if (pQueryInfo->pUpstream && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream)); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -4950,7 +4965,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI } } - if (!pQueryInfo->stableQuery && TSDB_COL_IS_TAG(pSource->base.colInfo.flag)) { + if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && TSDB_COL_IS_TAG(pSource->base.colInfo.flag)) { pse->colInfo.flag = (pSource->base.colInfo.flag) & (~TSDB_COL_TAG); } else { pse->colInfo.flag = pSource->base.colInfo.flag; diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index 5fdb56703d819d35e60f88ec4d5f7c815be277dc..3ce519c140a2e13c48f3d0eb2f8140d979241b62 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -85,7 +85,7 @@ struct SSchema; typedef struct { int16_t type; int16_t bytes; - int16_t numOfRows; + int32_t numOfRows; char* data; } tExprOperandInfo; diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 3add0b566dffb88d0a60e70c6098fd6586d3b689..e1b7cff8be8c551d0985ea293fcaff3b52f0d97b 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -218,6 +218,11 @@ extern int32_t debugFlag; extern int8_t tsClientMerge; +// probe alive connection +extern int32_t tsProbeSeconds; +extern int32_t tsProbeKillSeconds; +extern int32_t tsProbeInterval; + #ifdef TD_TSZ // lossy extern char lossyColumns[]; diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index ca5d247dd60eb0865c2a02b5d0e911185280f0fa..71702e124913e126df1dd6fbdd202003e5f117e5 100644 --- a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -60,40 +60,40 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight } } -typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t index); +typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t idx); -double getVectorDoubleValue_TINYINT(void *src, int32_t index) { - return (double)*((int8_t *)src + index); +double getVectorDoubleValue_TINYINT(void *src, int32_t idx) { + return (double)*((int8_t *)src + idx); } -double getVectorDoubleValue_UTINYINT(void *src, int32_t index) { - return (double)*((uint8_t *)src + index); +double getVectorDoubleValue_UTINYINT(void *src, int32_t idx) { + return (double)*((uint8_t *)src + idx); } -double getVectorDoubleValue_SMALLINT(void *src, int32_t index) { - return (double)*((int16_t *)src + index); +double getVectorDoubleValue_SMALLINT(void *src, int32_t idx) { + return (double)*((int16_t 
*)src + idx); } -double getVectorDoubleValue_USMALLINT(void *src, int32_t index) { - return (double)*((uint16_t *)src + index); +double getVectorDoubleValue_USMALLINT(void *src, int32_t idx) { + return (double)*((uint16_t *)src + idx); } -double getVectorDoubleValue_INT(void *src, int32_t index) { - return (double)*((int32_t *)src + index); +double getVectorDoubleValue_INT(void *src, int32_t idx) { + return (double)*((int32_t *)src + idx); } -double getVectorDoubleValue_UINT(void *src, int32_t index) { - return (double)*((uint32_t *)src + index); +double getVectorDoubleValue_UINT(void *src, int32_t idx) { + return (double)*((uint32_t *)src + idx); } -double getVectorDoubleValue_BIGINT(void *src, int32_t index) { - return (double)*((int64_t *)src + index); +double getVectorDoubleValue_BIGINT(void *src, int32_t idx) { + return (double)*((int64_t *)src + idx); } -double getVectorDoubleValue_UBIGINT(void *src, int32_t index) { - return (double)*((uint64_t *)src + index); +double getVectorDoubleValue_UBIGINT(void *src, int32_t idx) { + return (double)*((uint64_t *)src + idx); } -double getVectorDoubleValue_FLOAT(void *src, int32_t index) { - return (double)*((float *)src + index); +double getVectorDoubleValue_FLOAT(void *src, int32_t idx) { + return (double)*((float *)src + idx); } -double getVectorDoubleValue_DOUBLE(void *src, int32_t index) { - return (double)*((double *)src + index); +double getVectorDoubleValue_DOUBLE(void *src, int32_t idx) { + return (double)*((double *)src + idx); } -int64_t getVectorTimestampValue(void *src, int32_t index) { - return (int64_t)*((int64_t *)src + index); +int64_t getVectorTimestampValue(void *src, int32_t idx) { + return (int64_t)*((int64_t *)src + idx); } _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { _arithmetic_getVectorDoubleValue_fn_t p = NULL; @@ -124,40 +124,40 @@ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { } -typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index); +typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t idx); -void* getVectorValueAddr_BOOL(void *src, int32_t index) { - return (void*)((bool *)src + index); +void* getVectorValueAddr_BOOL(void *src, int32_t idx) { + return (void*)((bool *)src + idx); } -void* getVectorValueAddr_TINYINT(void *src, int32_t index) { - return (void*)((int8_t *)src + index); +void* getVectorValueAddr_TINYINT(void *src, int32_t idx) { + return (void*)((int8_t *)src + idx); } -void* getVectorValueAddr_UTINYINT(void *src, int32_t index) { - return (void*)((uint8_t *)src + index); +void* getVectorValueAddr_UTINYINT(void *src, int32_t idx) { + return (void*)((uint8_t *)src + idx); } -void* getVectorValueAddr_SMALLINT(void *src, int32_t index) { - return (void*)((int16_t *)src + index); +void* getVectorValueAddr_SMALLINT(void *src, int32_t idx) { + return (void*)((int16_t *)src + idx); } -void* getVectorValueAddr_USMALLINT(void *src, int32_t index) { - return (void*)((uint16_t *)src + index); +void* getVectorValueAddr_USMALLINT(void *src, int32_t idx) { + return (void*)((uint16_t *)src + idx); } -void* getVectorValueAddr_INT(void *src, int32_t index) { - return (void*)((int32_t *)src + index); +void* getVectorValueAddr_INT(void *src, int32_t idx) { + return (void*)((int32_t *)src + idx); } -void* getVectorValueAddr_UINT(void *src, int32_t index) { - return (void*)((uint32_t *)src + index); +void* getVectorValueAddr_UINT(void *src, int32_t idx) { + return (void*)((uint32_t *)src + idx); } -void* 
getVectorValueAddr_BIGINT(void *src, int32_t index) { - return (void*)((int64_t *)src + index); +void* getVectorValueAddr_BIGINT(void *src, int32_t idx) { + return (void*)((int64_t *)src + idx); } -void* getVectorValueAddr_UBIGINT(void *src, int32_t index) { - return (void*)((uint64_t *)src + index); +void* getVectorValueAddr_UBIGINT(void *src, int32_t idx) { + return (void*)((uint64_t *)src + idx); } -void* getVectorValueAddr_FLOAT(void *src, int32_t index) { - return (void*)((float *)src + index); +void* getVectorValueAddr_FLOAT(void *src, int32_t idx) { + return (void*)((float *)src + idx); } -void* getVectorValueAddr_DOUBLE(void *src, int32_t index) { - return (void*)((double *)src + index); +void* getVectorValueAddr_DOUBLE(void *src, int32_t idx) { + return (void*)((double *)src + idx); } _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) { @@ -474,34 +474,34 @@ void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, } } -typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t index); +typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t idx); -int64_t getVectorBigintValue_BOOL(void *src, int32_t index) { - return (int64_t)*((bool *)src + index); +int64_t getVectorBigintValue_BOOL(void *src, int32_t idx) { + return (int64_t)*((bool *)src + idx); } -int64_t getVectorBigintValue_TINYINT(void *src, int32_t index) { - return (int64_t)*((int8_t *)src + index); +int64_t getVectorBigintValue_TINYINT(void *src, int32_t idx) { + return (int64_t)*((int8_t *)src + idx); } -int64_t getVectorBigintValue_UTINYINT(void *src, int32_t index) { - return (int64_t)*((uint8_t *)src + index); +int64_t getVectorBigintValue_UTINYINT(void *src, int32_t idx) { + return (int64_t)*((uint8_t *)src + idx); } -int64_t getVectorBigintValue_SMALLINT(void *src, int32_t index) { - return (int64_t)*((int16_t *)src + index); +int64_t getVectorBigintValue_SMALLINT(void *src, int32_t idx) { + return (int64_t)*((int16_t *)src + idx); } -int64_t getVectorBigintValue_USMALLINT(void *src, int32_t index) { - return (int64_t)*((uint16_t *)src + index); +int64_t getVectorBigintValue_USMALLINT(void *src, int32_t idx) { + return (int64_t)*((uint16_t *)src + idx); } -int64_t getVectorBigintValue_INT(void *src, int32_t index) { - return (int64_t)*((int32_t *)src + index); +int64_t getVectorBigintValue_INT(void *src, int32_t idx) { + return (int64_t)*((int32_t *)src + idx); } -int64_t getVectorBigintValue_UINT(void *src, int32_t index) { - return (int64_t)*((uint32_t *)src + index); +int64_t getVectorBigintValue_UINT(void *src, int32_t idx) { + return (int64_t)*((uint32_t *)src + idx); } -int64_t getVectorBigintValue_BIGINT(void *src, int32_t index) { - return (int64_t)*((int64_t *)src + index); +int64_t getVectorBigintValue_BIGINT(void *src, int32_t idx) { + return (int64_t)*((int64_t *)src + idx); } -int64_t getVectorBigintValue_UBIGINT(void *src, int32_t index) { - return (int64_t)*((uint64_t *)src + index); +int64_t getVectorBigintValue_UBIGINT(void *src, int32_t idx) { + return (int64_t)*((uint64_t *)src + idx); } _arithmetic_getVectorBigintValue_fn_t getVectorBigintValueFn(int32_t srcType) { diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 55afd4c62096974e379e78d448086f10e9860764..4786700f97ab488e33df81810f3f061b5bb67111 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -304,14 +304,14 @@ bool isNEleNull(SDataCol *pCol, int nEle) { return true; } -static FORCE_INLINE void 
dataColSetNullAt(SDataCol *pCol, int index) { +static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int idx) { if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->dataOff[index] = pCol->len; + pCol->dataOff[idx] = pCol->len; char *ptr = POINTER_SHIFT(pCol->pData, pCol->len); setVardataNull(ptr, pCol->type); pCol->len += varDataTLen(ptr); } else { - setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes); + setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * idx), pCol->type, pCol->bytes); pCol->len += TYPE_BYTES[pCol->type]; } } diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index 7198c3d4642b55f83a6662da32d074b543c9bca6..1acfd3c4f9b87041abb888b878306743230e17e2 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -668,55 +668,56 @@ void exprTreeExprNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandI pl = ltmp; pt = transl; + int32_t i; switch (left.type) { case TSDB_DATA_TYPE_TINYINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int8_t *) pl + i)); } break; case TSDB_DATA_TYPE_SMALLINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int16_t *) pl + i)); } break; case TSDB_DATA_TYPE_INT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int32_t *) pl + i)); } break; case TSDB_DATA_TYPE_BIGINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int64_t *) pl + i)); } break; case TSDB_DATA_TYPE_UTINYINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint8_t *) pl + i)); } break; case TSDB_DATA_TYPE_USMALLINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint16_t *) pl + i)); } break; case TSDB_DATA_TYPE_UINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint32_t *) pl + i)); } break; case TSDB_DATA_TYPE_UBIGINT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint64_t *) pl + i)); } break; case TSDB_DATA_TYPE_FLOAT: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((float *) pl + i)); } break; case TSDB_DATA_TYPE_DOUBLE: - for (int16_t i = 0; i < left.numOfRows; i++) { + for (i = 0; i < left.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((double *) pl + i)); } break; @@ -766,55 +767,56 @@ void exprTreeExprNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandI pr = rtmp; pt = transr; + int32_t i; switch (right.type) { case TSDB_DATA_TYPE_TINYINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int8_t *) pr + i)); } break; case TSDB_DATA_TYPE_SMALLINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int16_t *) pr + i)); } break; case TSDB_DATA_TYPE_INT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int32_t *) pr + i)); } break; case TSDB_DATA_TYPE_BIGINT: 
- for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((int64_t *) pr + i)); } break; case TSDB_DATA_TYPE_UTINYINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint8_t *) pr + i)); } break; case TSDB_DATA_TYPE_USMALLINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint16_t *) pr + i)); } break; case TSDB_DATA_TYPE_UINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint32_t *) pr + i)); } break; case TSDB_DATA_TYPE_UBIGINT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((uint64_t *) pr + i)); } break; case TSDB_DATA_TYPE_FLOAT: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((float *) pr + i)); } break; case TSDB_DATA_TYPE_DOUBLE: - for (int16_t i = 0; i < right.numOfRows; i++) { + for (i = 0; i < right.numOfRows; i++) { *((int64_t *) pt + i) = (int64_t)(*((double *) pr + i)); } break; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 616c5fba890a03a2f59a6bee6ae7d2f79ee52760..77540cd0b61ed54c461fa44984abd20d507a74ab 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -268,6 +268,12 @@ int32_t fsDebugFlag = 135; int8_t tsClientMerge = 0; +// probe alive connection +int32_t tsProbeSeconds = 5 * 60; // start probe link alive after tsProbeSeconds from starting query +int32_t tsProbeKillSeconds = 10 * 60; // start kill query after tsProbeKillSeconds from last alive time +int32_t tsProbeInterval = 40; // 40 * 1.5s = 60 s interval time + + #ifdef TD_TSZ // // lossy compress 6 @@ -397,10 +403,10 @@ bool taosCfgDynamicOptions(char *msg) { return false; } -void taosAddDataDir(int index, char *v1, int level, int primary) { - tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); - tsDiskCfg[index].level = level; - tsDiskCfg[index].primary = primary; +void taosAddDataDir(int idx, char *v1, int level, int primary) { + tstrncpy(tsDiskCfg[idx].dir, v1, TSDB_FILENAME_LEN); + tsDiskCfg[idx].level = level; + tsDiskCfg[idx].primary = primary; uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary); } @@ -627,7 +633,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "tcpConnTimout"; + cfg.option = "tcpConnTimeout"; cfg.ptr = &tsTcpConnTimeout; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; @@ -1733,6 +1739,39 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // probeSeconds + cfg.option = "probeSeconds"; + cfg.ptr = &tsProbeSeconds; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 100000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + + // probeKillSeconds + cfg.option = "probeKillSeconds"; + cfg.ptr = &tsProbeKillSeconds; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 100000; + cfg.ptrLength = 0; + cfg.unitType = 
TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + + // probeInterval + cfg.option = "probeInterval"; + cfg.ptr = &tsProbeInterval; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 100000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 521dcffa6c018a00e7485d15ee9e5799054bdba5..7653df3879db6596accad6fd46f8734110960c2f 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -442,29 +442,29 @@ void tNameAssign(SName* dst, const SName* src) { memcpy(dst, src, sizeof(SName)); } -int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) { - assert(dst != NULL && dbToken != NULL && acct != NULL); +int32_t tNameSetDbName(SName* dst, const char* accnt, SStrToken* dbToken) { + assert(dst != NULL && dbToken != NULL && accnt != NULL); // too long account id or too long db name - if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) { + if (strlen(accnt) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) { return -1; } dst->type = TSDB_DB_NAME_T; - tstrncpy(dst->acctId, acct, tListLen(dst->acctId)); + tstrncpy(dst->acctId, accnt, tListLen(dst->acctId)); tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1); return 0; } -int32_t tNameSetAcctId(SName* dst, const char* acct) { - assert(dst != NULL && acct != NULL); +int32_t tNameSetAcctId(SName* dst, const char* accnt) { + assert(dst != NULL && accnt != NULL); // too long account id or too long db name - if (strlen(acct) >= tListLen(dst->acctId)) { + if (strlen(accnt) >= tListLen(dst->acctId)) { return -1; } - tstrncpy(dst->acctId, acct, tListLen(dst->acctId)); + tstrncpy(dst->acctId, accnt, tListLen(dst->acctId)); assert(strlen(dst->acctId) > 0); diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 81bc9c7275b07cf41dc1305e4db807e1b2b839a0..fa4126350c2e809459c830c7f5cd08e2bd851ac7 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -259,8 +259,8 @@ static void getStatics_u64(const void *pData, int32_t numOfRow, int64_t *min, in static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { float *data = (float *)pData; - float fmin = FLT_MAX; - float fmax = -FLT_MAX; + float flmin = FLT_MAX; + float flmax = -FLT_MAX; double dsum = 0; *minIndex = 0; *maxIndex = 0; @@ -276,20 +276,20 @@ static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int6 float fv = GET_FLOAT_VAL((const char*)&(data[i])); dsum += fv; - if (fmin > fv) { - fmin = fv; + if (flmin > fv) { + flmin = fv; *minIndex = i; } - if (fmax < fv) { - fmax = fv; + if (flmax < fv) { + flmax = fv; *maxIndex = i; } } SET_DOUBLE_VAL(sum, dsum); - SET_DOUBLE_VAL(max, fmax); - SET_DOUBLE_VAL(min, fmin); + SET_DOUBLE_VAL(max, flmax); + SET_DOUBLE_VAL(min, flmin); } static void getStatics_d(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, diff --git a/src/dnode/inc/dnodeVRead.h b/src/dnode/inc/dnodeVRead.h index 9c88886f88bedaa63a5071b9dd2d773d4ff1cc0c..768075da9128d3bafbfc4035f929ec589c83f01c 100644 --- a/src/dnode/inc/dnodeVRead.h +++ b/src/dnode/inc/dnodeVRead.h @@ -28,6 +28,8 @@ void * dnodeAllocVQueryQueue(void *pVnode); void * dnodeAllocVFetchQueue(void *pVnode); void 
dnodeFreeVQueryQueue(void *pQqueue); void dnodeFreeVFetchQueue(void *pFqueue); +// respond to probe connection msg +void dnodeResponseProbeMsg(SRpcMsg *pMsg); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeCheck.c index 87baff30673afc68eb23a00bef279433a422ba67..f0218fdba9b531800ab5a6791ee700e0a36e5c9b 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -229,12 +229,12 @@ static void dnodeAllocCheckItem() { } void dnodeCleanupCheck() { - for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { - if (tsCheckItem[index].enable && tsCheckItem[index].stopFp) { - (*tsCheckItem[index].stopFp)(); + for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) { + if (tsCheckItem[idx].enable && tsCheckItem[idx].stopFp) { + (*tsCheckItem[idx].stopFp)(); } - if (tsCheckItem[index].cleanUpFp) { - (*tsCheckItem[index].cleanUpFp)(); + if (tsCheckItem[idx].cleanUpFp) { + (*tsCheckItem[idx].cleanUpFp)(); } } } @@ -242,19 +242,19 @@ void dnodeCleanupCheck() { int32_t dnodeInitCheck() { dnodeAllocCheckItem(); - for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { - if (tsCheckItem[index].initFp) { - if ((*tsCheckItem[index].initFp)() != 0) { - dError("failed to init check item:%s", tsCheckItem[index].name); + for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) { + if (tsCheckItem[idx].initFp) { + if ((*tsCheckItem[idx].initFp)() != 0) { + dError("failed to init check item:%s", tsCheckItem[idx].name); return -1; } } } - for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { - if (tsCheckItem[index].enable && tsCheckItem[index].startFp) { - if ((*tsCheckItem[index].startFp)() != 0) { - dError("failed to check item:%s", tsCheckItem[index].name); + for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) { + if (tsCheckItem[idx].enable && tsCheckItem[idx].startFp) { + if ((*tsCheckItem[idx].startFp)() != 0) { + dError("failed to check item:%s", tsCheckItem[idx].name); exit(-1); } } diff --git a/src/dnode/src/dnodePeer.c index 08269c0bf6141974366936660bee326682cd90f5..cc1b1c98aa6c97032b4ce6aa198088353c48374f 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -56,17 +56,17 @@ int32_t dnodeInitServer() { dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue; - SRpcInit rpcInit; - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.localPort = tsDnodeDnodePort; - rpcInit.label = "DND-S"; - rpcInit.numOfThreads = 1; - rpcInit.cfp = dnodeProcessReqMsgFromDnode; - rpcInit.sessions = TSDB_MAX_VNODES << 4; - rpcInit.connType = TAOS_CONN_SERVER; - rpcInit.idleTime = tsShellActivityTimer * 1000; - - tsServerRpc = rpcOpen(&rpcInit); + SRpcInit rpcInitial; + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.localPort = tsDnodeDnodePort; + rpcInitial.label = "DND-S"; + rpcInitial.numOfThreads = 1; + rpcInitial.cfp = dnodeProcessReqMsgFromDnode; + rpcInitial.sessions = TSDB_MAX_VNODES << 4; + rpcInitial.connType = TAOS_CONN_SERVER; + rpcInitial.idleTime = tsShellActivityTimer * 1000; + + tsServerRpc = rpcOpen(&rpcInitial); if (tsServerRpc == NULL) { dError("failed to init inter-dnodes RPC server"); return -1; @@ -123,19 +123,19 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { int32_t dnodeInitClient() { char secret[TSDB_KEY_LEN] = "secret"; - SRpcInit rpcInit; - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.label = "DND-C"; - 
rpcInit.numOfThreads = 1; - rpcInit.cfp = dnodeProcessRspFromDnode; - rpcInit.sessions = TSDB_MAX_VNODES << 4; - rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.idleTime = tsShellActivityTimer * 1000; - rpcInit.user = "t"; - rpcInit.ckey = "key"; - rpcInit.secret = secret; - - tsClientRpc = rpcOpen(&rpcInit); + SRpcInit rpcInitial; + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.label = "DND-C"; + rpcInitial.numOfThreads = 1; + rpcInitial.cfp = dnodeProcessRspFromDnode; + rpcInitial.sessions = TSDB_MAX_VNODES << 4; + rpcInitial.connType = TAOS_CONN_CLIENT; + rpcInitial.idleTime = tsShellActivityTimer * 1000; + rpcInitial.user = "t"; + rpcInitial.ckey = "key"; + rpcInitial.secret = secret; + + tsClientRpc = rpcOpen(&rpcInitial); if (tsClientRpc == NULL) { dError("failed to init mnode rpc client"); return -1; diff --git a/src/dnode/src/dnodeShell.c index 7676343b37d242c1d174a31959ea4be25a9d5af2..af1afc9766af70b180b06cb56bb35d90de40d2eb 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -77,24 +77,25 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_PROBE_CONN] = dnodeResponseProbeMsg; int32_t numOfThreads = (int32_t)((tsNumOfCores * tsNumOfThreadsPerCore) / 2.0); if (numOfThreads < 1) { numOfThreads = 1; } - SRpcInit rpcInit; - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.localPort = tsDnodeShellPort; - rpcInit.label = "SHELL"; - rpcInit.numOfThreads = numOfThreads; - rpcInit.cfp = dnodeProcessMsgFromShell; - rpcInit.sessions = tsMaxShellConns; - rpcInit.connType = TAOS_CONN_SERVER; - rpcInit.idleTime = tsShellActivityTimer * 1000; - rpcInit.afp = dnodeRetrieveUserAuthInfo; - - tsShellRpc = rpcOpen(&rpcInit); + SRpcInit rpcInitial; + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.localPort = tsDnodeShellPort; + rpcInitial.label = "SHELL"; + rpcInitial.numOfThreads = numOfThreads; + rpcInitial.cfp = dnodeProcessMsgFromShell; + rpcInitial.sessions = tsMaxShellConns; + rpcInitial.connType = TAOS_CONN_SERVER; + rpcInitial.idleTime = tsShellActivityTimer * 1000; + rpcInitial.afp = dnodeRetrieveUserAuthInfo; + + tsShellRpc = rpcOpen(&rpcInitial); if (tsShellRpc == NULL) { dError("failed to init shell rpc server"); return -1; @@ -258,10 +259,10 @@ SDnodeStatisInfo dnodeGetStatisInfo() { return info; } -int32_t dnodeGetHttpStatusInfo(int32_t index) { +int32_t dnodeGetHttpStatusInfo(int32_t idx) { int32_t httpStatus = 0; #ifdef HTTP_EMBEDDED - httpStatus = httpGetStatusCodeCount(index); + httpStatus = httpGetStatusCodeCount(idx); #endif return httpStatus; } diff --git a/src/dnode/src/dnodeVRead.c index c404ab1a55c3788f5756c99f7914764e6e9af295..0b0bf29e504e1779ebe7921d8d374b1b31729b7a 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -152,3 +152,13 @@ static void *dnodeProcessReadQueue(void *wparam) { return NULL; } + +// respond to probe connection msg +void dnodeResponseProbeMsg(SRpcMsg *pMsg) { + // check probe conn msg + if(pMsg->msgType == TSDB_MSG_TYPE_PROBE_CONN) { + SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = 0, .msgType = TSDB_MSG_TYPE_PROBE_CONN_RSP}; + rpcSendResponse(&rpcRsp); + return ; + } +} \ No newline at end of file diff --git a/src/inc/taos.h index 5cb0420fe2e8a19d07bc08ebc89fccfc2af968d6..44d83969a8090fc1e98fcec08065dd1834cf3f75 100644 --- a/src/inc/taos.h 
+++ b/src/inc/taos.h @@ -48,6 +48,12 @@ typedef void **TAOS_ROW; #define TSDB_DATA_TYPE_UBIGINT 14 // 8 bytes #define TSDB_DATA_TYPE_JSON 15 // json string +typedef enum { + BOOL_FALSE = 0, + BOOL_TRUE = 1, + BOOL_ASYNC = 2 // request is being processed asynchronously by another thread; not yet true or false +} TBOOL; + typedef enum { TSDB_OPTION_LOCALE, TSDB_OPTION_CHARSET, diff --git a/src/inc/taoserror.h index 6266926d55f2e7057732aa9fc882791479b19098..fc387d331b07fcf407ade3305d1874ef2b5c8c31 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -60,6 +60,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready" #define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to resolve FQDN" #define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version" +#define TSDB_CODE_RPC_SHORTCUT TAOS_DEF_ERROR_CODE(0, 0x0017) //"Shortcut" //common & util #define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported" @@ -116,6 +117,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x0226) //"Invalid timestamp precision type") #define TSDB_CODE_TSC_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0227) //"Result set too large to be output") #define TSDB_CODE_TSC_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0228) //"invalid table schema version") +#define TSDB_CODE_TSC_TOO_MANY_SML_LINES TAOS_DEF_ERROR_CODE(0, 0x0229) //"too many lines in batch") +#define TSDB_CODE_TSC_SEND_DATA_FAILED TAOS_DEF_ERROR_CODE(0, 0x0230) //"Client send request data error" // mnode #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed" diff --git a/src/inc/taosmsg.h index 1a8907efabc483233f7e12ce2511bfec339a8d6f..280747afed543eaddf2ed657c6067f4aad31a321 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -123,6 +123,9 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TP, "alter-tp" ) // delete TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DELDATA, "delete-data" ) +// syn -> ack probe connection msg +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_PROBE_CONN, "probe-connection-alive" ) + #ifndef TAOS_MESSAGE_C TSDB_MSG_TYPE_MAX // 105 diff --git a/src/inc/trpc.h index 5e3fbd571ef4b6425f2e5a58c308c8fc9da0b12e..fe061eb4f20fa550299316c028d99cdd9d6e8bd7 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -85,7 +85,7 @@ void rpcClose(void *); void *rpcMallocCont(int contLen); void rpcFreeCont(void *pCont); void *rpcReallocCont(void *ptr, int contLen); -void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); +TBOOL rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); void rpcSendResponse(const SRpcMsg *pMsg); void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet); int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); @@ -93,6 +93,10 @@ void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp int rpcReportProgress(void *pConn, char *pCont, int contLen); void rpcCancelRequest(int64_t rid); int32_t rpcUnusedSession(void * rpcInfo, bool bLock); +// send a probe-alive message on the connection identified by rpcRid +bool rpcSendProbe(int64_t rpcRid, void* pPrevContext, bool *pReqOver); +// after a sql request is sent, save the connection info +bool rpcSaveSendInfo(int64_t rpcRid, void** ppContext); #ifdef __cplusplus } diff --git a/src/kit/shell/CMakeLists.txt index 
b311361c438d033ad3f7582d30df7d1c33357c1d..614fa328bae1f0ad32917da6b08dc372a8d696a2 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -46,6 +46,8 @@ ELSEIF (TD_DARWIN) LIST(APPEND SRC ./src/shellCommand.c) LIST(APPEND SRC ./src/shellImport.c) LIST(APPEND SRC ./src/shellCheck.c) + LIST(APPEND SRC ./src/shellAuto.c) + LIST(APPEND SRC ./src/tire.c) ADD_EXECUTABLE(shell ${SRC}) # linking with dylib TARGET_LINK_LIBRARIES(shell taos cJson) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 1e2136a8abefc44daa29e10d48af7f95a350bda1..5c48728d72793567c138fd6040aadbe13cac74d0 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -48,13 +48,11 @@ typedef struct SShellArguments { char* database; char* timezone; bool restful; - char* token; #ifdef WINDOWS SOCKET socket; #else int socket; #endif - TAOS* con; bool is_raw_time; bool is_use_passwd; @@ -70,6 +68,11 @@ typedef struct SShellArguments { int pktNum; char* pktType; char* netTestRole; + char* cloudDsn; + bool cloud; + char* cloudHost; + char* cloudPort; + char* cloudToken; } SShellArguments; typedef enum WS_ACTION_TYPE_S { WS_CONN, WS_QUERY, WS_FETCH, WS_FETCH_BLOCK } WS_ACTION_TYPE; @@ -91,7 +94,6 @@ void shellCheck(TAOS* con, SShellArguments* args); void get_history_path(char* history); void shellCheck(TAOS* con, SShellArguments* args); void cleanup_handler(void* arg); -char *last_strstr(const char *haystack, const char *needle); void exitShell(); int shellDumpResult(TAOS_RES* con, char* fname, int* error_no, bool printMode); void shellGetGrantInfo(void* con); @@ -99,7 +101,8 @@ int isCommentLine(char* line); int wsclient_handshake(); int wsclient_conn(); void wsclient_query(char* command); -int tcpConnect(); +int tcpConnect(char* host, int port); +int parse_cloud_dsn(); /**************** Global variable declarations ****************/ extern char PROMPT_HEADER[]; diff --git a/src/kit/shell/inc/shellAuto.h b/src/kit/shell/inc/shellAuto.h new file mode 100644 index 0000000000000000000000000000000000000000..0bd6bdf4038c112b453feea02950cc3aa5577a50 --- /dev/null +++ b/src/kit/shell/inc/shellAuto.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __SHELL_AUTO__ +#define __SHELL_AUTO__ + +#define TAB_KEY 0x09 + +// press tab key +void pressTabKey(TAOS * con, Command * cmd); + +// press other key +void pressOtherKey(char c); + +// init shell auto function, called once at shell start +bool shellAutoInit(); + +// exit shell auto function, called once at shell exit +void shellAutoExit(); + +// callback into the autotab module +void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb); + + +#endif diff --git a/src/kit/shell/inc/shellCommand.h index 6e4d3e382e3d7e8c50405c07da8ed73725230434..47ef6b30a9b37ef0d790bcfc427abdfc14907874 100644 --- a/src/kit/shell/inc/shellCommand.h +++ b/src/kit/shell/inc/shellCommand.h @@ -41,6 +41,7 @@ extern void deleteChar(Command *cmd); extern void moveCursorLeft(Command *cmd); extern void moveCursorRight(Command *cmd); extern void positionCursorHome(Command *cmd); +extern void positionCursorMiddle(Command *cmd); extern void positionCursorEnd(Command *cmd); extern void showOnScreen(Command *cmd); extern void updateBuffer(Command *cmd); @@ -51,5 +52,6 @@ int countPrefixOnes(unsigned char c); void clearScreen(int ecmd_pos, int cursor_pos); void printChar(char c, int times); void positionCursor(int step, int direction); +void getPrevCharSize(const char *str, int pos, int *size, int *width); #endif diff --git a/src/kit/shell/inc/tire.h new file mode 100644 index 0000000000000000000000000000000000000000..88bae5480937cfdb1513415d13ba41d0a60e6b22 --- /dev/null +++ b/src/kit/shell/inc/tire.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __TRIE__ +#define __TRIE__ + +// +// The prefix search tree (trie) is an efficient structure for storing and searching words; it supports the visible ascii characters +// +#define FIRST_ASCII 40 // first visible char is '(' +#define LAST_ASCII 122 // last visible char is 'z' + +// number of storable chars: LAST_ASCII - FIRST_ASCII + 1 = 83 +#define CHAR_CNT (LAST_ASCII - FIRST_ASCII + 1) +#define MAX_WORD_LEN 256 // max length of an inserted word + +// define STire +#define TIRE_TREE 0 +#define TIRE_LIST 1 + +typedef struct STireNode { + struct STireNode** d; + bool end; // marks the end of a word +}STireNode; + +typedef struct StrName { + char * name; + struct StrName * next; +}StrName; + + +typedef struct STire { + char type; // see the TIRE_ defines + STireNode root; + + StrName * head; + StrName * tail; + + int count; // total word count + int ref; +}STire; + +typedef struct SMatchNode { + char* word; + struct SMatchNode* next; +}SMatchNode; + + +typedef struct SMatch { + SMatchNode* head; + SMatchNode* tail; // append node to tail + int count; + char pre[MAX_WORD_LEN]; +}SMatch; + + +// ----------- interface ------------- + +// create prefix search tree; free the returned value with freeTire +STire* createTire(char type); + +// destroy prefix search tree +void freeTire(STire* tire); + +// add a new word +bool insertWord(STire* tire, char* word); + +// delete a word +bool deleteWord(STire* tire, char* word); + +// match words by prefix; if match is not NULL, append all items to it and return it +SMatch* matchPrefix(STire* tire, char* prefix, SMatch* match); + +// get all items from the trie +SMatch* enumAll(STire* tire); + +// free match result +void freeMatch(SMatch* match); + +#endif diff --git a/src/kit/shell/src/shellAuto.c new file mode 100644 index 0000000000000000000000000000000000000000..8622b201a6fe6666476f0ac9916aebc169b78923 --- /dev/null +++ b/src/kit/shell/src/shellAuto.c @@ -0,0 +1,1761 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define __USE_XOPEN +#include "os.h" +#include "tglobal.h" +#include "shell.h" +#include "shellCommand.h" +#include "tkey.h" +#include "tulog.h" +#include "shellAuto.h" +#include "tire.h" +#include "tthread.h" + +// +// ------------- define area --------------- +// +#define UNION_ALL " union all " + + +// extern function +void insertChar(Command *cmd, char *c, int size); + + +typedef struct SAutoPtr { + STire* p; + int ref; +}SAutoPtr; + +typedef struct SWord{ + int type; // word type, see the WT_ defines + char * word; + int32_t len; + struct SWord * next; + bool free; // if true, needs to be freed +}SWord; + +typedef struct { + char * source; + int32_t source_len; // valid data length in source + int32_t count; + SWord* head; + // matched information + int32_t matchIndex; // matched word index in words + int32_t matchLen; // matched length within the matched word +}SWords; + + +SWords shellCommands[] = { + {"alter database ", 0, 0, NULL}, + {"alter dnode balance ", 0, 0, NULL}, + {"alter dnode resetlog;", 0, 0, NULL}, + {"alter dnode debugFlag 141;", 0, 0, NULL}, + {"alter dnode monitor 1;", 0, 0, NULL}, + {"alter table ", 0, 0, NULL}, + {"alter table modify column", 0, 0, NULL}, + {"alter local resetlog;", 0, 0, NULL}, + {"alter local DebugFlag 143;", 0, 0, NULL}, + {"alter local cDebugFlag 143;", 0, 0, NULL}, + {"alter local uDebugFlag 143;", 0, 0, NULL}, + {"alter local rpcDebugFlag 143;", 0, 0, NULL}, + {"alter local tmrDebugFlag 143;", 0, 0, NULL}, + {"alter topic", 0, 0, NULL}, + {"alter user pass", 0, 0, NULL}, + {"alter user privilege read", 0, 0, NULL}, + {"alter user privilege write", 0, 0, NULL}, + {"create table using tags(", 0, 0, NULL}, + {"create database ", 0, 0, NULL}, + {"create table as ", 0, 0, NULL}, + {"create dnode ", 0, 0, NULL}, + {"create topic", 0, 0, NULL}, + {"create function ", 0, 0, NULL}, + {"create user pass", 0, 0, NULL}, + {"compact vnode in", 0, 0, NULL}, + {"describe ", 0, 0, NULL}, +#ifdef TD_ENTERPRISE + {"delete from where", 0, 0, NULL}, +#endif + {"drop database ", 0, 0, NULL}, + {"drop table ", 0, 0, NULL}, + {"drop dnode ", 0, 0, NULL}, + {"drop user ;", 0, 0, NULL}, + {"drop function", 0, 0, NULL}, + {"drop topic", 0, 0, NULL}, + {"kill connection", 0, 0, NULL}, + {"kill query", 0, 0, NULL}, + {"kill stream", 0, 0, NULL}, + {"select * from where ", 0, 0, NULL}, + {"select _block_dist() from \\G;", 0, 0, NULL}, + {"select client_version();", 0, 0, NULL}, + {"select current_user();", 0, 0, NULL}, + {"select database;", 0, 0, NULL}, + {"select server_version();", 0, 0, NULL}, + {"set max_binary_display_width ", 0, 0, NULL}, + {"show create database \\G;", 0, 0, NULL}, + {"show create stable \\G;", 0, 0, NULL}, + {"show create table \\G;", 0, 0, NULL}, + {"show connections;", 0, 0, NULL}, + {"show databases;", 0, 0, NULL}, + {"show dnodes;", 0, 0, NULL}, + {"show functions;", 0, 0, NULL}, + {"show modules;", 0, 0, NULL}, + {"show mnodes;", 0, 0, NULL}, + {"show queries;", 0, 0, NULL}, + {"show stables;", 0, 0, NULL}, + {"show stables like ", 0, 0, NULL}, + {"show streams;", 0, 0, NULL}, + {"show scores;", 0, 0, NULL}, + {"show tables;", 0, 0, NULL}, + {"show tables like", 0, 0, NULL}, + {"show users;", 0, 0, NULL}, + {"show variables;", 0, 0, NULL}, + {"show vgroups;", 0, 0, NULL}, + {"insert into values(", 0, 0, NULL}, + {"insert into using tags(", 0, 0, NULL}, + {"use ", 0, 0, NULL}, + {"quit", 0, 0, NULL} +}; + +char * keywords[] = { + "and ", + "asc ", + "desc ", + "from ", + "fill(", + "limit ", + "where ", + "interval(", + "order by ", + 
"offset ", + "or ", + "group by ", + "now()", + "session(", + "sliding ", + "slimit ", + "soffset ", + "state_window(", + "today() ", + "union all select ", +}; + +char * functions[] = { + "count(", + "sum(", + "avg(", + "last(", + "last_row(", + "top(", + "interp(", + "max(", + "min(", + "now()", + "today()", + "percentile(", + "tail(", + "pow(", + "abs(", + "atan(", + "acos(", + "asin(", + "apercentile(", + "bottom(", + "cast(", + "ceil(", + "char_length(", + "cos(", + "concat(", + "concat_ws(", + "csum(", + "diff(", + "derivative(", + "elapsed(", + "first(", + "floor(", + "hyperloglog(", + "histogram(", + "irate(", + "leastsquares(", + "length(", + "log(", + "lower(", + "ltrim(", + "mavg(", + "mode(", + "tan(", + "round(", + "rtrim(", + "sample(", + "sin(", + "spread(", + "substr(", + "statecount(", + "stateduration(", + "stddev(", + "sqrt(", + "timediff(", + "timezone(", + "timetruncate(", + "twa(", + "to_unixtimestamp(", + "unique(", + "upper(", +}; + +char * tb_actions[] = { + "add column", + "modify column", + "drop column", + "change tag", +}; + +char * db_options[] = { + "blocks", + "cachelast", + "comp", + "keep", + "replica", + "quorum", +}; + +char * data_types[] = { + "timestamp", + "int", + "float", + "double", + "binary(16)", + "nchar(16)", + "bigint", + "smallint", + "tinyint", + "bool", + "json" +}; + +char * key_tags[] = { + "tags(" +}; + + +// +// ------- gobal variant define --------- +// +int32_t firstMatchIndex = -1; // first match shellCommands index +int32_t lastMatchIndex = -1; // last match shellCommands index +int32_t curMatchIndex = -1; // current match shellCommands index +int32_t lastWordBytes = -1; // printShow last word length +bool waitAutoFill = false; + + +// +// ----------- global var array define ----------- +// +#define WT_VAR_DBNAME 0 +#define WT_VAR_STABLE 1 +#define WT_VAR_TABLE 2 +#define WT_VAR_DNODEID 3 +#define WT_VAR_USERNAME 4 +#define WT_VAR_ALLTABLE 5 +#define WT_VAR_FUNC 6 +#define WT_VAR_KEYWORD 7 +#define WT_VAR_TBACTION 8 +#define WT_VAR_DBOPTION 9 +#define WT_VAR_DATATYPE 10 +#define WT_VAR_KEYTAGS 11 +#define WT_VAR_ANYWORD 12 +#define WT_VAR_CNT 13 + +#define WT_FROM_DB_MAX 4 // max get content from db +#define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1) + +#define WT_TEXT 0xFF + +char dbName[256] = ""; // save use database name; +// tire array +STire* tires[WT_VAR_CNT]; +pthread_mutex_t tiresMutex; +//save thread handle obtain var name from db server +pthread_t* threads[WT_FROM_DB_CNT]; +// obtain var name with sql from server +char varTypes[WT_VAR_CNT][64] = { + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" +}; + +char varSqls[WT_FROM_DB_CNT][64] = { + "show databases;", + "show stables;", + "show tables;", + "show dnodes;", + "show users;" +}; + + +// var words current cursor, if user press any one key except tab, cursorVar can be reset to -1 +int cursorVar = -1; +bool varMode = false; // enter var names list mode + + +TAOS* varCon = NULL; +Command* varCmd = NULL; +SMatch* lastMatch = NULL; // save last match result +int cntDel = 0; // delete byte count after next press tab + + +// show auto tab introduction +void printfIntroduction() { + printf(" ********************* How to Use TAB in TAOS Shell ******************************\n"); + printf(" * Taos shell supports pressing TAB key to complete word. You can try it. *\n"); + printf(" * Press TAB key anywhere, You'll get surprise. *\n"); + printf(" * KEYBOARD SHORTCUT: *\n"); + printf(" * [ TAB ] ...... 
Complete the word or show help if no input *\n"); + printf(" * [ Ctrl + A ] ...... move cursor to [A]head of line *\n"); + printf(" * [ Ctrl + E ] ...... move cursor to [E]nd of line *\n"); + printf(" * [ Ctrl + W ] ...... move cursor to middle of line *\n"); + printf(" * [ Ctrl + L ] ...... clear screen *\n"); + printf(" * [ Ctrl + K ] ...... clear after cursor *\n"); + printf(" * [ Ctrl + U ] ...... clear before cursor *\n"); + printf(" * *\n"); + printf(" **********************************************************************************\n\n"); +} + +void showHelp() { + printf("\nThe following are supported commands for Taos shell:"); + printf("\n\ + ----- A ----- \n\ + alter database \n\ + alter dnode balance \n\ + alter dnode resetlog;\n\ + alter dnode DebugFlag 143;\n\ + alter dnode monitor 1;\n\ + alter table ADD COLUMN ; \n\ + alter table DROP COLUMN ; \n\ + alter table MODIFY COLUMN ;\n\ + alter local resetlog; \n\ + alter local DebugFlag 143; \n\ + alter topic \n\ + alter user pass\n\ + alter user privilege read ;\n\ + alter user privilege write ;\n\ + ----- C ----- \n\ + create table using tags ...\n\ + create database ;\n\ + create table as ...\n\ + create dnode \n\ + create topic \n\ + create function \n\ + create user pass ;\n\ + compact vnode in (vgid,vgid,vgid);\n\ + ----- D ----- \n\ + describe ;\n\ + delete from where ... \n\ + drop database ;\n\ + drop table ;\n\ + drop dnode ;\n\ + drop function ;\n\ + drop topic ;\n\ + drop user ;\n\ + ----- K ----- \n\ + kill connection ; \n\ + kill query ; \n\ + kill stream ; \n\ + ----- S ----- \n\ + select * from where ... \n\ + select _block_dist() from ;\n\ + select client_version();\n\ + select current_user();\n\ + select database;\n\ + select server_version();\n\ + set max_binary_display_width ; \n\ + show create database ;\n\ + show create stable ;\n\ + show create table ;\n\ + show connections;\n\ + show databases;\n\ + show dnodes;\n\ + show functions;\n\ + show modules;\n\ + show mnodes;\n\ + show queries;\n\ + show stables;\n\ + show stables like ''; note: regular expression only supports '_' and '%%' match.\n\ + show streams;\n\ + show scores;\n\ + show tables;\n\ + show tables like ''; \n\ + show users;\n\ + show variables;\n\ + show vgroups;\n\ + ----- I ----- \n\ + insert into values(...) 
;\n\ + ----- U ----- \n\ + use ;"); + + printf("\n\n"); + + // defined in getDuration() function + printf("\ + Timestamp expression Format:\n\ + b - nanosecond \n\ + u - microsecond \n\ + a - millisecond \n\ + s - second \n\ + m - minute \n\ + h - hour \n\ + d - day \n\ + w - week \n\ + now - current time \n\ + Example : \n\ + select * from t1 where ts > now - 2w + 3d and ts <= now - 1w -2h ;\n"); + printf("\n"); +} + +// +// ------------------- parse words -------------------------- +// + +#define SHELL_COMMAND_COUNT() (sizeof(shellCommands) / sizeof(SWords)) + +// get word at index +SWord * atWord(SWords * command, int32_t index) { + SWord * word = command->head; + for (int32_t i = 0; i < index; i++) { + if (word == NULL) + return NULL; + word = word->next; + } + + return word; +} + +#define MATCH_WORD(x) atWord(x, x->matchIndex) + +int wordType(const char* p, int32_t len) { + for (int i = 0; i < WT_VAR_CNT; i++) { + if (strncmp(p, varTypes[i], len) == 0) + return i; + } + return WT_TEXT; +} + +// add word +SWord * addWord(const char* p, int32_t len, bool pattern) { + SWord* word = (SWord *) malloc(sizeof(SWord)); + memset(word, 0, sizeof(SWord)); + word->word = (char* )p; + word->len = len; + + // check format + if (pattern) { + word->type = wordType(p, len); + } else { + word->type = WT_TEXT; + } + + return word; +} + +// parse one command +void parseCommand(SWords * command, bool pattern) { + char * p = command->source; + int32_t start = 0; + int32_t size = command->source_len > 0 ? command->source_len : strlen(p); + + bool lastBlank = false; + for (int i = 0; i <= size; i++) { + if (p[i] == ' ' || i == size) { + // skip continuous blanks like ' ' + if (p[i] == ' ') { + if (lastBlank) { + start ++; + continue; + } + if (i == 0) { // first blank + lastBlank = true; + start ++; + continue; + } + lastBlank = true; + } + + // found separator or string end, append word + if (command->head == NULL) { + command->head = addWord(p + start, i - start, pattern); + command->count = 1; + } else { + SWord * word = command->head; + while (word->next) { + word = word->next; + } + word->next = addWord(p + start, i - start, pattern); + command->count ++; + } + start = i + 1; + } else { + lastBlank = false; + } + } +} + +// free Command +void freeCommand(SWords * command) { + SWord * word = command->head; + if (word == NULL) { + return ; + } + + // loop + while (word->next) { + SWord * tmp = word; + word = word->next; + // free if it was malloc'ed + if(tmp->free && tmp->word) + free(tmp->word); + free(tmp); + } + + // free if it was malloc'ed + if(word->free && word->word) + free(word->word); + free(word); +} + +void GenerateVarType(int type, char** p, int count) { + STire* tire = createTire(TIRE_LIST); + for (int i = 0; i < count; i++) { + insertWord(tire, p[i]); + } + + pthread_mutex_lock(&tiresMutex); + tires[type] = tire; + pthread_mutex_unlock(&tiresMutex); +} + +// +// -------------------- shell auto ---------------- +// + + +// init shell auto function, called once at shell start +bool shellAutoInit() { + // command + int32_t count = SHELL_COMMAND_COUNT(); + for (int32_t i = 0; i < count; i ++) { + parseCommand(shellCommands + i, true); + } + + // tires + memset(tires, 0, sizeof(STire*) * WT_VAR_CNT); + pthread_mutex_init(&tiresMutex, NULL); + + // threads + memset(threads, 0, sizeof(pthread_t*) * WT_FROM_DB_CNT); + + // generate varType + GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) /sizeof(char *)); + GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) /sizeof(char *)); + GenerateVarType(WT_VAR_DBOPTION, 
+// free Command
+void freeCommand(SWords * command) {
+  SWord * word = command->head;
+  if (word == NULL) {
+    return ;
+  }
+
+  // loop
+  while (word->next) {
+    SWord * tmp = word;
+    word = word->next;
+    // free if it was malloc'ed
+    if (tmp->free && tmp->word)
+      free(tmp->word);
+    free(tmp);
+  }
+
+  // free if it was malloc'ed
+  if (word->free && word->word)
+    free(word->word);
+  free(word);
+}
+
+void GenerateVarType(int type, char** p, int count) {
+  STire* tire = createTire(TIRE_LIST);
+  for (int i = 0; i < count; i++) {
+    insertWord(tire, p[i]);
+  }
+
+  pthread_mutex_lock(&tiresMutex);
+  tires[type] = tire;
+  pthread_mutex_unlock(&tiresMutex);
+}
+
+//
+// -------------------- shell auto ----------------
+//
+
+
+// init shell auto function, called once at shell start
+bool shellAutoInit() {
+  // command
+  int32_t count = SHELL_COMMAND_COUNT();
+  for (int32_t i = 0; i < count; i++) {
+    parseCommand(shellCommands + i, true);
+  }
+
+  // tires
+  memset(tires, 0, sizeof(STire*) * WT_VAR_CNT);
+  pthread_mutex_init(&tiresMutex, NULL);
+
+  // threads
+  memset(threads, 0, sizeof(pthread_t*) * WT_FROM_DB_CNT);
+
+  // generate varType
+  GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) / sizeof(char *));
+  GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) / sizeof(char *));
+  GenerateVarType(WT_VAR_DBOPTION, db_options, sizeof(db_options) / sizeof(char *));
+  GenerateVarType(WT_VAR_TBACTION, tb_actions, sizeof(tb_actions) / sizeof(char *));
+  GenerateVarType(WT_VAR_DATATYPE, data_types, sizeof(data_types) / sizeof(char *));
+  GenerateVarType(WT_VAR_KEYTAGS, key_tags, sizeof(key_tags) / sizeof(char *));
+
+  printfIntroduction();
+
+  return true;
+}
+
+// exit shell auto function, called once at shell exit
+void shellAutoExit() {
+  // free commands
+  int32_t count = SHELL_COMMAND_COUNT();
+  for (int32_t i = 0; i < count; i++) {
+    freeCommand(shellCommands + i);
+  }
+
+  // free tires
+  pthread_mutex_lock(&tiresMutex);
+  for (int32_t i = 0; i < WT_VAR_CNT; i++) {
+    if (tires[i]) {
+      freeTire(tires[i]);
+      tires[i] = NULL;
+    }
+  }
+  pthread_mutex_unlock(&tiresMutex);
+  // destroy
+  pthread_mutex_destroy(&tiresMutex);
+
+  // free threads
+  for (int32_t i = 0; i < WT_VAR_CNT; i++) {
+    if (threads[i]) {
+      taosDestroyThread(threads[i]);
+      threads[i] = NULL;
+    }
+  }
+
+  // free lastMatch
+  if (lastMatch) {
+    freeMatch(lastMatch);
+    lastMatch = NULL;
+  }
+}
+
+//
+// ------------------- auto ptr for tires --------------------------
+//
+bool setNewAuotPtr(int type, STire* pNew) {
+  if (pNew == NULL)
+    return false;
+
+  pthread_mutex_lock(&tiresMutex);
+  STire* pOld = tires[type];
+  if (pOld != NULL) {
+    // a previous value exists, release its own reference
+    if (--pOld->ref == 0) {
+      freeTire(pOld);
+    }
+  }
+
+  // set new
+  tires[type] = pNew;
+  tires[type]->ref = 1;
+  pthread_mutex_unlock(&tiresMutex);
+
+  return true;
+}
+
+// get ptr
+STire* getAutoPtr(int type) {
+  if (tires[type] == NULL) {
+    return NULL;
+  }
+
+  pthread_mutex_lock(&tiresMutex);
+  tires[type]->ref++;
+  pthread_mutex_unlock(&tiresMutex);
+
+  return tires[type];
+}
+
+// put tire back to tires[type]; if tire is no longer tires[type], it was replaced and must be freed once unused
+void putBackAutoPtr(int type, STire* tire) {
+  if (tire == NULL) {
+    return ;
+  }
+
+  pthread_mutex_lock(&tiresMutex);
+  if (tires[type] != tire) {
+    // replaced from outside, can't put back; free when the last reference is gone
+    if (--tire->ref == 0) {
+      freeTire(tire);
+    }
+
+  } else {
+    tires[type]->ref--;
+    assert(tires[type]->ref > 0);
+  }
+  pthread_mutex_unlock(&tiresMutex);
+
+  return ;
+}
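setNewAuotPtr/getAutoPtr/putBackAutoPtr form a small mutex-guarded reference-count scheme around the shared tries, so that a background refresh can swap in a new trie while readers still hold the old one. A condensed standalone sketch of that acquire/release/replace life cycle (illustrative names; it assumes the free-at-zero convention used in the fix above, so treat it as a reading aid rather than the patch's exact semantics):

#include <pthread.h>
#include <stdlib.h>

typedef struct { int ref; /* payload omitted */ } RcBox;

static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;
static RcBox *gCur = NULL;

RcBox *acquire(void) {                // cf. getAutoPtr
  pthread_mutex_lock(&gLock);
  RcBox *p = gCur;
  if (p) p->ref++;
  pthread_mutex_unlock(&gLock);
  return p;
}

void release(RcBox *p) {              // cf. putBackAutoPtr
  if (p == NULL) return;
  pthread_mutex_lock(&gLock);
  if (--p->ref == 0) free(p);         // last reference gone
  pthread_mutex_unlock(&gLock);
}

void replace(RcBox *pNew) {           // cf. setNewAuotPtr
  pNew->ref = 1;                      // owner reference held via gCur
  pthread_mutex_lock(&gLock);
  RcBox *pOld = gCur;
  gCur = pNew;
  pthread_mutex_unlock(&gLock);
  release(pOld);                      // drop the old owner reference
}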
+//
+// ------------------- var Word --------------------------
+//
+
+#define MAX_CACHED_CNT 100000 // max cached rows
+// write sql result to var names, return the number of rows written
+int writeVarNames(int type, TAOS_RES* tres) {
+  // fetch row
+  TAOS_ROW row = taos_fetch_row(tres);
+  if (row == NULL) {
+    return 0;
+  }
+
+  TAOS_FIELD *fields = taos_fetch_fields(tres);
+  // create new tires
+  char tireType = type == WT_VAR_TABLE ? TIRE_TREE : TIRE_LIST;
+  STire* tire = createTire(tireType);
+
+  // enum rows
+  char name[1024];
+  int numOfRows = 0;
+  do {
+    int32_t* lengths = taos_fetch_lengths(tres);
+    int32_t bytes = lengths[0];
+    if (fields[0].type == TSDB_DATA_TYPE_SMALLINT) {
+      sprintf(name, "%d", *(int16_t*)row[0]);
+    } else {
+      memcpy(name, row[0], bytes);
+    }
+
+    name[bytes] = 0; // set string end
+    // insert into tire
+    insertWord(tire, name);
+
+    if (++numOfRows > MAX_CACHED_CNT) {
+      break;
+    }
+
+    row = taos_fetch_row(tres);
+  } while (row != NULL);
+
+  // replace old tire
+  setNewAuotPtr(type, tire);
+
+  return numOfRows;
+}
+
+bool firstMatchCommand(TAOS * con, Command * cmd);
+//
+// thread routine: obtain var names from the db server
+//
+void* varObtainThread(void* param) {
+  int type = *(int* )param;
+  free(param);
+
+  if (varCon == NULL || type > WT_FROM_DB_MAX) {
+    return NULL;
+  }
+
+  TAOS_RES* pSql = taos_query_h(varCon, varSqls[type], NULL);
+  if (taos_errno(pSql)) {
+    taos_free_result(pSql);
+    return NULL;
+  }
+
+  // write var names from pSql
+  int cnt = writeVarNames(type, pSql);
+
+  // free sql
+  taos_free_result(pSql);
+
+  // check whether auto tab needs to be triggered
+  if (cnt > 0 && waitAutoFill) {
+    // press the tab key programmatically
+    firstMatchCommand(varCon, varCmd);
+  }
+
+  return NULL;
+}
+
+// match only the next word from all matched words; the returned value must be freed by the caller
+char* matchNextPrefix(STire* tire, char* pre) {
+  SMatch* match = NULL;
+
+  // re-use last result
+  if (lastMatch) {
+    if (strcmp(pre, lastMatch->pre) == 0) {
+      // same pre
+      match = lastMatch;
+    }
+  }
+
+  if (match == NULL) {
+    // not same as last result
+    if (pre[0] == 0) {
+      // EMPTY PRE
+      match = enumAll(tire);
+    } else {
+      // NOT EMPTY
+      match = matchPrefix(tire, pre, NULL);
+    }
+
+    // save to lastMatch
+    if (match) {
+      if (lastMatch)
+        freeMatch(lastMatch);
+      lastMatch = match;
+    }
+  }
+
+  // check valid
+  if (match == NULL || match->head == NULL) {
+    // nothing matched
+    return NULL;
+  }
+
+  if (cursorVar == -1) {
+    // first
+    cursorVar = 0;
+    return strdup(match->head->word);
+  }
+
+  // according to cursorVar, calculate the next one
+  int i = 0;
+  SMatchNode* item = match->head;
+  while (item) {
+    if (i == cursorVar + 1) {
+      // found the next position
+      if (item->next == NULL) {
+        // matched the last item, reset cursorVar to head
+        cursorVar = -1;
+      } else {
+        cursorVar = i;
+      }
+
+      return strdup(item->word);
+    }
+
+    // check end item
+    if (item->next == NULL) {
+      // if cursorVar > var list count, return the last one and reset cursorVar
+      cursorVar = -1;
+
+      return strdup(item->word);
+    }
+
+    // move next
+    item = item->next;
+    i++;
+  }
+
+  return NULL;
+}
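matchNextPrefix walks the match list with the global cursorVar, so each Tab press yields the next candidate and wraps around after the last one. A standalone sketch of just that wrap-around behaviour (hypothetical candidate list):

#include <stdio.h>

static const char *candidates[] = {"select", "set", "show"};
static int cursorVar = -1;                 // mirrors the global above

const char *nextCandidate(void) {
  int n = sizeof(candidates) / sizeof(candidates[0]);
  cursorVar = (cursorVar + 1) % n;         // advance, wrap after the last item
  return candidates[cursorVar];
}

int main() {
  for (int i = 0; i < 4; i++) printf("%s\n", nextCandidate());
  // prints: select set show select
  return 0;
}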
+// search the prefix word in the trie; the returned value must be freed by the caller
+char* tireSearchWord(int type, char* pre) {
+  if (type == WT_TEXT) {
+    return NULL;
+  }
+
+  if (type > WT_FROM_DB_MAX) {
+    // NOT FROM DB, tires[type] is always non-null
+    STire* tire = tires[type];
+    if (tire == NULL)
+      return NULL;
+    return matchNextPrefix(tire, pre);
+  }
+
+  // THIS TYPE IS OBTAINED FROM THE DB
+  pthread_mutex_lock(&tiresMutex);
+
+  // check whether it must be obtained from the server
+  if (tires[type] == NULL) {
+    waitAutoFill = true;
+    // need to asynchronously obtain var names from the db server
+    if (threads[type] != NULL) {
+      if (taosThreadRunning(threads[type])) {
+        // thread is running, no need to obtain again, return
+        pthread_mutex_unlock(&tiresMutex);
+        return NULL;
+      }
+      // destroy the previous thread handle before creating a new one
+      taosDestroyThread(threads[type]);
+      threads[type] = NULL;
+    }
+
+    // create new
+    void * param = malloc(sizeof(int));
+    *((int* )param) = type;
+    threads[type] = taosCreateThread(varObtainThread, param);
+    pthread_mutex_unlock(&tiresMutex);
+    return NULL;
+  }
+  pthread_mutex_unlock(&tiresMutex);
+
+  // var names can be obtained locally
+  STire* tire = getAutoPtr(type);
+  if (tire == NULL) {
+    return NULL;
+  }
+
+  char* str = matchNextPrefix(tire, pre);
+  // finished using it, put the pointer back to the autoptr array
+  putBackAutoPtr(type, tire);
+
+  return str;
+}
+
+// match var word: word1 is the pattern, word2 is the input from the shell
+bool matchVarWord(SWord* word1, SWord* word2) {
+  // search the input word in the trie
+  char pre[512];
+  memcpy(pre, word2->word, word2->len);
+  pre[word2->len] = 0;
+
+  char* str = NULL;
+  if (word1->type == WT_VAR_ALLTABLE) {
+    // ALL_TABLE
+    str = tireSearchWord(WT_VAR_STABLE, pre);
+    if (str == NULL) {
+      str = tireSearchWord(WT_VAR_TABLE, pre);
+      if (str == NULL)
+        return false;
+    }
+  } else {
+    // OTHER
+    str = tireSearchWord(word1->type, pre);
+    if (str == NULL) {
+      // not found, or the word1->type variable list has not been obtained from the server yet; report no match
+      return false;
+    }
+  }
+
+  // free previous malloc
+  if (word1->free && word1->word) {
+    free(word1->word);
+  }
+
+  // save
+  word1->word = str;
+  word1->len = strlen(str);
+  word1->free = true; // needs free
+
+  return true;
+}
+
+//
+// ------------------- match words --------------------------
+//
+
+
+// compare commands: cmd1 comes from shellCommands, cmd2 comes from user input
+int32_t compareCommand(SWords * cmd1, SWords * cmd2) {
+  SWord * word1 = cmd1->head;
+  SWord * word2 = cmd2->head;
+
+  if (word1 == NULL || word2 == NULL) {
+    return -1;
+  }
+
+  for (int32_t i = 0; i < cmd1->count; i++) {
+    if (word1->type == WT_TEXT) {
+      // WT_TEXT match
+      if (word1->len == word2->len) {
+        if (strncasecmp(word1->word, word2->word, word1->len) != 0)
+          return -1;
+      } else if (word1->len < word2->len) {
+        return -1;
+      } else {
+        // word1->len > word2->len, so the input may be a prefix of the template word
+        if (strncasecmp(word1->word, word2->word, word2->len) == 0) {
+          cmd1->matchIndex = i;
+          cmd1->matchLen = word2->len;
+          return i;
+        } else {
+          return -1;
+        }
+      }
+    } else {
+      // a WT_VAR pattern auto-matches any single word
+      if (word2->next == NULL) { // last of the input words
+        if (matchVarWord(word1, word2)) {
+          cmd1->matchIndex = i;
+          cmd1->matchLen = word2->len;
+          varMode = true;
+          return i;
+        }
+        return -1;
+      }
+    }
+
+    // move next
+    word1 = word1->next;
+    word2 = word2->next;
+    if (word1 == NULL || word2 == NULL) {
+      return -1;
+    }
+  }
+
+  return -1;
+}
+
+// match command
+SWords * matchCommand(SWords * input, bool continueSearch) {
+  int32_t count = SHELL_COMMAND_COUNT();
+  for (int32_t i = 0; i < count; i++) {
+    SWords * shellCommand = shellCommands + i;
+    if (continueSearch && lastMatchIndex != -1 && i <= lastMatchIndex) {
+      // a new match index must be greater than lastMatchIndex
+      if (varMode && i == lastMatchIndex) {
+        // do nothing, the var match stays on lastMatchIndex
+      } else {
+        continue;
+      }
+    }
+
+    // the input has more words than this command
+    if (input->count > shellCommand->count) {
+      continue;
+    }
+
+    // compare
+    int32_t index = compareCommand(shellCommand, input);
+    if (index != -1) {
+      if (firstMatchIndex == -1)
+        firstMatchIndex = i;
+      curMatchIndex = i;
+      return &shellCommands[i];
+    }
+  }
+
+  // no match
+  return NULL;
+}
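The WT_TEXT branch of compareCommand allows an input word shorter than the template word to match when it is a case-insensitive prefix, which is what makes partially typed words completable. A standalone sketch of that rule (prefixWordMatch is a made-up name):

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* an input word shorter than the template word matches
   when it is a case-insensitive prefix of it */
int prefixWordMatch(const char *tmpl, const char *input) {
  size_t il = strlen(input);
  if (il >= strlen(tmpl)) return 0;        // must be strictly shorter
  return strncasecmp(tmpl, input, il) == 0;
}

int main() {
  printf("%d\n", prefixWordMatch("table", "ta"));  // 1: completes to "table"
  printf("%d\n", prefixWordMatch("table", "tx"));  // 0: no match
  return 0;
}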
+//
+// ------------------- print screen --------------------------
+//
+
+// delete count chars
+void deleteCount(Command * cmd, int count) {
+  int size = 0;
+  int width = 0;
+  clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+
+  // loop delete
+  while (--count >= 0 && cmd->cursorOffset > 0) {
+    getPrevCharSize(cmd->command, cmd->cursorOffset, &size, &width);
+    memmove(cmd->command + cmd->cursorOffset - size, cmd->command + cmd->cursorOffset,
+            cmd->commandSize - cmd->cursorOffset);
+    cmd->commandSize -= size;
+    cmd->cursorOffset -= size;
+    cmd->screenOffset -= width;
+    cmd->endOffset -= width;
+  }
+}
+
+// show screen
+void printScreen(TAOS * con, Command * cmd, SWords * match) {
+  // modify Command
+  if (firstMatchIndex == -1 || curMatchIndex == -1) {
+    // no match
+    return ;
+  }
+
+  const char * str = NULL;
+  int strLen = 0;
+
+  if (firstMatchIndex == curMatchIndex && lastWordBytes == -1) {
+    // first tab press
+    SWord * word = MATCH_WORD(match);
+    str = word->word + match->matchLen;
+    strLen = word->len - match->matchLen;
+    lastMatchIndex = firstMatchIndex;
+    lastWordBytes = word->len;
+  } else {
+    if (lastWordBytes == -1)
+      return ;
+    deleteCount(cmd, lastWordBytes);
+
+    SWord * word = MATCH_WORD(match);
+    str = word->word;
+    strLen = word->len;
+    // set current to last
+    lastMatchIndex = curMatchIndex;
+    lastWordBytes = word->len;
+  }
+
+  // insert new
+  insertChar(cmd, (char *)str, strLen);
+}
+
+
+// main tab key press handler; returns true if matched, false otherwise
+bool firstMatchCommand(TAOS * con, Command * cmd) {
+  // parse command
+  SWords* input = (SWords *)malloc(sizeof(SWords));
+  memset(input, 0, sizeof(SWords));
+  input->source = cmd->command;
+  input->source_len = cmd->commandSize;
+  parseCommand(input, false);
+
+  // if there are many matches, default to the first; pressing tab again switches to the next
+  curMatchIndex = -1;
+  lastMatchIndex = -1;
+  SWords * match = matchCommand(input, true);
+  if (match == NULL) {
+    // no match, nothing to do
+    freeCommand(input);
+    free(input);
+    return false;
+  }
+
+  // print to screen
+  printScreen(con, cmd, match);
+  freeCommand(input);
+  free(input);
+  return true;
+}
+
+// create input source
+void createInputFromFirst(SWords* input, SWords * firstMatch) {
+  //
+  // on the next pressTabKey the input context comes from firstMatch, with the matched length recorded in source_len
+  //
+  input->source = (char*)malloc(1024);
+  memset((void* )input->source, 0, 1024);
+
+  SWord * word = firstMatch->head;
+
+  // source_len = lengths of the fully matched words + the partially matched firstMatch->matchLen
+  for (int i = 0; i < firstMatch->matchIndex && word; i++) {
+    // combine source from each word
+    strncpy(input->source + input->source_len, word->word, word->len);
+    strcat(input->source, " "); // append blank separator
+    input->source_len += word->len + 1; // 1 is the blank length
+    // move next
+    word = word->next;
+  }
+  // append the partially matched last word
+  if (word) {
+    strncpy(input->source + input->source_len, word->word, firstMatch->matchLen);
+    input->source_len += firstMatch->matchLen;
+  }
+}
+
+// pressing the Tab key again is called 'next'; returns true if matched, false otherwise
+bool nextMatchCommand(TAOS * con, Command * cmd, SWords * firstMatch) {
+  if (firstMatch == NULL || firstMatch->head == NULL) {
+    return false;
+  }
+  SWords* input = (SWords *)malloc(sizeof(SWords));
+  memset(input, 0, sizeof(SWords));
+
+  // create input from firstMatch
+  createInputFromFirst(input, firstMatch);
+
+  // parse input
+  parseCommand(input, false);
+
+  // if there are many matches, default to the first; pressing tab again switches to the next
+  SWords * match = matchCommand(input, true);
+  if (match == NULL) {
+    // if there is no match, reset all indexes
+    firstMatchIndex = -1;
+    curMatchIndex = -1;
+    match = matchCommand(input, false);
+    if (match == NULL) {
+      freeCommand(input);
+      if (input->source)
+        free(input->source);
+      free(input);
+      return false;
+    }
+  }
+
+  // print to screen
+  printScreen(con, cmd, match);
+
+  // free
+  if (input->source) {
+    free(input->source);
+    input->source = NULL;
+  }
+  freeCommand(input);
+  free(input);
+
+  return true;
+}
+// fill with the given type
+bool fillWithType(TAOS * con, Command * cmd, char* pre, int type) {
+  // get type
+  STire* tire = tires[type];
+  char* str = matchNextPrefix(tire, pre);
+  if (str == NULL) {
+    return false;
+  }
+
+  // only the remaining part of the string needs to be inserted
+  char * part = str + strlen(pre);
+
+  // show
+  int count = strlen(part);
+  insertChar(cmd, part, count);
+  cntDel = count; // the next tab press deletes the count appended now
+
+  free(str);
+  return true;
+}
+
+// fill table name
+bool fillTableName(TAOS * con, Command * cmd, char* pre) {
+  // search stable and table
+  char * str = tireSearchWord(WT_VAR_STABLE, pre);
+  if (str == NULL) {
+    str = tireSearchWord(WT_VAR_TABLE, pre);
+    if (str == NULL)
+      return false;
+  }
+
+  // only the remaining part of the string needs to be inserted
+  char * part = str + strlen(pre);
+
+  // delete what the last autofill appended
+  if (cntDel > 0) {
+    deleteCount(cmd, cntDel);
+    cntDel = 0;
+  }
+
+  // show
+  int count = strlen(part);
+  insertChar(cmd, part, count);
+  cntDel = count; // the next tab press deletes the count appended now
+
+  free(str);
+  return true;
+}
+
+//
+// find the last word of a sql select clause
+// examples:
+//   1 select cou          -> press tab   select count(
+//   2 select count(*),su  ->             select count(*), sum(
+//   3 select count(*), su ->             select count(*), sum(
+//
+char * lastWord(char * p) {
+  // search backwards from the end for ' ' and ','
+  char * p1 = strrchr(p, ' ');
+  char * p2 = strrchr(p, ',');
+
+  if (p1 && p2) {
+    return MAX(p1, p2) + 1;
+  } else if (p1) {
+    return p1 + 1;
+  } else if (p2) {
+    return p2 + 1;
+  } else {
+    return p;
+  }
+}
+
+bool fieldsInputEnd(char* sql) {
+  // must not be inside '()'
+  char* p1 = strrchr(sql, '(');
+  char* p2 = strrchr(sql, ')');
+  if (p1 && p2 == NULL) {
+    // like 'select count( '
+    return false;
+  } else if (p1 && p2 && p1 > p2) {
+    // like 'select sum(age), count( '
+    return false;
+  }
+
+  // check the last ','
+  char * p3 = strrchr(sql, ',');
+  char * p = p3;
+  // like 'select ts, age, '
+  if (p) {
+    ++p;
+    bool allBlank = true; // whether every char after the last ',' is blank
+    int cnt = 0; // blank count
+    char * plast = NULL; // last blank position
+    while (*p) {
+      if (*p == ' ') {
+        plast = p;
+        cnt++;
+      } else {
+        allBlank = false;
+      }
+      ++p;
+    }
+
+    // everything after the last ',' is blank, so the field list is not finished
+    if (allBlank) {
+      return false;
+    }
+
+    // like 'select count(*),sum(age) fr' must return true
+    if (plast && plast > p3 && p2 > p1 && plast > p2 && p1 > p3) {
+      return true;
+    }
+
+    // if the last char is not ' ', the field is not finished; e.g. 'select count(*), su' can still be filled to sum(
+    if (sql[strlen(sql) - 1] != ' ' && cnt <= 1) {
+      return false;
+    }
+  }
+
+  char * p4 = strrchr(sql, ' ');
+  if (p4 == NULL) {
+    // only one word
+    return false;
+  }
+
+  return true;
+}
+
+// check whether the 'from' keyword needs to be inserted
+bool needInsertFrom(char * sql, int len) {
+  // the last char must be blank
+  if (sql[len-1] != ' ') {
+    // cannot insert the 'from' keyword yet
+    return false;
+  }
+
+  // the select fields input must be finished
+  if (!fieldsInputEnd(sql)) {
+    return false;
+  }
+
+  // the 'from' keyword can be inserted
+  return true;
+}
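lastWord picks the completion target as whatever follows the later of the last blank and the last comma, as the examples in the comment show. The same rule as a runnable sketch (lastWordDemo is a made-up name; the MAX macro is replaced by a ternary):

#include <stdio.h>
#include <string.h>

const char *lastWordDemo(const char *p) {
  const char *p1 = strrchr(p, ' ');
  const char *p2 = strrchr(p, ',');
  if (p1 && p2) return (p1 > p2 ? p1 : p2) + 1;
  if (p1) return p1 + 1;
  if (p2) return p2 + 1;
  return p;
}

int main() {
  printf("%s\n", lastWordDemo("select cou"));          // "cou" -> completes to count(
  printf("%s\n", lastWordDemo("select count(*),su"));  // "su"  -> completes to sum(
  return 0;
}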
+bool matchSelectQuery(TAOS * con, Command * cmd) {
+  // if Tab is pressed again, delete the bytes appended by the previous autofill
+  if (cntDel > 0) {
+    deleteCount(cmd, cntDel);
+    cntDel = 0;
+  }
+
+  // match select ...
+  int len = cmd->commandSize;
+  char * p = cmd->command;
+
+  // skip leading blanks
+  while (p[0] == ' ' && len > 0) {
+    p++;
+    len--;
+  }
+
+  // length sanity check
+  if (len < 7 || len > 512) {
+    return false;
+  }
+
+  // must start with 'select '
+  if (strncasecmp(p, "select ", 7) != 0) {
+    // not a select query clause
+    return false;
+  }
+  p += 7;
+  len -= 7;
+
+  char* ps = p = strndup(p, len);
+
+  // skip past any 'union all'
+  char * p1;
+  do {
+    p1 = strstr(p, UNION_ALL);
+    if (p1) {
+      p = p1 + strlen(UNION_ALL);
+    }
+  } while (p1);
+
+  char * from = strstr(p, " from ");
+  // last word: maybe an empty string or the first letters of a word
+  char * last = lastWord(p);
+  bool ret = false;
+  if (from == NULL) {
+    bool fieldEnd = fieldsInputEnd(p);
+    // if the fields input has ended, insert the 'from' keyword
+    if (fieldEnd && p[len-1] == ' ') {
+      insertChar(cmd, "from", 4);
+      free(ps);
+      return true;
+    }
+
+    // fill function or keyword
+    if (fieldEnd) {
+      // fields are finished, match keywords next
+      ret = fillWithType(con, cmd, last, WT_VAR_KEYWORD);
+    } else {
+      ret = fillWithType(con, cmd, last, WT_VAR_FUNC);
+    }
+
+    free(ps);
+    return ret;
+  }
+
+  // 'from' is present
+  char * blank = strstr(from + 6, " ");
+  if (blank == NULL) {
+    // no table name yet, fill it
+    ret = fillTableName(con, cmd, last);
+  } else {
+    ret = fillWithType(con, cmd, last, WT_VAR_KEYWORD);
+  }
+
+  free(ps);
+  return ret;
+}
+
+// return true if the input position is inside a create fields or tags area
+bool isCreateFieldsArea(char * p) {
+  char * left = strrchr(p, '(');
+  if (left == NULL) {
+    // like 'create table st'
+    return false;
+  }
+
+  char * right = strrchr(p, ')');
+  if (right == NULL) {
+    // like 'create table st( '
+    return true;
+  }
+
+  if (left > right) {
+    // like 'create table st( ts timestamp, age int) tags(area '
+    return true;
+  }
+
+  return false;
+}
+
+bool matchCreateTable(TAOS * con, Command * cmd) {
+  // if Tab is pressed again, delete the bytes appended by the previous autofill
+  if (cntDel > 0) {
+    deleteCount(cmd, cntDel);
+    cntDel = 0;
+  }
+
+  // match create table ...
+  int len = cmd->commandSize;
+  char * p = cmd->command;
+
+  // skip leading blanks
+  while (p[0] == ' ' && len > 0) {
+    p++;
+    len--;
+  }
+
+  // length sanity check
+  if (len < 7 || len > 1024) {
+    return false;
+  }
+
+  // must start with 'create table '
+  if (strncasecmp(p, "create table ", 13) != 0) {
+    // not a create table clause
+    return false;
+  }
+  p += 13;
+  len -= 13;
+
+  char* ps = strndup(p, len);
+  bool ret = false;
+  char * last = lastWord(ps);
+
+  // check whether the input is inside the create fields or tags area
+  if (isCreateFieldsArea(ps)) {
+    ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE);
+  }
+
+  // tags
+  if (!ret) {
+    // if there is only one ')', the tags keyword can be inserted
+    char * p1 = strchr(ps, ')');
+    if (p1) {
+      if (strchr(p1 + 1, ')') == NULL && strstr(p1 + 1, "tags") == NULL) {
+        // the tags keyword can be inserted
+        ret = fillWithType(con, cmd, last, WT_VAR_KEYTAGS);
+      }
+    }
+  }
+
+  free(ps);
+  return ret;
+}
+
+bool matchOther(TAOS * con, Command * cmd) {
+  int len = cmd->commandSize;
+  char* p = cmd->command;
+
+  if (p[len - 1] == '\\') {
+    // append '\G'
+    char a[] = "G;";
+    insertChar(cmd, a, 2);
+    return true;
+  }
+
+  return false;
+}
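isCreateFieldsArea reduces to comparing the positions of the last '(' and ')'. The same test as a standalone sketch (inFieldsArea is a made-up name; the sample strings mirror the comments above):

#include <stdio.h>
#include <string.h>

int inFieldsArea(const char *p) {
  const char *l = strrchr(p, '(');
  const char *r = strrchr(p, ')');
  if (l == NULL) return 0;   // "create table st"
  if (r == NULL) return 1;   // "create table st( "
  return l > r;              // "... age int) tags(area "
}

int main() {
  printf("%d\n", inFieldsArea("create table st"));                         // 0
  printf("%d\n", inFieldsArea("create table st( ts timestamp, "));         // 1
  printf("%d\n", inFieldsArea("create table st( ts timestamp) tags(a ")); // 1
  return 0;
}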
+
+// main tab key press entry
+void pressTabKey(TAOS * con, Command * cmd) {
+  // check
+  if (cmd->commandSize == 0) {
+    // empty command, show help
+    showHelp();
+    showOnScreen(cmd);
+    return ;
+  }
+
+  // save the connection to globals
+  varCon = con;
+  varCmd = cmd;
+  bool matched = false;
+
+  // manual match like create table st( ...
+  matched = matchCreateTable(con, cmd);
+  if (matched)
+    return ;
+
+  // shellCommands match
+  if (firstMatchIndex == -1) {
+    matched = firstMatchCommand(con, cmd);
+  } else {
+    matched = nextMatchCommand(con, cmd, &shellCommands[firstMatchIndex]);
+  }
+  if (matched)
+    return ;
+
+  // NOTHING MATCHED SO FAR
+  // match others like '\G' ...
+  matched = matchOther(con, cmd);
+  if (matched)
+    return ;
+
+  // manual match like select * from ...
+  matched = matchSelectQuery(con, cmd);
+  if (matched)
+    return ;
+
+  return ;
+}
+
+// any other key press resets the match state
+void pressOtherKey(char c) {
+  // reset global variables
+  firstMatchIndex = -1;
+  lastMatchIndex = -1;
+  curMatchIndex = -1;
+  lastWordBytes = -1;
+
+  // var names
+  cursorVar = -1;
+  varMode = false;
+  waitAutoFill = false;
+  cntDel = 0;
+
+  if (lastMatch) {
+    freeMatch(lastMatch);
+    lastMatch = NULL;
+  }
+}
+
+// copy the next word into name, return the name length
+int getWordName(char* p, char * name, int nameLen) {
+  // skip leading blanks
+  while (*p == ' ') {
+    p++;
+  }
+
+  // extract the name
+  int i = 0;
+  while (p[i] != 0 && i < nameLen - 1) {
+    name[i] = p[i];
+    i++;
+    if (p[i] == ' ' || p[i] == ';' || p[i] == '(') {
+      // name end
+      break;
+    }
+  }
+  name[i] = 0;
+
+  return i;
+}
+
+// handle 'use db'; return true if the sql starts with 'use'
+bool dealUseDB(char * sql) {
+  // check the use keyword
+  if (strncasecmp(sql, "use ", 4) != 0) {
+    return false;
+  }
+
+  char db[256];
+  char *p = sql + 4;
+  if (getWordName(p, db, sizeof(db)) == 0) {
+    // no name, return
+    return true;
+  }
+
+  // dbName holds the previously opened db name
+  if (strcasecmp(db, dbName) == 0) {
+    // same db, no need to switch
+    return true;
+  }
+
+  // switch to the new db: invalidate the cached table names
+  pthread_mutex_lock(&tiresMutex);
+  // STABLE set null
+  STire* tire = tires[WT_VAR_STABLE];
+  tires[WT_VAR_STABLE] = NULL;
+  if (tire) {
+    freeTire(tire);
+  }
+  // TABLE set null
+  tire = tires[WT_VAR_TABLE];
+  tires[WT_VAR_TABLE] = NULL;
+  if (tire) {
+    freeTire(tire);
+  }
+  // save
+  strcpy(dbName, db);
+  pthread_mutex_unlock(&tiresMutex);
+
+  return true;
+}
+
+// handle 'create ...'; return true if the sql starts with 'create'
+bool dealCreateCommand(char * sql) {
+  // check the keyword
+  if (strncasecmp(sql, "create ", 7) != 0) {
+    return false;
+  }
+
+  char name[1024];
+  char *p = sql + 7;
+  if (getWordName(p, name, sizeof(name)) == 0) {
+    // no name, return
+    return true;
+  }
+
+  int type = -1;
+  // map the object keyword to a variable type
+  if (strcasecmp(name, "database") == 0) {
+    type = WT_VAR_DBNAME;
+  } else if (strcasecmp(name, "table") == 0) {
+    if (strstr(sql, " tags") != NULL && strstr(sql, " using ") == NULL)
+      type = WT_VAR_STABLE;
+    else
+      type = WT_VAR_TABLE;
+  } else if (strcasecmp(name, "user") == 0) {
+    type = WT_VAR_USERNAME;
+  } else {
+    // no match, return
+    return true;
+  }
+
+  // move next
+  p += strlen(name);
+
+  // get the next word, which is the object name
+  if (getWordName(p, name, sizeof(name)) == 0) {
+    // no name, return
+    return true;
+  }
+
+  // add the new name to the matching cache
+  pthread_mutex_lock(&tiresMutex);
+  STire* tire = tires[type];
+  if (tire) {
+    insertWord(tire, name);
+  }
+  pthread_mutex_unlock(&tiresMutex);
+
+  return true;
+}
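dealCreateCommand keys the cache update on the word after 'create': a table with a tags clause but no 'using' clause is treated as a super table. A standalone sketch of just that dispatch (createTarget is a made-up name that returns the type as a string for display):

#include <stdio.h>
#include <string.h>
#include <strings.h>

const char *createTarget(const char *obj, const char *sql) {
  if (strcasecmp(obj, "database") == 0) return "WT_VAR_DBNAME";
  if (strcasecmp(obj, "table") == 0)
    return (strstr(sql, " tags") && !strstr(sql, " using ")) ? "WT_VAR_STABLE"
                                                             : "WT_VAR_TABLE";
  if (strcasecmp(obj, "user") == 0) return "WT_VAR_USERNAME";
  return "none";
}

int main() {
  printf("%s\n", createTarget("table", "create table st(ts timestamp) tags(a int)")); // WT_VAR_STABLE
  printf("%s\n", createTarget("table", "create table t1 using st tags(1)"));          // WT_VAR_TABLE
  return 0;
}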
+// handle 'drop ...'; return true if the sql starts with 'drop'
+bool dealDropCommand(char * sql) {
+  // check the keyword
+  if (strncasecmp(sql, "drop ", 5) != 0) {
+    return false;
+  }
+
+  char name[1024];
+  char *p = sql + 5;
+  if (getWordName(p, name, sizeof(name)) == 0) {
+    // no name, return
+    return true;
+  }
+
+  int type = -1;
+  // map the object keyword to a variable type
+  if (strcasecmp(name, "database") == 0) {
+    type = WT_VAR_DBNAME;
+  } else if (strcasecmp(name, "table") == 0) {
+    type = WT_VAR_ALLTABLE;
+  } else if (strcasecmp(name, "dnode") == 0) {
+    type = WT_VAR_DNODEID;
+  } else if (strcasecmp(name, "user") == 0) {
+    type = WT_VAR_USERNAME;
+  } else {
+    // no match, return
+    return true;
+  }
+
+  // move next
+  p += strlen(name);
+
+  // get the next word, which is the object name
+  if (getWordName(p, name, sizeof(name)) == 0) {
+    // no name, return
+    return true;
+  }
+
+  // remove the name from the matching cache
+  pthread_mutex_lock(&tiresMutex);
+  // for ALLTABLE, try the stable cache first, then the table cache
+  if (type == WT_VAR_ALLTABLE) {
+    bool del = false;
+    // del in stable
+    STire* tire = tires[WT_VAR_STABLE];
+    if (tire)
+      del = deleteWord(tire, name);
+    // del in table
+    if (!del) {
+      tire = tires[WT_VAR_TABLE];
+      if (tire)
+        del = deleteWord(tire, name);
+    }
+  } else {
+    // OTHER TYPE
+    STire* tire = tires[type];
+    if (tire)
+      deleteWord(tire, name);
+  }
+  pthread_mutex_unlock(&tiresMutex);
+
+  return true;
+}
+
+// callback into the autotab module after the shell executes a sql statement
+void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) {
+  char * sql = sqlstr;
+  // skip leading blanks
+  while (*sql == ' ') {
+    sql++;
+  }
+
+  if (dealUseDB(sql)) {
+    // changed to a new db
+    return ;
+  }
+
+  // a create command adds the name to autotab
+  if (dealCreateCommand(sql)) {
+    return ;
+  }
+
+  // a drop command removes the name from autotab
+  if (dealDropCommand(sql)) {
+    return ;
+  }
+
+  return ;
+}
diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c
index d78e152dbdbc5c0144c65d50a32daadbce1cf534..2fe09691e3285c2e3031672404b0aa6ed7bac244 100644
--- a/src/kit/shell/src/shellCommand.c
+++ b/src/kit/shell/src/shellCommand.c
@@ -79,8 +79,13 @@ void insertChar(Command *cmd, char *c, int size) {
   /* update the values */
   cmd->commandSize += size;
   cmd->cursorOffset += size;
-  cmd->screenOffset += wcwidth(wc);
-  cmd->endOffset += wcwidth(wc);
+  for (int i = 0; i < size;) {
+    int n = mbtowc(&wc, c + i, size - i);
+    if (n <= 0) break;  // invalid sequence, stop counting width
+    cmd->screenOffset += wcwidth(wc);
+    cmd->endOffset += wcwidth(wc);
+    i += n;  // advance by the bytes consumed, not by one byte
+  }
   showOnScreen(cmd);
 }
 
@@ -179,6 +184,16 @@ void positionCursorHome(Command *cmd) {
   }
 }
 
+void positionCursorMiddle(Command *cmd) {
+  if (cmd->endOffset > 0) {
+    clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+    cmd->cursorOffset = cmd->commandSize/2;
+    cmd->screenOffset = cmd->endOffset/2;
+    showOnScreen(cmd);
+  }
+}
+
+
 void positionCursorEnd(Command *cmd) {
   assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
 
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index f5e5b8728011d102090caf0cb00c3b316e93028a..0108d92e8f6a73317bc510857e17d8c81a62d512 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -22,6 +22,7 @@
 #include "tkey.h"
 
 #include "tscLog.h"
+#include "shellAuto.h"
 
 #define OPT_ABORT 1 /* –abort */
 
@@ -62,8 +63,8 @@ void printHelp() {
   printf("%s%s%s\n", indent, indent, "Number of threads when using multi-thread to import data.");
   printf("%s%s\n", indent, "-R");
   printf("%s%s%s\n", indent, indent, "Connect and interact with TDengine use restful.");
-  printf("%s%s\n", indent, "-t");
-  printf("%s%s%s\n", indent, indent, "The token to use when connecting TDengine's cloud services.");
+  printf("%s%s\n", indent, "-E");
+  printf("%s%s%s\n", indent, indent, "The DSN to use when connecting TDengine's cloud services.");
 
   exit(EXIT_SUCCESS);
 }
@@ -76,20 +77,13 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
 
   for (int i = 1; i < argc; i++) {
     // for host
     if (strcmp(argv[i], "-h") == 0) {
-      if (i <
argc - 1) { - char* arg = argv[++i]; - char* tmp = strstr(arg, ":"); - if (tmp == NULL) { - arguments->host = arg; - } else if ((tmp + 1) != NULL) { - arguments->port = atoi(tmp + 1); - tmp[0] = '\0'; - arguments->host = arg; - } - } else { - fprintf(stderr, "option -h requires an argument\n"); - exit(EXIT_FAILURE); - } + if (i < argc - 1) { + arguments->cloud = false; + arguments->host = argv[++i]; + } else { + fprintf(stderr, "option -h requires an argument\n"); + exit(EXIT_FAILURE); + } } // for password else if ((strncmp(argv[i], "-p", 2) == 0) @@ -116,6 +110,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { + arguments->cloud = false; arguments->port = atoi(argv[++i]); } else { fprintf(stderr, "option -P requires an argument\n"); @@ -132,6 +127,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } else if (strcmp(argv[i], "-c") == 0) { if (i < argc - 1) { + arguments->cloud = false; if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); @@ -203,14 +199,15 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } else if (strcmp(argv[i], "-R") == 0) { + arguments->cloud = false; arguments->restful = true; } - else if (strcmp(argv[i], "-t") == 0) { + else if (strcmp(argv[i], "-E") == 0) { if (i < argc - 1) { - arguments->token = argv[++i]; + arguments->cloudDsn = argv[++i]; } else { - fprintf(stderr, "options -t requires an argument\n"); + fprintf(stderr, "options -E requires an argument\n"); exit(EXIT_FAILURE); } } @@ -225,6 +222,16 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } + if (args.cloudDsn == NULL) { + if (args.cloud) { + args.cloudDsn = getenv("TDENGINE_CLOUD_DSN"); + if (args.cloudDsn == NULL) { + args.cloud = false; + } + } + } else { + args.cloud = true; + } } int32_t shellReadCommand(TAOS *con, char *command) { @@ -249,7 +256,12 @@ int32_t shellReadCommand(TAOS *con, char *command) { utf8_array[k] = c; } insertChar(&cmd, utf8_array, count); + pressOtherKey(c); + } else if (c == TAB_KEY) { + // press TAB key + pressTabKey(con, &cmd); } else if (c < '\033') { + pressOtherKey(c); // Ctrl keys. 
TODO: Implement ctrl combinations switch (c) { case 1: // ctrl A @@ -295,6 +307,9 @@ int32_t shellReadCommand(TAOS *con, char *command) { case 21: // Ctrl + U clearLineBefore(&cmd); break; + case 23: // Ctrl + W; + positionCursorMiddle(&cmd); + break; } } else if (c == '\033') { c = getchar(); @@ -371,9 +386,11 @@ int32_t shellReadCommand(TAOS *con, char *command) { break; } } else if (c == 0x7f) { + pressOtherKey(c); // press delete key backspaceChar(&cmd); } else { + pressOtherKey(c); insertChar(&cmd, &c, 1); } } @@ -573,23 +590,25 @@ void exitShell() { exit(EXIT_SUCCESS); } -int tcpConnect() { +int tcpConnect(char* host, int port) { struct sockaddr_in serv_addr; - if (args.port == 0) { + if (port == 0) { + port = 6041; args.port = 6041; } - if (NULL == args.host) { + if (NULL == host) { + host = "localhost"; args.host = "localhost"; } - struct hostent *server = gethostbyname(args.host); + struct hostent *server = gethostbyname(host); if ((server == NULL) || (server->h_addr == NULL)) { - fprintf(stderr, "no such host: %s\n", args.host); + fprintf(stderr, "no such host: %s\n", host); return -1; } memset(&serv_addr, 0, sizeof(struct sockaddr_in)); serv_addr.sin_family = AF_INET; - serv_addr.sin_port = htons(args.port); + serv_addr.sin_port = htons(port); memcpy(&(serv_addr.sin_addr.s_addr), server->h_addr, server->h_length); args.socket = socket(AF_INET, SOCK_STREAM, 0); if (args.socket < 0) { diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index b4c9aee4eab3e3bdc76ae8e585490b4cccda99a1..b0c3f4934d2d7180413b2e5a3463b8eb9da8428a 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -27,6 +27,7 @@ #include "tglobal.h" #include "tsclient.h" #include "cJSON.h" +#include "shellAuto.h" #include @@ -72,8 +73,7 @@ void shellInit(SShellArguments *_args) { _args->user = TSDB_DEFAULT_USER; } - if (_args->restful) { - _args->database = calloc(1, 128); + if (_args->restful || _args->cloud) { if (wsclient_handshake()) { exit(EXIT_FAILURE); } @@ -159,7 +159,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { // Analyse the command. if (regex_match(command, "^[ \t]*(quit|q|exit)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { - if (args.restful) { + if (args.restful || args.cloud) { close(args.socket); } else { taos_close(con); @@ -309,7 +309,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { printMode = true; // When output to a file, the switch does not work. 
} - if (args.restful) { + if (args.restful || args.cloud) { wsclient_query(command); return; } @@ -324,10 +324,16 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { int64_t oresult = atomic_load_64(&result); - if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { + if (regex_match(command, "^\\s*use\\s+([a-zA-Z0-9_]+|`.+`)\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\n\n"); fflush(stdout); +#ifndef WINDOWS + // call back auto tab module + callbackAutoTab(command, pSql, true); +#endif + + atomic_store_64(&result, 0); freeResultWithRid(oresult); return; @@ -366,6 +372,11 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { int num_rows_affacted = taos_affected_rows(pSql); et = taosGetTimestampUs(); printf("Query OK, %d of %d row(s) in database (%.6fs)\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6); + +#ifndef WINDOWS + // call auto tab + callbackAutoTab(command, pSql, false); +#endif } printf("\n"); @@ -1153,18 +1164,39 @@ int taos_base64_encode(unsigned char *source, size_t sourcelen, char *target, si return 1; } -char *last_strstr(const char *haystack, const char *needle) { - if (*needle == '\0') - return (char *) haystack; - - char *res = NULL; - for (;;) { - char *p = strstr(haystack, needle); - if (p == NULL) break; - res = p; - haystack = p + 1; +int parse_cloud_dsn() { + if (args.cloudDsn == NULL) { + fprintf(stderr, "Cannot read cloud service info\n"); + return 1; + } else { + char *start = strstr(args.cloudDsn, "http://"); + if (start != NULL) { + args.cloudHost = start + strlen("http://"); + } else { + start = strstr(args.cloudDsn, "https://"); + if (start != NULL) { + args.cloudHost = start + strlen("https://"); + } else { + args.cloudHost = args.cloudDsn; + } + } + char *port = strstr(args.cloudHost, ":"); + if (port == NULL) { + fprintf(stderr, "Invalid format in TDengine cloud dsn: %s\n", args.cloudDsn); + return 1; + } + char *token = strstr(port + strlen(":"), "?token="); + if ((token == NULL) || + (strlen(token + strlen("?token=")) == 0)) { + fprintf(stderr, "Invalid format in TDengine cloud dsn: %s\n", args.cloudDsn); + return -1; + } + port[0] = '\0'; + args.cloudPort = port + strlen(":"); + token[0] = '\0'; + args.cloudToken = token + strlen("?token="); } - return res; + return 0; } int wsclient_handshake() { @@ -1180,12 +1212,12 @@ int wsclient_handshake() { key_nonce[i] = rand() & 0xff; } taos_base64_encode(key_nonce, 16, websocket_key, 256); - if (args.token) { - snprintf(request_header, 1024, - "GET /rest/ws?token=%s HTTP/1.1\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nHost: " - "%s:%d\r\nSec-WebSocket-Key: " - "%s\r\nSec-WebSocket-Version: 13\r\n\r\n", - args.token, args.host, args.port, websocket_key); + if (args.cloud) { + snprintf(request_header, 1024, + "GET /rest/ws?token=%s HTTP/1.1\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nHost: " + "%s:%s\r\nSec-WebSocket-Key: " + "%s\r\nSec-WebSocket-Version: 13\r\n\r\n", + args.cloudToken, args.cloudHost, args.cloudPort, websocket_key); } else { snprintf(request_header, 1024, "GET /rest/ws HTTP/1.1\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nHost: %s:%d\r\nSec-WebSocket-Key: " @@ -1286,9 +1318,9 @@ int wsclient_send_sql(char *command, WS_ACTION_TYPE type, int id) { switch (type) { case WS_CONN: cJSON_AddStringToObject(json, "action", "conn"); - cJSON_AddStringToObject(_args, "user", "root"); - cJSON_AddStringToObject(_args, "password", "taosdata"); - cJSON_AddStringToObject(_args, "db", ""); + 
cJSON_AddStringToObject(_args, "user", args.user); + cJSON_AddStringToObject(_args, "password", args.password); + cJSON_AddStringToObject(_args, "db", args.database); break; case WS_QUERY: @@ -1341,6 +1373,12 @@ int wsclient_conn() { } if (code->valueint == 0) { cJSON_Delete(root); + if (args.cloud) { + fprintf(stdout, "Successfully connect to %s:%s in restful mode\n\n", args.cloudHost, args.cloudPort); + } else { + fprintf(stdout, "Successfully connect to %s:%d in restful mode\n\n", args.host, args.port); + } + return 0; } else { cJSON *message = cJSON_GetObjectItem(root, "message"); diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index b3a07b257cbfdd639d6834e7981fb10e89e43512..e74c31729f07925bb130fb52a293f493fcfc5ccb 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -93,8 +93,8 @@ static void shellCheckTablesSQLFile(const char *directoryName) { sprintf(shellTablesSQLFile, "%s/tables.sql", directoryName); - struct stat fstat; - if (stat(shellTablesSQLFile, &fstat) < 0) { + struct stat status; + if (stat(shellTablesSQLFile, &status) < 0) { shellTablesSQLFile[0] = 0; } } diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index b9a8005ec71749a2ec256db3174449066d3fa241..fd24a61c7d55c8a91ac9631a7263ca44b81d1606 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -20,11 +20,11 @@ #include "shellCommand.h" #include "tkey.h" #include "tulog.h" +#include "shellAuto.h" #define OPT_ABORT 1 /* �Cabort */ int indicator = 1; -int p_port = 6041; struct termios oldtio; extern int wcwidth(wchar_t c); @@ -54,7 +54,7 @@ static struct argp_option options[] = { {"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."}, {"pkttype", 'S', "PKTTYPE", 0, "Choose packet type used for net test, default is TCP. 
Only speed test could be either TCP or UDP."}, {"restful", 'R', 0, 0, "Connect and interact with TDengine use restful."}, - {"token", 't', "TOKEN", 0, "The token to use when connecting TDengine's cloud services."}, + {0, 'E', "DSN", 0, "The DSN to use when connecting TDengine's cloud services."}, {0}}; static error_t parse_opt(int key, char *arg, struct argp_state *state) { @@ -64,23 +64,22 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { wordexp_t full_path; switch (key) { - case 'h':{ - char* tmp = strstr(arg, ":"); - if (tmp == NULL) { - arguments->host = arg; - } else if ((tmp + 1) != NULL) { - arguments->port = atoi(tmp + 1); - tmp[0] = '\0'; - arguments->host = arg; - } - break; - } + case 'h': + if (arg) { + args.cloud = false; + args.host = arg; + } else { + fprintf(stderr, "Invalid host\n"); + return -1; + } + break; case 'p': break; case 'P': if (arg) { + args.cloud = false; tsDnodeShellPort = atoi(arg); - p_port = atoi(arg); + args.port = atoi(arg); } else { fprintf(stderr, "Invalid port\n"); return -1; @@ -106,6 +105,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { wordfree(&full_path); return -1; } + args.cloud = false; tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; @@ -173,11 +173,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case OPT_ABORT: arguments->abort = 1; break; - case 't': - arguments->token = arg; - break; case 'R': arguments->restful = true; + arguments->cloud = false; + break; + case 'E': + if (arg) { + arguments->cloudDsn = arg; + } else { + fprintf(stderr, "Invalid -E option\n"); + return -1; + } break; default: return ARGP_ERR_UNKNOWN; @@ -231,10 +237,18 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } argp_parse(&argp, argc, argv, 0, 0, arguments); - if (arguments->token == NULL) { - arguments->port = p_port; + + if (args.cloudDsn == NULL) { + if (args.cloud) { + args.cloudDsn = getenv("TDENGINE_CLOUD_DSN"); + if (args.cloudDsn == NULL) { + args.cloud = false; + } + } + } else { + args.cloud = true; } - + if (arguments->abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); @@ -270,7 +284,12 @@ int32_t shellReadCommand(TAOS *con, char *command) { utf8_array[k] = c; } insertChar(&cmd, utf8_array, count); + pressOtherKey(c); + } else if (c == TAB_KEY) { + // press TAB key + pressTabKey(con, &cmd); } else if (c < '\033') { + pressOtherKey(c); // Ctrl keys. 
TODO: Implement ctrl combinations switch (c) { case 1: // ctrl A @@ -316,8 +335,12 @@ int32_t shellReadCommand(TAOS *con, char *command) { case 21: // Ctrl + U; clearLineBefore(&cmd); break; + case 23: // Ctrl + W; + positionCursorMiddle(&cmd); + break; } } else if (c == '\033') { + pressOtherKey(c); c = (char)getchar(); switch (c) { case '[': @@ -392,9 +415,11 @@ int32_t shellReadCommand(TAOS *con, char *command) { break; } } else if (c == 0x7f) { + pressOtherKey(c); // press delete key backspaceChar(&cmd); } else { + pressOtherKey(c); insertChar(&cmd, &c, 1); } } @@ -595,23 +620,25 @@ void exitShell() { exit(EXIT_SUCCESS); } -int tcpConnect() { +int tcpConnect(char* host, int port) { struct sockaddr_in serv_addr; - if (args.port == 0) { + if (port == 0) { + port = 6041; args.port = 6041; } - if (NULL == args.host) { + if (NULL == host) { + host = "localhost"; args.host = "localhost"; } - struct hostent *server = gethostbyname(args.host); + struct hostent *server = gethostbyname(host); if ((server == NULL) || (server->h_addr == NULL)) { - fprintf(stderr, "no such host: %s\n", args.host); + fprintf(stderr, "no such host: %s\n", host); return -1; } memset(&serv_addr, 0, sizeof(struct sockaddr_in)); serv_addr.sin_family = AF_INET; - serv_addr.sin_port = htons(args.port); + serv_addr.sin_port = htons(port); memcpy(&(serv_addr.sin_addr.s_addr), server->h_addr, server->h_length); args.socket = socket(AF_INET, SOCK_STREAM, 0); if (args.socket < 0) { diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index ecd6e22b72de835e4f28f6bcbd6fe907bc6702fe..2dcf1052168e0de384ed666fa2f7c0044f5f618b 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -17,6 +17,8 @@ #include "shell.h" #include "tconfig.h" #include "tnettest.h" +#include "shellCommand.h" +#include "shellAuto.h" pthread_t pid; static tsem_t cancelSem; @@ -76,7 +78,6 @@ SShellArguments args = {.host = NULL, .database = NULL, .timezone = NULL, .restful = false, - .token = NULL, .is_raw_time = false, .is_use_passwd = false, .dump_config = false, @@ -87,7 +88,12 @@ SShellArguments args = {.host = NULL, .pktLen = 1000, .pktNum = 100, .pktType = "TCP", - .netTestRole = NULL}; + .netTestRole = NULL, + .cloud = true, + .cloudHost = NULL, + .cloudPort = NULL, + .cloudToken = NULL, + }; /* * Main function. 
@@ -102,35 +108,6 @@ int main(int argc, char* argv[]) { exit(EXIT_FAILURE); } - char* cloud_url = getenv("TDENGINE_CLOUD_URL"); - if (cloud_url != NULL) { - char* start = strstr(cloud_url, "http://"); - if (start != NULL) { - cloud_url = start + strlen("http://"); - } else { - start = strstr(cloud_url, "https://"); - if (start != NULL) { - cloud_url = start + strlen("https://"); - } - } - - char* tmp = last_strstr(cloud_url, ":"); - if ((tmp == NULL) && ((tmp + 1) != NULL )) { - fprintf(stderr, "Invalid format in environment variable TDENGINE_CLOUD_URL: %s\n", cloud_url); - exit(EXIT_FAILURE); - } else { - args.port = atoi(tmp + 1); - tmp[0] = '\0'; - args.host = cloud_url; - } - } - - char* cloud_token = getenv("TDENGINE_CLOUD_TOKEN"); - - if (cloud_token != NULL) { - args.token = cloud_token; - } - shellParseArgument(argc, argv, &args); if (args.dump_config) { @@ -155,10 +132,17 @@ int main(int argc, char* argv[]) { exit(0); } - if (args.restful) { - if (tcpConnect()) { - exit(EXIT_FAILURE); - } + if (args.cloud) { + if (parse_cloud_dsn()) { + exit(EXIT_FAILURE); + } + if (tcpConnect(args.cloudHost, atoi(args.cloudPort))) { + exit(EXIT_FAILURE); + } + } else if (args.restful) { + if (tcpConnect(args.host, args.port)) { + exit(EXIT_FAILURE); + } } /* Initialize the shell */ @@ -180,10 +164,16 @@ int main(int argc, char* argv[]) { /* Get grant information */ shellGetGrantInfo(args.con); +#ifndef WINDOWS + shellAutoInit(); +#endif /* Loop to query the input. */ while (1) { pthread_create(&pid, NULL, shellLoopQuery, args.con); pthread_join(pid, NULL); } +#ifndef WINDOWS + shellAutoExit(); +#endif } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index a3aa7a6fe4894583763776ac14c46e53e2610252..0133caf997f60a17748a536371479c11b354888d 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -64,8 +64,8 @@ void printHelp() { printf("%s%s%s\n", indent, indent, "Packet numbers used for net test, default is 100."); printf("%s%s\n", indent, "-R"); printf("%s%s%s\n", indent, indent, "Connect and interact with TDengine use restful."); - printf("%s%s\n", indent, "-t"); - printf("%s%s%s\n", indent, indent, "The token to use when connecting TDengine's cloud services."); + printf("%s%s\n", indent, "-E"); + printf("%s%s%s\n", indent, indent, "The DSN to use when connecting TDengine's cloud services."); printf("%s%s\n", indent, "-S"); printf("%s%s%s\n", indent, indent, "Packet type used for net test, default is TCP."); printf("%s%s\n", indent, "-V"); @@ -80,15 +80,8 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { // for host if (strcmp(argv[i], "-h") == 0) { if (i < argc - 1) { - char* arg = argv[++i]; - char* tmp = strstr(arg, ":"); - if (tmp == NULL) { - arguments->host = arg; - } else if ((tmp + 1) != NULL) { - arguments->port = atoi(tmp + 1); - tmp[0] = '\0'; - arguments->host = arg; - } + arguments->cloud = false; + arguments->host = argv[++i]; } else { fprintf(stderr, "option -h requires an argument\n"); exit(EXIT_FAILURE); @@ -119,6 +112,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { + arguments->cloud = false; arguments->port = atoi(argv[++i]); } else { fprintf(stderr, "option -P requires an argument\n"); @@ -142,6 +136,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } else if (strcmp(argv[i], "-c") == 0) { if (i < argc - 1) { + arguments->cloud = 
false; char *tmp = argv[++i]; if (strlen(tmp) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); @@ -225,14 +220,15 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } else if (strcmp(argv[i], "-R") == 0) { + arguments->cloud = false; arguments->restful = true; } - else if (strcmp(argv[i], "-t") == 0) { + else if (strcmp(argv[i], "-E") == 0) { if (i < argc - 1) { - arguments->token = argv[++i]; + arguments->cloudDsn = argv[++i]; } else { - fprintf(stderr, "options -t requires an argument\n"); + fprintf(stderr, "options -E requires an argument\n"); exit(EXIT_FAILURE); } } @@ -251,6 +247,23 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } + if (args.cloudDsn == NULL) { + if (args.cloud) { + args.cloudDsn = getenv("TDENGINE_CLOUD_DSN"); + if (args.cloudDsn == NULL) { + args.cloud = false; + } else { + if (args.cloudDsn[strlen(args.cloudDsn) - 1] == '\"') { + args.cloudDsn[strlen(args.cloudDsn) - 1] = '\0'; + } + if (args.cloudDsn[0] == '\"') { + args.cloudDsn += 1; + } + } + } + } else { + args.cloud = true; + } } void shellPrintContinuePrompt() { printf("%s", CONTINUE_PROMPT); } @@ -363,21 +376,23 @@ void get_history_path(char *history) { void exitShell() { exit(EXIT_SUCCESS); } -int tcpConnect() { +int tcpConnect(char* host, int iport) { int iResult; WSADATA wsaData; struct addrinfo *aResult = NULL, *ptr = NULL, hints; - if (args.port == 0) { - args.port = 6041; + if (iport == 0) { + iport = 6041; + args.port = iport; } - if (NULL == args.host) { + if (NULL == host) { + host = "localhost"; args.host = "localhost"; } char port[10] = {0}; - sprintf_s(port, 10, "%d", args.port); + sprintf_s(port, 10, "%d", iport); iResult = WSAStartup(MAKEWORD(2,2), &wsaData); if (iResult != 0) { @@ -388,7 +403,7 @@ int tcpConnect() { hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; - iResult = getaddrinfo(args.host, port, &hints, &aResult); + iResult = getaddrinfo(host, port, &hints, &aResult); if ( iResult != 0 ) { printf("getaddrinfo failed with error: %d\n", iResult); WSACleanup(); @@ -420,4 +435,4 @@ int tcpConnect() { return 1; } return 0; -} \ No newline at end of file +} diff --git a/src/kit/shell/src/tire.c b/src/kit/shell/src/tire.c new file mode 100644 index 0000000000000000000000000000000000000000..b4dc7976bd53f11cccbac2f5db600edeeee861d5 --- /dev/null +++ b/src/kit/shell/src/tire.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#define __USE_XOPEN
+
+#include "os.h"
+#include "tire.h"
+
+// ----------- interface -------------
+
+// create the prefix search tree
+STire* createTire(char type) {
+  STire* tire = malloc(sizeof(STire));
+  memset(tire, 0, sizeof(STire));
+  tire->ref = 1; // initial ref count is 1
+  tire->type = type;
+  tire->root.d = (STireNode **)calloc(CHAR_CNT, sizeof(STireNode *));
+  return tire;
+}
+
+// free a tire node
+void freeTireNode(STireNode* node) {
+  if (node == NULL)
+    return ;
+
+  // recursively free the child nodes in array d
+  if (node->d) {
+    for (int i = 0; i < CHAR_CNT; i++) {
+      freeTireNode(node->d[i]);
+    }
+    tfree(node->d);
+  }
+
+  // free self
+  tfree(node);
+}
+
+// destroy the prefix search tree
+void freeTire(STire* tire) {
+  // free nodes
+  for (int i = 0; i < CHAR_CNT; i++) {
+    freeTireNode(tire->root.d[i]);
+  }
+  tfree(tire->root.d);
+
+  // free the list
+  StrName * item = tire->head;
+  while (item) {
+    StrName * next = item->next;
+    // free string
+    tfree(item->name);
+    // free node
+    tfree(item);
+
+    // move next
+    item = next;
+  }
+  tire->head = tire->tail = NULL;
+
+  // free tire
+  tfree(tire);
+}
+
+// insert a new word into the list
+bool insertToList(STire* tire, char* word) {
+  StrName * p = (StrName *)malloc(sizeof(StrName));
+  p->name = strdup(word);
+  p->next = NULL;
+
+  if (tire->head == NULL) {
+    tire->head = p;
+    tire->tail = p;
+  } else {
+    tire->tail->next = p;
+    tire->tail = p;
+  }
+
+  return true;
+}
+
+// insert a new word into the tree
+bool insertToTree(STire* tire, char* word, int len) {
+  int m = 0;
+  STireNode ** nodes = tire->root.d;
+  for (int i = 0; i < len; i++) {
+    m = word[i] - FIRST_ASCII;
+    if (m < 0 || m >= CHAR_CNT) {
+      return false;
+    }
+
+    if (nodes[m] == NULL) {
+      // node does not exist yet, create it
+      STireNode* p = (STireNode* )tmalloc(sizeof(STireNode));
+      memset(p, 0, sizeof(STireNode));
+      nodes[m] = p;
+    }
+
+    if (i == len - 1) {
+      // last char: mark the word end even if the node already existed
+      nodes[m]->end = true;
+      break;
+    }
+
+    if (nodes[m]->d == NULL) {
+      // allocate the child array
+      nodes[m]->d = (STireNode **)calloc(CHAR_CNT, sizeof(STireNode *));
+    }
+
+    // move to the next node
+    nodes = nodes[m]->d;
+  }
+
+  // add count
+  tire->count += 1;
+  return true;
+}
+
+// insert a new word
+bool insertWord(STire* tire, char* word) {
+  int len = strlen(word);
+  if (len >= MAX_WORD_LEN) {
+    return false;
+  }
+
+  switch (tire->type) {
+    case TIRE_TREE:
+      return insertToTree(tire, word, len);
+    case TIRE_LIST:
+      return insertToList(tire, word);
+    default:
+      break;
+  }
+  return false;
+}
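insertToTree stores one child-pointer array per node plus an end-of-word flag, indexed by the character's offset from FIRST_ASCII. A toy standalone version of the same shape over 'a'..'z' (illustrative only; it leaks its nodes for brevity):

#include <stdio.h>
#include <stdlib.h>

#define ALPHA 26
typedef struct Node { struct Node *d[ALPHA]; int end; } Node;

void insert(Node *root, const char *w) {
  Node *n = root;
  for (; *w; w++) {
    int m = *w - 'a';
    if (m < 0 || m >= ALPHA) return;              // out-of-range char, like the FIRST_ASCII check
    if (n->d[m] == NULL) n->d[m] = calloc(1, sizeof(Node));
    n = n->d[m];
  }
  n->end = 1;                                     // mark the word end
}

int hasPrefix(Node *root, const char *p) {
  Node *n = root;
  for (; *p; p++) {
    int m = *p - 'a';
    if (m < 0 || m >= ALPHA || n->d[m] == NULL) return 0;
    n = n->d[m];
  }
  return 1;
}

int main() {
  Node root = {0};
  insert(&root, "show");
  insert(&root, "select");
  printf("%d %d\n", hasPrefix(&root, "se"), hasPrefix(&root, "up"));  // 1 0
  return 0;
}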
+// delete one word from the list
+bool deleteFromList(STire* tire, char* word) {
+  StrName * item = tire->head;
+  while (item) {
+    if (strcmp(item->name, word) == 0) {
+      // found: reset to an empty string to mark it deleted
+      item->name[0] = 0;
+    }
+
+    // move next
+    item = item->next;
+  }
+  return true;
+}
+
+// delete one word from the tree
+bool deleteFromTree(STire* tire, char* word, int len) {
+  int m = 0;
+  bool del = false;
+
+  STireNode** nodes = tire->root.d;
+  for (int i = 0; i < len; i++) {
+    m = word[i] - FIRST_ASCII;
+    if (m < 0 || m >= CHAR_CNT) {
+      return false;
+    }
+
+    if (nodes[m] == NULL) {
+      // not found
+      return false;
+    } else {
+      // not null
+      if (i == len - 1) {
+        // last char: only clear the end flag, do not free the node
+        nodes[m]->end = false;
+        del = true;
+        break;
+      }
+    }
+
+    if (nodes[m]->d == NULL)
+      break;
+    // move to the next node
+    nodes = nodes[m]->d;
+  }
+
+  // reduce count
+  if (del) {
+    tire->count -= 1;
+  }
+
+  return del;
+}
+
+// delete a word
+bool deleteWord(STire* tire, char* word) {
+  int len = strlen(word);
+  if (len >= MAX_WORD_LEN) {
+    return false;
+  }
+
+  switch (tire->type) {
+    case TIRE_TREE:
+      return deleteFromTree(tire, word, len);
+    case TIRE_LIST:
+      return deleteFromList(tire, word);
+    default:
+      break;
+  }
+  return false;
+}
+
+void addWordToMatch(SMatch* match, char* word) {
+  // allocate a new node
+  SMatchNode* node = (SMatchNode* )tmalloc(sizeof(SMatchNode));
+  memset(node, 0, sizeof(SMatchNode));
+  node->word = strdup(word);
+
+  // append to match
+  if (match->head == NULL) {
+    match->head = match->tail = node;
+  } else {
+    match->tail->next = node;
+    match->tail = node;
+  }
+  match->count += 1;
+}
+
+// enumerate all words below the given nodes
+void enumAllWords(STireNode** nodes, char* prefix, SMatch* match) {
+  STireNode * c;
+  char word[MAX_WORD_LEN];
+  int len = strlen(prefix);
+  for (int i = 0; i < CHAR_CNT; i++) {
+    c = nodes[i];
+
+    if (c == NULL) {
+      // empty slot
+      continue;
+    } else {
+      // combine the word string
+      memset(word, 0, sizeof(word));
+      strcpy(word, prefix);
+      word[len] = FIRST_ASCII + i; // append the current char
+
+      // a complete word ends here
+      if (c->end) {
+        addWordToMatch(match, word);
+      }
+      // recurse into the next layer
+      if (c->d)
+        enumAllWords(c->d, word, match);
+    }
+  }
+}
+
+// match prefix in the list
+void matchPrefixFromList(STire* tire, char* prefix, SMatch* match) {
+  StrName * item = tire->head;
+  int len = strlen(prefix);
+  while (item) {
+    if (strncmp(item->name, prefix, len) == 0) {
+      // prefix matched
+      addWordToMatch(match, item->name);
+    }
+
+    // move next
+    item = item->next;
+  }
+}
+
+// match prefix words; if match is not NULL, put all items into match
+void matchPrefixFromTree(STire* tire, char* prefix, SMatch* match) {
+  SMatch* root = match;
+  int m = 0;
+  STireNode* c = 0;
+  int len = strlen(prefix);
+  if (len >= MAX_WORD_LEN) {
+    return;
+  }
+
+  STireNode** nodes = tire->root.d;
+  for (int i = 0; i < len; i++) {
+    m = prefix[i] - FIRST_ASCII;
+    if (m < 0 || m >= CHAR_CNT) {
+      return;
+    }
+
+    // match
+    c = nodes[m];
+    if (c == NULL) {
+      // reached a dead end
+      break;
+    }
+
+    // previous chars already matched
+    if (i == len - 1) {
+      // allocate match if it was not passed in as a param
+      if (root == NULL) {
+        root = (SMatch* )tmalloc(sizeof(SMatch));
+        memset(root, 0, sizeof(SMatch));
+        strcpy(root->pre, prefix);
+      }
+
+      // the prefix matched up to its last char; enumerate all words below
+      if (c->d)
+        enumAllWords(c->d, prefix, root);
+    } else {
+      // move to the next node and continue matching
+      if (c->d == NULL)
+        break;
+      nodes = c->d;
+    }
+  }
+
+  return ;
+}
+
+SMatch* matchPrefix(STire* tire, char* prefix, SMatch* match) {
+  if (match == NULL) {
+    match = (SMatch* )tmalloc(sizeof(SMatch));
+    memset(match, 0, sizeof(SMatch));
+  }
+
+  switch (tire->type) {
+    case TIRE_TREE:
+      matchPrefixFromTree(tire, prefix, match);
+      break;
+    case TIRE_LIST:
+      matchPrefixFromList(tire, prefix, match);
+      break;
+    default:
+      break;
+  }
+
+  // free and return NULL when nothing matched
+  if (match->count == 0) {
+    freeMatch(match);
+    match = NULL;
+  }
+
+  return match;
+}
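matchPrefixFromList is a plain linear scan that collects every list entry starting with the given prefix. A standalone sketch of that scan (hypothetical word list):

#include <stdio.h>
#include <string.h>

int main() {
  const char *words[] = {"database", "databases", "dnode", "describe"};
  const char *pre = "da";
  size_t len = strlen(pre);
  for (int i = 0; i < 4; i++) {
    if (strncmp(words[i], pre, len) == 0)
      printf("match: %s\n", words[i]);  // database, databases
  }
  return 0;
}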
+
+
+// get all items from the word list
+void enumFromList(STire* tire, SMatch* match) {
+  StrName * item = tire->head;
+  while (item) {
+    if (item->name[0] != 0) {
+      // not deleted
+      addWordToMatch(match, item->name);
+    }
+
+    // move next
+    item = item->next;
+  }
+}
+
+// get all items from the tree
+void enumFromTree(STire* tire, SMatch* match) {
+  char pre[2] = {0, 0};
+  STireNode* c;
+
+  // enumerate the first layer
+  for (int i = 0; i < CHAR_CNT; i++) {
+    pre[0] = FIRST_ASCII + i;
+
+    // each node
+    c = tire->root.d[i];
+    if (c == NULL) {
+      // no data on this branch
+      continue;
+    }
+
+    // this branch has data
+    if (c->end)
+      addWordToMatch(match, pre);
+    else
+      matchPrefix(tire, pre, match);
+  }
+}
+
+// get all items
+SMatch* enumAll(STire* tire) {
+  SMatch* match = (SMatch* )tmalloc(sizeof(SMatch));
+  memset(match, 0, sizeof(SMatch));
+
+  switch (tire->type) {
+    case TIRE_TREE:
+      enumFromTree(tire, match);
+      break;
+    case TIRE_LIST:
+      enumFromList(tire, match);
+      break;
+    default:
+      break;
+  }
+
+  // free and return NULL when nothing matched
+  if (match->count == 0) {
+    freeMatch(match);
+    match = NULL;
+  }
+
+  return match;
+}
+
+
+// free a match node
+void freeMatchNode(SMatchNode* node) {
+  // first free next
+  if (node->next)
+    freeMatchNode(node->next);
+
+  // second free self
+  if (node->word)
+    free(node->word);
+  free(node);
+}
+
+// free the match result
+void freeMatch(SMatch* match) {
+  // first free the nodes
+  if (match->head) {
+    freeMatchNode(match->head);
+  }
+
+  // second free self
+  free(match);
+}
diff --git a/src/kit/taos-tools b/src/kit/taos-tools
index 3d5aa76f8c718dcffa100b45e4cbf313d499c356..7d5c1c016d2022d152a6aaa38589f2fbaa0d25a4 160000
--- a/src/kit/taos-tools
+++ b/src/kit/taos-tools
@@ -1 +1 @@
-Subproject commit 3d5aa76f8c718dcffa100b45e4cbf313d499c356
+Subproject commit 7d5c1c016d2022d152a6aaa38589f2fbaa0d25a4
diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c
index 64cfa28917cf6923230f0b7f70500c921c0d5a84..0ec330841caaa288c418bd272f776ca7563ebd63 100644
--- a/src/mnode/src/mnodeAcct.c
+++ b/src/mnode/src/mnodeAcct.c
@@ -215,7 +215,7 @@ static int32_t mnodeCreateRootAcct() {
   taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass);
   pAcct->cfg = (SAcctCfg){
       .maxUsers = 128,
-      .maxDbs = 128,
+      .maxDbs = INT16_MAX,
       .maxTimeSeries = INT32_MAX,
       .maxConnections = 1024,
       .maxStreams = 1000,
diff --git a/src/mnode/src/mnodeCluster.c b/src/mnode/src/mnodeCluster.c
index 553e8446ab449cb3eab8bcc3c15bef8715fe978a..e8f7484fd13afc7117956f3b8bbf4cac5f17f0c3 100644
--- a/src/mnode/src/mnodeCluster.c
+++ b/src/mnode/src/mnodeCluster.c
@@ -145,8 +145,8 @@ static int32_t mnodeCreateCluster() {
   SClusterObj *pCluster = malloc(sizeof(SClusterObj));
   memset(pCluster, 0, sizeof(SClusterObj));
   pCluster->createdTime = taosGetTimestampMs();
-  bool getuid = taosGetSystemUid(pCluster->uid);
-  if (!getuid) {
+  bool bGetuid = taosGetSystemUid(pCluster->uid);
+  if (!bGetuid) {
     strcpy(pCluster->uid, "tdengine2.0");
     mError("failed to get uid from system, set to default val %s", pCluster->uid);
   } else {
@@ -260,4 +260,4 @@ int32_t mnodeCompactCluster() {
 
   mInfo("end to compact cluster table...");
   return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c
index 13dd06bcac733694475eee7be718afdc6c17466e..491d2e4b3603777466868736343c5b1135bd6bb0 100644
--- a/src/mnode/src/mnodeMnode.c
+++ b/src/mnode/src/mnodeMnode.c
@@ -210,7 +210,7 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
     mInfos = *pMinfos;
   } else {
     mInfo("vgId:1, update mnodes epSet, numOfMnodes:%d", mnodeGetMnodesNum());
-    int32_t index = 0;
+    int32_t idx = 0;
 
     void *  pIter = NULL;
     while (1) {
       SMnodeObj *pMnode = NULL;
@@ -220,10 +220,10 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
       SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
       if (pDnode != NULL) {
         set = true;
-        mInfos.mnodeInfos[index].mnodeId = pMnode->mnodeId;
-        strcpy(mInfos.mnodeInfos[index].mnodeEp, pDnode->dnodeEp);
-        if (pMnode->role == TAOS_SYNC_ROLE_MASTER) mInfos.inUse = index;
-        index++;
+        mInfos.mnodeInfos[idx].mnodeId = pMnode->mnodeId;
+        strcpy(mInfos.mnodeInfos[idx].mnodeEp, pDnode->dnodeEp);
+        if (pMnode->role == TAOS_SYNC_ROLE_MASTER) mInfos.inUse = idx;
+        idx++;
       } else {
         set = false;
       }
@@ -232,7 +232,7 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
       mnodeDecMnodeRef(pMnode);
     }
 
-    mInfos.mnodeNum = index;
+    mInfos.mnodeNum = idx;
     if (mInfos.mnodeNum < sdbGetReplicaNum()) {
       set = false;
       mDebug("vgId:1, mnodes info not synced, current:%d syncCfgNum:%d", mInfos.mnodeNum, sdbGetReplicaNum());
@@ -251,23 +251,23 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
     tsMEpForPeer.numOfEps = tsMInfos.mnodeNum;
 
     mInfo("vgId:1, mnodes epSet is set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse);
-    for (int index = 0; index < mInfos.mnodeNum; ++index) {
-      SMInfo *pInfo = &tsMInfos.mnodeInfos[index];
-      taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[index], &tsMEpForShell.port[index]);
-      taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[index], &tsMEpForPeer.port[index]);
-      tsMEpForPeer.port[index] = tsMEpForPeer.port[index] + TSDB_PORT_DNODEDNODE;
+    for (int idx = 0; idx < mInfos.mnodeNum; ++idx) {
+      SMInfo *pInfo = &tsMInfos.mnodeInfos[idx];
+      taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[idx], &tsMEpForShell.port[idx]);
+      taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[idx], &tsMEpForPeer.port[idx]);
+      tsMEpForPeer.port[idx] = tsMEpForPeer.port[idx] + TSDB_PORT_DNODEDNODE;
 
-      mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[index],
-            tsMEpForShell.port[index], tsMEpForPeer.port[index]);
+      mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[idx],
+            tsMEpForShell.port[idx], tsMEpForPeer.port[idx]);
 
-      tsMEpForShell.port[index] = htons(tsMEpForShell.port[index]);
-      tsMEpForPeer.port[index] = htons(tsMEpForPeer.port[index]);
+      tsMEpForShell.port[idx] = htons(tsMEpForShell.port[idx]);
+      tsMEpForPeer.port[idx] = htons(tsMEpForPeer.port[idx]);
       pInfo->mnodeId = htonl(pInfo->mnodeId);
     }
   } else {
     mInfo("vgId:1, mnodes epSet not set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse);
-    for (int index = 0; index < tsMInfos.mnodeNum; ++index) {
-      mInfo("vgId:1, index:%d, ep:%s:%u", index, tsMEpForShell.fqdn[index], htons(tsMEpForShell.port[index]));
+    for (int idx = 0; idx < tsMInfos.mnodeNum; ++idx) {
+      mInfo("vgId:1, index:%d, ep:%s:%u", idx, tsMEpForShell.fqdn[idx], htons(tsMEpForShell.port[idx]));
     }
   }
 
@@ -603,4 +603,4 @@ int32_t mnodeCompactMnodes() {
 
   mInfo("end to compact mnodes table...");
   return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 1e3057f27021e8874b96c0116dd0319fcd999da7..cb39c2ae2bc6d93bfe154dcb01535c398a98b6a9 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -331,7 +331,7 @@ int32_t sdbUpdateSync(void *pMnodes) {
   mDebug("vgId:1, update sync config, pMnodes:%p", pMnodes);
 
   SSyncCfg syncCfg = {0};
-  int32_t  index = 0;
+  int32_t  idx = 0;
 
   if (pMinfos == NULL) {
     mDebug("vgId:1, mInfos not input, use mInfos in sdb, numOfMnodes:%d", syncCfg.replica);
@@ -342,29 +342,29 @@ int32_t sdbUpdateSync(void *pMnodes) {
       pIter = mnodeGetNextMnode(pIter, &pMnode);
       if (pMnode == NULL) break;
 
-      syncCfg.nodeInfo[index].nodeId = pMnode->mnodeId;
+      syncCfg.nodeInfo[idx].nodeId = pMnode->mnodeId;
 
       SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
       if (pDnode != NULL) {
-        syncCfg.nodeInfo[index].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC;
-        tstrncpy(syncCfg.nodeInfo[index].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
-        index++;
+        syncCfg.nodeInfo[idx].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC;
+        tstrncpy(syncCfg.nodeInfo[idx].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
+ idx++; } else { set = false; } @@ -232,7 +232,7 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) { mnodeDecMnodeRef(pMnode); } - mInfos.mnodeNum = index; + mInfos.mnodeNum = idx; if (mInfos.mnodeNum < sdbGetReplicaNum()) { set = false; mDebug("vgId:1, mnodes info not synced, current:%d syncCfgNum:%d", mInfos.mnodeNum, sdbGetReplicaNum()); @@ -251,23 +251,23 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) { tsMEpForPeer.numOfEps = tsMInfos.mnodeNum; mInfo("vgId:1, mnodes epSet is set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse); - for (int index = 0; index < mInfos.mnodeNum; ++index) { - SMInfo *pInfo = &tsMInfos.mnodeInfos[index]; - taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[index], &tsMEpForShell.port[index]); - taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[index], &tsMEpForPeer.port[index]); - tsMEpForPeer.port[index] = tsMEpForPeer.port[index] + TSDB_PORT_DNODEDNODE; + for (int idx = 0; idx < mInfos.mnodeNum; ++idx) { + SMInfo *pInfo = &tsMInfos.mnodeInfos[idx]; + taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[idx], &tsMEpForShell.port[idx]); + taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[idx], &tsMEpForPeer.port[idx]); + tsMEpForPeer.port[idx] = tsMEpForPeer.port[idx] + TSDB_PORT_DNODEDNODE; - mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[index], - tsMEpForShell.port[index], tsMEpForPeer.port[index]); + mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[idx], + tsMEpForShell.port[idx], tsMEpForPeer.port[idx]); - tsMEpForShell.port[index] = htons(tsMEpForShell.port[index]); - tsMEpForPeer.port[index] = htons(tsMEpForPeer.port[index]); + tsMEpForShell.port[idx] = htons(tsMEpForShell.port[idx]); + tsMEpForPeer.port[idx] = htons(tsMEpForPeer.port[idx]); pInfo->mnodeId = htonl(pInfo->mnodeId); } } else { mInfo("vgId:1, mnodes epSet not set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse); - for (int index = 0; index < tsMInfos.mnodeNum; ++index) { - mInfo("vgId:1, index:%d, ep:%s:%u", index, tsMEpForShell.fqdn[index], htons(tsMEpForShell.port[index])); + for (int idx = 0; idx < tsMInfos.mnodeNum; ++idx) { + mInfo("vgId:1, index:%d, ep:%s:%u", idx, tsMEpForShell.fqdn[idx], htons(tsMEpForShell.port[idx])); } } @@ -603,4 +603,4 @@ int32_t mnodeCompactMnodes() { mInfo("end to compact mnodes table..."); return 0; -} \ No newline at end of file +} diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 1e3057f27021e8874b96c0116dd0319fcd999da7..cb39c2ae2bc6d93bfe154dcb01535c398a98b6a9 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -331,7 +331,7 @@ int32_t sdbUpdateSync(void *pMnodes) { mDebug("vgId:1, update sync config, pMnodes:%p", pMnodes); SSyncCfg syncCfg = {0}; - int32_t index = 0; + int32_t idx = 0; if (pMinfos == NULL) { mDebug("vgId:1, mInfos not input, use mInfos in sdb, numOfMnodes:%d", syncCfg.replica); @@ -342,29 +342,29 @@ int32_t sdbUpdateSync(void *pMnodes) { pIter = mnodeGetNextMnode(pIter, &pMnode); if (pMnode == NULL) break; - syncCfg.nodeInfo[index].nodeId = pMnode->mnodeId; + syncCfg.nodeInfo[idx].nodeId = pMnode->mnodeId; SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId); if (pDnode != NULL) { - syncCfg.nodeInfo[index].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC; - tstrncpy(syncCfg.nodeInfo[index].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN); - index++; + syncCfg.nodeInfo[idx].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC; + tstrncpy(syncCfg.nodeInfo[idx].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN); 
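// Editor's note -- illustration only, not part of the patch: every mnode backed by a
// live dnode contributes one sync-node entry above. For a dnode reachable at the
// hypothetical FQDN "node1.example.com" the filled entry reads
//   syncCfg.nodeInfo[idx].nodeId   == pMnode->mnodeId;
//   syncCfg.nodeInfo[idx].nodeFqdn == "node1.example.com";                  // truncated to TSDB_FQDN_LEN by tstrncpy
//   syncCfg.nodeInfo[idx].nodePort == pDnode->dnodePort + TSDB_PORT_SYNC;   // sync channel sits at a fixed offset from the dnode port
// and syncCfg.replica, set right after the loop, counts the entries actually filled.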
+ idx++; } mnodeDecDnodeRef(pDnode); mnodeDecMnodeRef(pMnode); } - syncCfg.replica = index; + syncCfg.replica = idx; } else { mDebug("vgId:1, mInfos input, numOfMnodes:%d", pMinfos->mnodeNum); - for (index = 0; index < pMinfos->mnodeNum; ++index) { - SMInfo *node = &pMinfos->mnodeInfos[index]; - syncCfg.nodeInfo[index].nodeId = node->mnodeId; - taosGetFqdnPortFromEp(node->mnodeEp, syncCfg.nodeInfo[index].nodeFqdn, &syncCfg.nodeInfo[index].nodePort); - syncCfg.nodeInfo[index].nodePort += TSDB_PORT_SYNC; + for (idx = 0; idx < pMinfos->mnodeNum; ++idx) { + SMInfo *node = &pMinfos->mnodeInfos[idx]; + syncCfg.nodeInfo[idx].nodeId = node->mnodeId; + taosGetFqdnPortFromEp(node->mnodeEp, syncCfg.nodeInfo[idx].nodeFqdn, &syncCfg.nodeInfo[idx].nodePort); + syncCfg.nodeInfo[idx].nodePort += TSDB_PORT_SYNC; } - syncCfg.replica = index; + syncCfg.replica = idx; mnodeUpdateMnodeEpSet(pMnodes); } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 9a4ed705f8d9e25c7c2fa8130b0584be60b98ccd..f32d7841d3fb52cd73f87b53c79242d12b617258 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1235,7 +1235,7 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagName) { SSchema *schema = (SSchema *) pStable->schema; for (int32_t tag = 0; tag < pStable->numOfTags; tag++) { - if (strcasecmp(schema[pStable->numOfColumns + tag].name, tagName) == 0) { + if (strcmp(schema[pStable->numOfColumns + tag].name, tagName) == 0) { return tag; } } @@ -1388,7 +1388,7 @@ static int32_t mnodeModifySuperTableTagName(SMnodeMsg *pMsg, char *oldTagName, c static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName) { SSchema *schema = (SSchema *) pStable->schema; for (int32_t col = 0; col < pStable->numOfColumns; col++) { - if (strcasecmp(schema[col].name, colName) == 0) { + if (strcmp(schema[col].name, colName) == 0) { return col; } } diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h index 1fb21ff38b2a29a3884b88c184530d3bac6a9c74..449151cba09b05f2963902670f7ffc9eb93f5d47 100644 --- a/src/os/inc/osTime.h +++ b/src/os/inc/osTime.h @@ -106,7 +106,7 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision); int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision); int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision); -int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision); +int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t sliding, int64_t slidingUnit, int64_t intervalUnit, int32_t precision); int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision); int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision); diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index 17c844ed863c227fe1178b7d99fee4a300a0b3e2..d867c80af4cbf8906a33295f82158f03f3380cf9 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -45,8 +45,8 @@ void taosRemoveDir(char *rootDir) { uInfo("dir:%s is removed", rootDir); } -bool taosDirExist(const char* dirname) { - return access(dirname, F_OK) == 0; +bool taosDirExist(const char* dir) { + return access(dir, F_OK) == 0; } int32_t taosMkdirP(const char *dir, int keepLast) { diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index 
910e6f15be48e9a757b87939dd95b3541967f9c3..6adcd1dae31bfa3c6365a73700b43020b4ab088a 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -44,11 +44,11 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[32] = {0}; + char rand_num[32] = {0}; - sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); + sprintf(rand_num, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); - snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand_num); } #else @@ -71,11 +71,11 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[32] = {0}; + char rand_num[32] = {0}; - sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); + sprintf(rand_num, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); - snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand_num); } #endif diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index 1575b3100f78a17b9bbe9dcb29b971054c55b294..7841e96cbfe95af8ba068ff661fc101da34b75ae 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -82,26 +82,26 @@ void deltaToUtcInitOnce() { } static int64_t parseFraction(char* str, char** end, int32_t timePrec); -static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char delim); -static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char delim, bool withDST); +static int32_t parseTimeWithTz(char* timestr, int64_t* pTime, int32_t timePrec, char delim); +static int32_t parseLocaltime(char* timestr, int64_t* pTime, int32_t timePrec, char delim, bool withDST); static char* forwardToTimeStringEnd(char* str); static bool checkTzPresent(char *str, int32_t len); int32_t taosGetTimestampSec() { return (int32_t)time(NULL); } -int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) { +int32_t taosParseTime(char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t day_light) { /* parse datatime string in with tz */ if (strnchr(timestr, 'T', len, false) != NULL) { if (checkTzPresent(timestr, len)) { - return parseTimeWithTz(timestr, time, timePrec, 'T'); + return parseTimeWithTz(timestr, pTime, timePrec, 'T'); } else { - return parseLocaltime(timestr, time, timePrec, 'T', day_light); + return parseLocaltime(timestr, pTime, timePrec, 'T', day_light); } } else { if (checkTzPresent(timestr, len)) { - return parseTimeWithTz(timestr, time, timePrec, 0); + return parseTimeWithTz(timestr, pTime, timePrec, 0); } else { - return parseLocaltime(timestr, time, timePrec, 0, day_light); + return parseLocaltime(timestr, pTime, timePrec, 0, day_light); } } } @@ -121,8 +121,8 @@ bool checkTzPresent(char *str, int32_t len) { } -FORCE_INLINE int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) { - return taosParseTime(timestr, time, len, timePrec, day_light); +FORCE_INLINE int32_t taos_parse_time(char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t day_light) { + return taosParseTime(timestr, pTime, len, timePrec, day_light); } char* forwardToTimeStringEnd(char* str) { @@ -243,7 +243,7 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) { * 2013-04-12T15:52:01+0800 * 2013-04-12T15:52:01.123+0800 */ -int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char delim) { +int32_t parseTimeWithTz(char* timestr, int64_t* pTime, 
int32_t timePrec, char delim) { int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); @@ -277,14 +277,14 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del if ((str[0] == 'Z' || str[0] == 'z') && str[1] == '\0') { /* utc time, no millisecond, return directly*/ - *time = seconds * factor; + *pTime = seconds * factor; } else if (str[0] == '.') { str += 1; if ((fraction = parseFraction(str, &str, timePrec)) < 0) { return -1; } - *time = seconds * factor + fraction; + *pTime = seconds * factor + fraction; char seg = str[0]; if (seg != 'Z' && seg != 'z' && seg != '+' && seg != '-') { @@ -297,18 +297,18 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del return -1; } - *time += tzOffset * factor; + *pTime += tzOffset * factor; } } else if (str[0] == '+' || str[0] == '-') { - *time = seconds * factor + fraction; + *pTime = seconds * factor + fraction; // parse the timezone if (parseTimezone(str, &tzOffset) == -1) { return -1; } - *time += tzOffset * factor; + *pTime += tzOffset * factor; } else { return -1; } @@ -316,8 +316,8 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del return 0; } -int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char delim, bool withDST) { - *time = 0; +int32_t parseLocaltime(char* timestr, int64_t* pTime, int32_t timePrec, char delim, bool withDST) { + *pTime = 0; struct tm tm = {0}; if (withDST) { tm.tm_isdst = -1; @@ -365,65 +365,65 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char deli int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); - *time = factor * seconds + fraction; + *pTime = factor * seconds + fraction; return 0; } -int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) { +int64_t convertTimePrecision(int64_t timeStamp, int32_t fromPrecision, int32_t toPrecision) { assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO || fromPrecision == TSDB_TIME_PRECISION_NANO); assert(toPrecision == TSDB_TIME_PRECISION_MILLI || toPrecision == TSDB_TIME_PRECISION_MICRO || toPrecision == TSDB_TIME_PRECISION_NANO); - double tempResult = (double)time; + double tempResult = (double)timeStamp; switch(fromPrecision) { case TSDB_TIME_PRECISION_MILLI: { switch (toPrecision) { case TSDB_TIME_PRECISION_MILLI: - return time; + return timeStamp; case TSDB_TIME_PRECISION_MICRO: tempResult *= 1000; - time *= 1000; + timeStamp *= 1000; goto end_; case TSDB_TIME_PRECISION_NANO: tempResult *= 1000000; - time *= 1000000; + timeStamp *= 1000000; goto end_; } } // end from milli case TSDB_TIME_PRECISION_MICRO: { switch (toPrecision) { case TSDB_TIME_PRECISION_MILLI: - return time / 1000; + return timeStamp / 1000; case TSDB_TIME_PRECISION_MICRO: - return time; + return timeStamp; case TSDB_TIME_PRECISION_NANO: tempResult *= 1000; - time *= 1000; + timeStamp *= 1000; goto end_; } } //end from micro case TSDB_TIME_PRECISION_NANO: { switch (toPrecision) { case TSDB_TIME_PRECISION_MILLI: - return time / 1000000; + return timeStamp / 1000000; case TSDB_TIME_PRECISION_MICRO: - return time / 1000; + return timeStamp / 1000; case TSDB_TIME_PRECISION_NANO: - return time; + return timeStamp; } } //end from nano default: { assert(0); - return time; // only to pass windows compilation + return timeStamp; // only to pass windows 
compilation } } //end switch fromPrecision end_: if (tempResult >= (double)INT64_MAX) return INT64_MAX; if (tempResult <= (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL - return time; + return timeStamp; } static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) { @@ -565,15 +565,29 @@ int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision) { return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision)); } +int64_t taosTimeTzOffset(int64_t intervalUnit, int32_t precision) { + int64_t tz_offset = 0; + if (intervalUnit == 'd' || intervalUnit == 'w') { + #if defined(WINDOWS) && _MSC_VER >= 1900 + // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 + int64_t timezone = _timezone; + #endif + tz_offset = -1 * timezone * TSDB_TICK_PER_SECOND(precision); + } + return tz_offset; +} -int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) { +int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t sliding, int64_t slidingUnit, int64_t intervalUnit, int32_t precision) { if (ekey < skey) { int64_t tmp = ekey; ekey = skey; skey = tmp; } - if (unit != 'n' && unit != 'y') { - return (int32_t)((ekey - skey) / interval); + + int64_t tz_offset = taosTimeTzOffset(intervalUnit, precision); + + if (slidingUnit != 'n' && slidingUnit != 'y') { + return (int32_t)((ekey+tz_offset)/sliding - (skey+tz_offset)/sliding) + 1; } skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision)); @@ -588,11 +602,11 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char localtime_r(&t, &tm); int emon = tm.tm_year * 12 + tm.tm_mon; - if (unit == 'y') { - interval *= 12; + if (slidingUnit == 'y') { + sliding *= 12; } - return (emon - smon) / (int32_t)interval; + return (int32_t)(emon/sliding - smon/sliding) + 1; } int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) { @@ -614,56 +628,22 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio if (pInterval->slidingUnit == 'y') { tm.tm_mon = 0; - tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding); + tm.tm_year -= (int)(tm.tm_year%pInterval->sliding); } else { int mon = tm.tm_year * 12 + tm.tm_mon; - mon = (int)(mon / pInterval->sliding * pInterval->sliding); + mon -= (int)(mon % pInterval->sliding); tm.tm_year = mon / 12; tm.tm_mon = mon % 12; } start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision)); } else { - int64_t delta = t - pInterval->interval; - int32_t factor = (delta >= 0) ? 1 : -1; - - start = (delta / pInterval->sliding + factor) * pInterval->sliding; - - if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') { - /* - * here we revised the start time of day according to the local time zone, - * but in case of DST, the start time of one day need to be dynamically decided. 
- */ - // todo refactor to extract function that is available for Linux/Windows/Mac platform - #if defined(WINDOWS) && _MSC_VER >= 1900 - // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 - int64_t timezone = _timezone; - int32_t daylight = _daylight; - char** tzname = _tzname; - #endif - - start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision)); - } - - int64_t end = 0; - - // not enough time range - if (start < 0 || INT64_MAX - start > pInterval->interval - 1) { - end = start + pInterval->interval - 1; - - while(end < t && ((start + pInterval->sliding) <= INT64_MAX)) { // move forward to the correct time window - start += pInterval->sliding; - - if (start < 0 || INT64_MAX - start > pInterval->interval - 1) { - end = start + pInterval->interval - 1; - } else { - end = INT64_MAX; - break; - } - } - } else { - end = INT64_MAX; - } + // To avoid the overly complicated effect of DST (daylight saving time) on the size of sliding windows, DST is + // respected only in users' input and in output meant for display, maintaining the status quo internally. + // This way, the size of each sliding window stays unchanged for the whole processing of a request. + // For example, with interval(1d) sliding(1d) in UTC+8, tz_offset below is +8h, so windows align to local 00:00. + start = t - pInterval->interval + pInterval->sliding; + int64_t tz_offset = taosTimeTzOffset(pInterval->intervalUnit, precision); + start -= (start+tz_offset)%pInterval->sliding; } if (pInterval->offset > 0) { diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index a82149dccb1f71e6fbdc2b62d066f04ff52c251d..d2d9d6d76cf093b1a4d3dcfcb48b3f745e6b7376 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -33,9 +33,9 @@ void* taosLoadDll(const char *filename) { void* taosLoadSym(void* handle, char* name) { void* sym = dlsym(handle, name); - char* error = NULL; + char* err = NULL; - if ((error = dlerror()) != NULL) { + if ((err = dlerror()) != NULL) { uWarn("load sym:%s failed, error:%s", name, dlerror()); return NULL; } diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index aeb7f538ce3c81dc619a124fe31bebd2902ea357..deba5a93e061922c1ac0a75c5fa420d2e43a0fc6 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -50,26 +50,37 @@ ELSE () IF (TD_LINUX) include(ExternalProject) + set(_upx_prefix "$ENV{HOME}/.taos/externals/upx") + ExternalProject_Add(upx + PREFIX "${_upx_prefix}" + URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + ) + ExternalProject_Add(taosadapter PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS taos + DEPENDS taos upx BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND + COMMAND cmake -E echo "building taosadapter ..."
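# Editor's note (not part of the patch): with PREFIX "${_upx_prefix}", ExternalProject
# unpacks the downloaded upx tarball into its default SOURCE_DIR, ${_upx_prefix}/src/upx,
# which is why the INSTALL_COMMAND below invokes the packer as ${_upx_prefix}/src/upx/upx.
# Anchoring the prefix under $ENV{HOME}/.taos also caches the download across build trees,
# unlike the removed curl-based rule that re-fetched upx during every install step.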
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : + COMMAND ${_upx_prefix}/src/upx/upx taosadapter COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin ) + unset(_upx_prefix) ELSEIF (TD_DARWIN) include(ExternalProject) ExternalProject_Add(taosadapter diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index dac264e4180303a974271b0978449c502c86479d..6af0d2bf0afe2c0fe2289f847a3f4da200cbc7ff 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -637,7 +637,7 @@ static int32_t monBuildMasterUptimeSql(char *sql) { for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "role") == 0) { int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); - if (strncmp((char *)row[i], "master", charLen) == 0) { + if (strncmp((char *)row[i], "leader", charLen) == 0) { if (strcmp(fields[i + 1].name, "role_time") == 0) { int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); //master uptime in seconds @@ -703,8 +703,8 @@ static int32_t monBuildMnodesTotalSql(char *sql) { for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "role") == 0) { int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); - if (strncmp((char *)row[i], "master", charLen) == 0 || - strncmp((char *)row[i], "slave", charLen) == 0) { + if (strncmp((char *)row[i], "leader", charLen) == 0 || + strncmp((char *)row[i], "follower", charLen) == 0) { totalMnodesAlive += 1; } } @@ -719,13 +719,13 @@ static int32_t monBuildMnodesTotalSql(char *sql) { static int32_t monGetVgroupsTotalStats(char *dbName, int32_t *totalVgroups, int32_t *totalVgroupsAlive) { - char subsql[TSDB_DB_NAME_LEN + 14]; + char subsql[TSDB_DB_NAME_LEN + 16]; memset(subsql, 0, sizeof(subsql)); - snprintf(subsql, TSDB_DB_NAME_LEN + 13, "show %s.vgroups", dbName); + snprintf(subsql, sizeof(subsql) - 1, "show `%s`.vgroups", dbName); TAOS_RES *result = taos_query(tsMonitor.conn, subsql); int32_t code = taos_errno(result); if (code != TSDB_CODE_SUCCESS) { - monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code)); + monError("failed to execute cmd: show `%s`.vgroups, reason:%s", dbName, tstrerror(code)); } TAOS_ROW row; @@ -794,8 +794,8 @@ static int32_t monGetVnodesTotalStats(char *ep, int32_t *totalVnodes, for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "status") == 0) { int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); - if (strncmp((char 
*)row[i], "master", charLen) == 0 || - strncmp((char *)row[i], "slave", charLen) == 0) { + if (strncmp((char *)row[i], "leader", charLen) == 0 || + strncmp((char *)row[i], "follower", charLen) == 0) { *totalVnodesAlive += 1; } } @@ -957,7 +957,7 @@ static int32_t monBuildDnodeVnodesSql(char *sql) { for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "status") == 0) { int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); - if (strncmp((char *)row[i], "master", charLen) == 0) { + if (strncmp((char *)row[i], "leader", charLen) == 0) { masterNum += 1; } } @@ -992,7 +992,7 @@ static int32_t monBuildDnodeMnodeSql(char *sql) { } } else if (strcmp(fields[i].name, "role") == 0) { charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); - if (strncmp((char *)row[i], "master", charLen) == 0) { + if (strncmp((char *)row[i], "leader", charLen) == 0) { if (has_mnode_row) { monHasMnodeMaster = true; } @@ -1110,11 +1110,11 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { int64_t ts = taosGetTimestampUs(); memset(sql, 0, SQL_LENGTH + 1); - snprintf(sql, SQL_LENGTH, "show %s.vgroups", dbName); + snprintf(sql, SQL_LENGTH, "show `%s`.vgroups", dbName); TAOS_RES *result = taos_query(tsMonitor.conn, sql); int32_t code = taos_errno(result); if (code != TSDB_CODE_SUCCESS) { - monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code)); + monError("failed to execute cmd: show `%s`.vgroups, reason:%s", dbName, tstrerror(code)); } TAOS_ROW row; diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index edd13ea96230a609685895cd3952205446f7bb8e..5fc940309c5b996304fe89afa20816826b61b784 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -90,8 +90,17 @@ extern "C" { #define TSDB_FUNC_QSTOP 48 #define TSDB_FUNC_QDURATION 49 #define TSDB_FUNC_HYPERLOGLOG 50 +#define TSDB_FUNC_MIN_ROW 51 +#define TSDB_FUNC_MAX_ROW 52 +#define TSDB_FUNC_COL_DUMMY 53 -#define TSDB_FUNC_MAX_NUM 51 +#define TSDB_FUNC_MAX_NUM 54 + +enum { + FUNC_NOT_VAL, + FUNC_MIN_ROW, + FUNC_MAX_ROW +}; #define TSDB_FUNCSTATE_SO 0x1u // single output #define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM @@ -217,6 +226,10 @@ typedef struct SQLFunctionCtx { SHashObj **pModeSet; // for mode function STimeWindow qWindow; // for _qstart/_qstop/_qduration column int32_t allocRows; // rows allocated for output buffer + int16_t minRowIndex; + int16_t maxRowIndex; + int16_t minMaxRowType; + bool updateIndex; // whether update index after comparation } SQLFunctionCtx; typedef struct SAggFunctionInfo { diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 8091a300b339839f4c0a91f54bc78b97b5bd858d..2a75d4d9923ef53d4b8f3eb5a91e8e3596ca1b08 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -97,9 +97,11 @@ typedef struct SIntervalVal { SStrToken offset; } SIntervalVal; +typedef struct tSqlExpr tSqlExprTimestamp; + typedef struct SRangeVal { - void *start; - void *end; + tSqlExprTimestamp *start; + tSqlExprTimestamp *end; } SRangeVal; typedef struct SSessionWindowVal { diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 42e649be10a9eec75cc1ec610cba86586a0f27c0..d12bb28ab819cad0c041828ceeaa72ea924fe4f1 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -352,7 +352,7 @@ static uint64_t hllCountCnt(uint8_t *buckets) { static uint8_t hllCountNum(void *ele, int32_t elesize, int32_t *buk) { uint64_t hash = MurmurHash3_64(ele,elesize); - 
int32_t index = hash & HLL_BUCKET_MASK; + int32_t idx = hash & HLL_BUCKET_MASK; hash >>= HLL_BUCKET_BITS; hash |= ((uint64_t)1<buckets[index]; + int32_t idx = 0; + uint8_t count = hllCountNum(val,elesize,&idx); + uint8_t oldcount = pHLLInfo->buckets[idx]; if (count > oldcount) { - pHLLInfo->buckets[index] = count; + pHLLInfo->buckets[idx] = count; } } GET_RES_INFO(pCtx)->numOfRes = 1; @@ -419,8 +419,8 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI assert(functionId != TSDB_FUNC_SCALAR_EXPR); if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY || - functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ || - functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) + functionId == TSDB_FUNC_COL_DUMMY || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || + functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) { *type = (int16_t)dataType; *bytes = dataBytes; @@ -522,6 +522,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = (dataBytes + DATA_SET_FLAG_SIZE); *interBytes = *bytes; + return TSDB_CODE_SUCCESS; + } else if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = (dataBytes + DATA_SET_FLAG_SIZE); + *interBytes = *bytes; + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_SUM) { *type = TSDB_DATA_TYPE_BINARY; @@ -680,6 +686,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = (int16_t)dataType; *bytes = dataBytes; *interBytes = dataBytes + DATA_SET_FLAG_SIZE; + } else if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + *type = (int16_t)dataType; + *bytes = dataBytes; + *interBytes = dataBytes + DATA_SET_FLAG_SIZE; } else if (functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST) { *type = (int16_t)dataType; *bytes = dataBytes; @@ -1001,6 +1011,7 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { #define UPDATE_DATA(ctx, left, right, num, sign, k) \ do { \ if (((left) < (right)) ^ (sign)) { \ + (ctx)->updateIndex = true; \ (left) = (right); \ DO_UPDATE_TAG_COLUMNS(ctx, k); \ (num) += 1; \ @@ -1017,13 +1028,27 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { } while (0) #define LOOPCHECK_N(val, list, ctx, tsdbType, sign, num) \ + int32_t updateCount = 0; \ for (int32_t i = 0; i < ((ctx)->size); ++i) { \ if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \ continue; \ } \ TSKEY key = (ctx)->ptsList != NULL? GET_TS_DATA(ctx, i):0; \ + (ctx)->updateIndex = false; \ UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \ - } + if (!(ctx)->preAggVals.isSet) { \ + if ((ctx)->updateIndex) { \ + if (sign && (ctx)->preAggVals.statis.minIndex != i) { \ + (ctx)->preAggVals.statis.minIndex = i; \ + } \ + if (!sign && (ctx)->preAggVals.statis.maxIndex != i) { \ + (ctx)->preAggVals.statis.maxIndex = i; \ + } \ + updateCount++; \ + } \ + } \ + } \ + (ctx)->updateIndex = updateCount > 0 ? 
true : false; \ #define TYPED_LOOPCHECK_N(type, data, list, ctx, tsdbType, sign, notNullElems) \ do { \ @@ -1363,14 +1388,14 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } void* tval = NULL; - int16_t index = 0; + int16_t idx = 0; if (isMin) { tval = &pCtx->preAggVals.statis.min; - index = pCtx->preAggVals.statis.minIndex; + idx = pCtx->preAggVals.statis.minIndex; } else { tval = &pCtx->preAggVals.statis.max; - index = pCtx->preAggVals.statis.maxIndex; + idx = pCtx->preAggVals.statis.maxIndex; } TSKEY key = TSKEY_INITIAL_VAL; @@ -1381,12 +1406,12 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, * * The following codes of 3 lines will be removed later. */ -// if (index < 0 || index >= pCtx->size + pCtx->startOffset) { -// index = 0; +// if (idx < 0 || idx >= pCtx->size + pCtx->startOffset) { +// idx = 0; // } - // the index is the original position, not the relative position - key = pCtx->ptsList[index]; + // the idx is the original position, not the relative position + key = pCtx->ptsList[idx]; } if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { @@ -1406,6 +1431,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, #endif if ((*data < val) ^ isMin) { + pCtx->updateIndex = true; *data = (int32_t)val; for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) { SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i]; @@ -1465,13 +1491,17 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *pData = p; int32_t *retVal = (int32_t*) pOutput; + int32_t updateCount = 0; for (int32_t i = 0; i < pCtx->size; ++i) { if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) { continue; } + pCtx->updateIndex = false; + if ((*retVal < pData[i]) ^ isMin) { + pCtx->updateIndex = true; *retVal = pData[i]; if(tsList) { TSKEY k = tsList[i]; @@ -1479,7 +1509,21 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } } *notNullElems += 1; + + if (!pCtx->preAggVals.isSet) { + if (pCtx->updateIndex) { + if (isMin && pCtx->preAggVals.statis.minIndex != i) { + pCtx->preAggVals.statis.minIndex = i; + } + if (!isMin && pCtx->preAggVals.statis.maxIndex != i) { + pCtx->preAggVals.statis.maxIndex = i; + } + updateCount++; + } + } } + + pCtx->updateIndex = updateCount > 0 ? 
true : false; #if defined(_DEBUG_VIEW) qDebug("max value updated:%d", *retVal); #endif @@ -1737,6 +1781,152 @@ static void max_func_merge(SQLFunctionCtx *pCtx) { } } +static bool min_row_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { + return false; // not initialized since it has been initialized + } + + GET_TRUE_DATA_TYPE(); + + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)pCtx->pOutput) = INT8_MAX; + break; + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *) pCtx->pOutput = UINT8_MAX; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)pCtx->pOutput) = INT16_MAX; + break; + case TSDB_DATA_TYPE_USMALLINT: + *((uint16_t *)pCtx->pOutput) = UINT16_MAX; + break; + case TSDB_DATA_TYPE_INT: + *((int32_t *)pCtx->pOutput) = INT32_MAX; + break; + case TSDB_DATA_TYPE_UINT: + *((uint32_t *)pCtx->pOutput) = UINT32_MAX; + break; + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)pCtx->pOutput) = INT64_MAX; + break; + case TSDB_DATA_TYPE_UBIGINT: + *((uint64_t *)pCtx->pOutput) = UINT64_MAX; + break; + case TSDB_DATA_TYPE_FLOAT: + *((float *)pCtx->pOutput) = FLT_MAX; + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_VAL(((double *)pCtx->pOutput), DBL_MAX); + break; + default: + qError("illegal data type:%d in min_row query", pCtx->inputType); + } + + return true; +} + +static bool max_row_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { + return false; // not initialized since it has been initialized + } + + GET_TRUE_DATA_TYPE(); + + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)pCtx->pOutput) = INT8_MIN; + break; + case TSDB_DATA_TYPE_UTINYINT: + *((uint8_t *)pCtx->pOutput) = 0; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)pCtx->pOutput) = INT16_MIN; + break; + case TSDB_DATA_TYPE_USMALLINT: + *((uint16_t *)pCtx->pOutput) = 0; + break; + case TSDB_DATA_TYPE_INT: + *((int32_t *)pCtx->pOutput) = INT32_MIN; + break; + case TSDB_DATA_TYPE_UINT: + *((uint32_t *)pCtx->pOutput) = 0; + break; + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)pCtx->pOutput) = INT64_MIN; + break; + case TSDB_DATA_TYPE_UBIGINT: + *((uint64_t *)pCtx->pOutput) = 0; + break; + case TSDB_DATA_TYPE_FLOAT: + *((float *)pCtx->pOutput) = -FLT_MAX; + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_VAL(((double *)pCtx->pOutput), -DBL_MAX); + break; + default: + qError("illegal data type:%d in max_row query", pCtx->inputType); + } + + return true; +} + +static void min_row_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + minMax_function(pCtx, pCtx->pOutput, 1, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + pResInfo->hasResult = DATA_SET_FLAG; + + // set the flag for super table query + if (pCtx->stableQuery) { + *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; + } + } +} + +static void max_row_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + minMax_function(pCtx, pCtx->pOutput, 0, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + pResInfo->hasResult = DATA_SET_FLAG; + + // set the flag for super table query + if (pCtx->stableQuery) { + *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; + } + } +} + +static void min_row_func_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 1); + + SET_VAL(pCtx, notNullElems, 1); + + 
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (notNullElems > 0) { + pResInfo->hasResult = DATA_SET_FLAG; + } +} + +static void max_row_func_merge(SQLFunctionCtx *pCtx) { + int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 0); + + SET_VAL(pCtx, numOfElem, 1); + + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + if (numOfElem > 0) { + pResInfo->hasResult = DATA_SET_FLAG; + } +} + #define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \ for (int32_t i = 0; i < (ctx)->size; ++i) { \ if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \ @@ -2065,15 +2255,15 @@ static void first_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { +static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t idx) { int64_t *timestamp = GET_TS_LIST(pCtx); SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes); - if (pInfo->hasResult != DATA_SET_FLAG || timestamp[index] < pInfo->ts) { + if (pInfo->hasResult != DATA_SET_FLAG || timestamp[idx] < pInfo->ts) { memcpy(pCtx->pOutput, pData, pCtx->inputBytes); pInfo->hasResult = DATA_SET_FLAG; - pInfo->ts = timestamp[index]; + pInfo->ts = timestamp[idx]; DO_UPDATE_TAG_COLUMNS(pCtx, pInfo->ts); } @@ -2203,19 +2393,19 @@ static void last_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { +static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t idx) { int64_t *timestamp = GET_TS_LIST(pCtx); SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes); - if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) { + if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[idx]) { #if defined(_DEBUG_VIEW) - qDebug("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData); + qDebug("assign index:%d, ts:%" PRId64 ", val:%d, ", idx, timestamp[idx], *(int32_t *)pData); #endif memcpy(pCtx->pOutput, pData, pCtx->inputBytes); pInfo->hasResult = DATA_SET_FLAG; - pInfo->ts = timestamp[index]; + pInfo->ts = timestamp[idx]; DO_UPDATE_TAG_COLUMNS(pCtx, pInfo->ts); } @@ -2912,7 +3102,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { tMemBucket * pMemBucket = ppInfo->pMemBucket; if (pMemBucket == NULL || pMemBucket->total == 0) { // check for null - assert(ppInfo->numOfElems == 0); + if (ppInfo->stage > 0) + assert(ppInfo->numOfElems == 0); setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } else { SET_DOUBLE_VAL((double *)pCtx->pOutput, getPercentile(pMemBucket, v)); @@ -3187,12 +3378,12 @@ static bool leastsquares_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo return true; } -#define LEASTSQR_CAL(p, x, y, index, step) \ +#define LEASTSQR_CAL(p, x, y, idx, step) \ do { \ (p)[0][0] += (double)(x) * (x); \ (p)[0][1] += (double)(x); \ - (p)[0][2] += (double)(x) * (y)[index]; \ - (p)[1][2] += (y)[index]; \ + (p)[0][2] += (double)(x) * (y)[idx]; \ + (p)[1][2] += (y)[idx]; \ (x) += step; \ } while (0) @@ -3347,6 +3538,12 @@ static void col_project_function(SQLFunctionCtx *pCtx) { memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes); } else { // DESC + if (pCtx->param[0].i64 == 1) { + // only output one row, copy first row to output + memcpy(pCtx->pOutput, pData, (size_t)pCtx->inputBytes); + return ; + } + for(int32_t i = 0; i < pCtx->size; ++i) { char* dst = 
pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes; char* src = pData + i * pCtx->inputBytes; @@ -3405,6 +3602,70 @@ static void copy_function(SQLFunctionCtx *pCtx) { assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); } +static char *get_data_by_offset(char *src, int16_t inputType, int32_t inputBytes, int32_t offset) { + char *res = NULL; + + switch (inputType) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + res = (char *) ((int8_t *) src + offset); + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + res = (char *) ((int16_t *) src + offset); + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + res = (char *) ((int32_t *) src + offset); + break; + case TSDB_DATA_TYPE_FLOAT: + res = (char *) ((float *) src + offset); + break; + case TSDB_DATA_TYPE_DOUBLE: + res = (char *) ((double *) src + offset); + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + res = (char *) ((int64_t *) src + offset); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + res = src + offset * inputBytes; + break; + default: { + res = src; + } + } + + return res; +} + +static void row_copy_function(SQLFunctionCtx *pCtx) { + int16_t index; + + if (pCtx->minMaxRowType == FUNC_NOT_VAL || !pCtx->updateIndex) { + return; + } + + if (pCtx->minMaxRowType == FUNC_MIN_ROW) { + index = pCtx->minRowIndex; + } else { + index = pCtx->maxRowIndex; + } + + if (index < 0) { + return; + } + + SET_VAL(pCtx, pCtx->size, 1); + + char *pData = GET_INPUT_DATA_LIST(pCtx); + pData = get_data_by_offset(pData, pCtx->inputType, pCtx->inputBytes, index); + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); +} + static void full_copy_function(SQLFunctionCtx *pCtx) { copy_function(pCtx); @@ -3639,7 +3900,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { SDiffFuncInfo *pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); void *data = GET_INPUT_DATA_LIST(pCtx); - bool isFirstBlock = (pDiffInfo->valueAssigned == false); int32_t notNullElems = 0; @@ -3662,7 +3922,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { int32_t diff = (int32_t)(pData[i] - pDiffInfo->i64Prev); if (diff >= 0 || !pDiffInfo->ignoreNegative) { - *pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null + *pOutput = diff; *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3688,7 +3948,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { int64_t diff = pData[i] - pDiffInfo->i64Prev; if (diff >= 0 || !pDiffInfo->ignoreNegative) { - *pOutput = pData[i] - pDiffInfo->i64Prev; // direct previous may be null + *pOutput = diff; *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3714,7 +3974,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { double diff = pData[i] - pDiffInfo->d64Prev; if (diff >= 0 || !pDiffInfo->ignoreNegative) { - SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null + SET_DOUBLE_VAL(pOutput, diff); *pTimestamp = (tsList != NULL)? 
tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3740,7 +4000,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { float diff = (float)(pData[i] - pDiffInfo->d64Prev); if (diff >= 0 || !pDiffInfo->ignoreNegative) { - *pOutput = (float)(pData[i] - pDiffInfo->d64Prev); + *pOutput = diff; *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3766,7 +4026,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { int16_t diff = (int16_t)(pData[i] - pDiffInfo->i64Prev); if (diff >= 0 || !pDiffInfo->ignoreNegative) { - *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); + *pOutput = diff; *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3792,7 +4052,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pDiffInfo->valueAssigned) { int8_t diff = (int8_t)(pData[i] - pDiffInfo->i64Prev); if (diff >= 0 || !pDiffInfo->ignoreNegative) { - *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); + *pOutput = diff; *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; @@ -3810,39 +4070,31 @@ static void diff_function(SQLFunctionCtx *pCtx) { qError("error input type"); } - // initial value is not set yet - if (!pDiffInfo->valueAssigned || notNullElems <= 0) { - /* - * 1. current block and blocks before are full of null - * 2. current block may be null value - */ - assert(pCtx->hasNull); - } else { + if (notNullElems > 0) { for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); } } - int32_t forwardStep = (isFirstBlock) ? notNullElems : notNullElems; - GET_RES_INFO(pCtx)->numOfRes += forwardStep; + GET_RES_INFO(pCtx)->numOfRes += notNullElems; } } char *getScalarExprColumnData(void *param, const char* name, int32_t colId) { SScalarExprSupport *pSupport = (SScalarExprSupport *)param; - int32_t index = -1; + int32_t idx = -1; for (int32_t i = 0; i < pSupport->numOfCols; ++i) { if (colId == pSupport->colList[i].colId) { - index = i; + idx = i; break; } } - assert(index >= 0); - return pSupport->data[index] + pSupport->offset * pSupport->colList[index].bytes; + assert(idx >= 0); + return pSupport->data[idx] + pSupport->offset * pSupport->colList[idx].bytes; } static void scalar_expr_function(SQLFunctionCtx *pCtx) { @@ -4051,14 +4303,14 @@ static double twa_get_area(SPoint1 s, SPoint1 e) { return val; } -static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t size) { +static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t idx, int32_t size) { int32_t notNullElems = 0; SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); TSKEY *tsList = GET_TS_LIST(pCtx); - int32_t i = index; + int32_t i = idx; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); SPoint1* last = &pInfo->p; @@ -4069,7 +4321,7 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si assert(last->key == INT64_MIN); last->key = tsList[i]; - GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, index)); + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, idx)); pInfo->dOutput += twa_get_area(pCtx->start, *last); @@ -4079,7 +4331,7 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si i += step; } else if (pInfo->p.key == INT64_MIN) { last->key = tsList[i]; - GET_TYPED_DATA(last->val, double, 
pCtx->inputType, GET_INPUT_DATA(pCtx, index)); + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, idx)); pInfo->hasResult = DATA_SET_FLAG; pInfo->win.skey = last->key; @@ -5018,13 +5270,13 @@ static void mavg_function(SQLFunctionCtx *pCtx) { ////////////////////////////////////////////////////////////////////////////////// // Sample function with reservoir sampling algorithm -static void assignResultSample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t index, int64_t ts, void *pData, uint16_t type, int16_t bytes, char *inputTags) { - assignVal(pInfo->values + index*bytes, pData, bytes, type); - *(pInfo->timeStamps + index) = ts; +static void assignResultSample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t idx, int64_t ts, void *pData, uint16_t type, int16_t bytes, char *inputTags) { + assignVal(pInfo->values + idx*bytes, pData, bytes, type); + *(pInfo->timeStamps + idx) = ts; SExtTagsInfo* pTagInfo = &pCtx->tagInfo; int32_t posTag = 0; - char* tags = pInfo->taglists + index*pTagInfo->tagsLen; + char* tags = pInfo->taglists + idx*pTagInfo->tagsLen; if (pCtx->currentStage == MERGE_STAGE) { assert(inputTags != NULL); memcpy(tags, inputTags, (size_t)pTagInfo->tagsLen); @@ -6055,8 +6307,8 @@ int32_t functionCompatList[] = { 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, // tid_tag, deriv, csum, mavg, sample, block_info, elapsed, histogram, unique, mode, tail 6, 8, -1, -1, -1, 7, 1, -1, -1, 1, -1, - // stateCount, stateDuration, wstart, wstop, wduration, qstart, qstop, qduration, hyperloglog - 1, 1, 1, 1, 1, 1, 1, 1, 1, + // stateCount, stateDuration, wstart, wstop, wduration, qstart, qstop, qduration, hyperloglog, min_row, max_row + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ @@ -6673,5 +6925,41 @@ SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ hll_func_finalizer, hll_func_merge, dataBlockRequired, + }, + { + // 51 + "min_row", + TSDB_FUNC_MIN_ROW, + TSDB_FUNC_MIN_ROW, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + min_row_func_setup, + min_row_function, + function_finalizer, + min_row_func_merge, + dataBlockRequired, + }, + { + // 52 + "max_row", + TSDB_FUNC_MAX_ROW, + TSDB_FUNC_MAX_ROW, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, + max_row_func_setup, + max_row_function, + function_finalizer, + max_row_func_merge, + dataBlockRequired, + }, + { + // 53 + "col_dummy", + TSDB_FUNC_COL_DUMMY, + TSDB_FUNC_COL_DUMMY, + TSDB_BASE_FUNC_SO, + function_setup, + row_copy_function, + doFinalizer, + copy_function, + noDataRequired, } }; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f8a1871f53309799d58aa4266c0fb5d737fa8ce7..6ba582138c2aecbad0c0dde95177b67a4a0d72fb 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -413,7 +413,7 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput continue; } - if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { + if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY || functId == TSDB_FUNC_COL_DUMMY) { hasTags = true; continue; } @@ -437,7 +437,7 @@ static bool isScalarWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) { continue; } - if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { + if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY || functId == TSDB_FUNC_COL_DUMMY) { hasTags = true; continue; } @@ -519,9 +519,9 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult pResultRowInfo->curPos = 0; } else { // check 
if current pResultRowInfo contains the existed pResultRow SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo); - int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes)); - if (index != NULL) { - pResultRowInfo->curPos = (int32_t) *index; + int64_t* idx = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes)); + if (idx != NULL) { + pResultRowInfo->curPos = (int32_t) *idx; existed = true; } else { existed = false; @@ -557,9 +557,9 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult pResultRowInfo->curPos = pResultRowInfo->size; pResultRowInfo->pResult[pResultRowInfo->size++] = pResult; - int64_t index = pResultRowInfo->curPos; + int64_t idx = pResultRowInfo->curPos; SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo); - taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES); + taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &idx, POINTER_BYTES); } // too many time window in query @@ -633,7 +633,7 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t /* * query border check, skey should not be bounded by the query time range, since the value skey will - * be used as the time window index value. So we only change ekey of time window accordingly. + * be used as the time window idx value. So we only change ekey of time window accordingly. */ if (w.ekey > pQueryAttr->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) { w.ekey = pQueryAttr->window.ekey; @@ -945,6 +945,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; + int16_t minRowIndex = -1, maxRowIndex = -1; + bool updateIndex = false; + int32_t minMaxRowColIndex = -1; + int16_t minMaxRowType = FUNC_NOT_VAL; for (int32_t k = 0; k < numOfOutput; ++k) { bool hasAggregates = pCtx[k].preAggVals.isSet; @@ -977,7 +981,39 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); } else if (!TSDB_FUNC_IS_SCALAR(functionId)){ + if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + if (minMaxRowColIndex == -1) { + minMaxRowColIndex = k; + } + + if (functionId == TSDB_FUNC_MIN_ROW) { + minMaxRowType = FUNC_MIN_ROW; + } else { + minMaxRowType = FUNC_MAX_ROW; + } + + pCtx[k].updateIndex = false; + } else { + pCtx[k].minRowIndex = minRowIndex; + pCtx[k].maxRowIndex = maxRowIndex; + pCtx[k].updateIndex = updateIndex; + pCtx[k].minMaxRowType = minMaxRowType; + } + aAggs[functionId].xFunction(&pCtx[k]); + + if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + updateIndex = pCtx[k].updateIndex; + + // find the minIndex or maxIndex of this column to determine the index of other columns + if (functionId == TSDB_FUNC_MIN_ROW) { + minRowIndex = pCtx[k].preAggVals.statis.minIndex; + } + + if (functionId == TSDB_FUNC_MAX_ROW) { + maxRowIndex = pCtx[k].preAggVals.statis.maxIndex; + } + } } else { assert(0); }
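// Editor's sketch -- illustrative only, not part of the patch: the hunk above and the
// one that follows implement a two-pass handshake. min_row/max_row records which row
// won through preAggVals.statis.minIndex/maxIndex and flags it with updateIndex; the
// companion output columns, planned as TSDB_FUNC_COL_DUMMY, are then re-run so that
// row_copy_function can copy their values from that same row. A toy model of pass one:
static int toyMinRowIdx(const int32_t *c1, int n) {  // role of min_row + statis.minIndex
  int best = 0;
  for (int i = 1; i < n; ++i) {
    if (c1[i] < c1[best]) best = i;                  // where updateIndex would fire
  }
  return best;
}
// Pass two then behaves like col_dummy/row_copy_function: out_k = c_k[toyMinRowIdx(c1, n)],
// so that SELECT min_row(c1), c2 returns the c2 value from the row holding min(c1).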
@@ -992,6 +1028,58 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx pCtx[k].preAggVals.isSet = hasAggregates; pCtx[k].pInput = start; } + + // update the indices of columns before the one in min_row/max_row + if (updateIndex) { + for (int32_t k = 0; k < minMaxRowColIndex; ++k) { + bool hasAggregates = pCtx[k].preAggVals.isSet; + + pCtx[k].size = forwardStep; + pCtx[k].startTs = pWin->skey; + pCtx[k].endTs = pWin->ekey; + + // keep it temporarily + char* start = pCtx[k].pInput; + + int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? offset : offset - (forwardStep - 1); + if (pCtx[k].pInput != NULL) { + pCtx[k].pInput = (char *)pCtx[k].pInput + pos * pCtx[k].inputBytes; + } + + if (tsCol != NULL) { + pCtx[k].ptsList = &tsCol[pos]; + } + + // not a whole block involved in query processing, statistics data can not be used + // NOTE: the original value of isSet has been changed here + if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) { + pCtx[k].preAggVals.isSet = false; + } + + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { + int32_t functionId = pCtx[k].functionId; + if (functionId != TSDB_FUNC_COL_DUMMY) { + continue; + } + + pCtx[k].minRowIndex = minRowIndex; + pCtx[k].maxRowIndex = maxRowIndex; + pCtx[k].updateIndex = updateIndex; + pCtx[k].minMaxRowType = minMaxRowType; + + aAggs[functionId].xFunction(&pCtx[k]); + + pCtx[k].minRowIndex = -1; + pCtx[k].maxRowIndex = -1; + pCtx[k].updateIndex = false; + pCtx[k].minMaxRowType = FUNC_NOT_VAL; + } + + // restore it + pCtx[k].preAggVals.isSet = hasAggregates; + pCtx[k].pInput = start; + } + } } @@ -1233,6 +1321,10 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunctionCtx* pCtx, SSDataBlock* pSDataBlock) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; + int16_t minRowIndex = -1, maxRowIndex = -1; + bool updateIndex = false; + int32_t minMaxRowColIndex = -1; + int16_t minMaxRowType = FUNC_NOT_VAL; for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { @@ -1243,7 +1335,39 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); } else if (!TSDB_FUNC_IS_SCALAR(functionId)){ + if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + if (minMaxRowColIndex == -1) { + minMaxRowColIndex = k; + } + + if (functionId == TSDB_FUNC_MIN_ROW) { + minMaxRowType = FUNC_MIN_ROW; + } else { + minMaxRowType = FUNC_MAX_ROW; + } + + pCtx[k].updateIndex = false; + } else { + pCtx[k].minRowIndex = minRowIndex; + pCtx[k].maxRowIndex = maxRowIndex; + pCtx[k].updateIndex = updateIndex; + pCtx[k].minMaxRowType = minMaxRowType; + } + aAggs[functionId].xFunction(&pCtx[k]); + + if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) { + updateIndex = pCtx[k].updateIndex; + + // find the minIndex or maxIndex of this column to determine the index of other columns + if (functionId == TSDB_FUNC_MIN_ROW) { + minRowIndex = pCtx[k].preAggVals.statis.minIndex; + } + + if (functionId == TSDB_FUNC_MAX_ROW) { + maxRowIndex = pCtx[k].preAggVals.statis.maxIndex; + } + } } else { assert(0); } @@ -1254,6 +1378,32 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction } } } + + // update the indices of columns before the one in min_row/max_row + if (updateIndex) { + for (int32_t k = 0; k < minMaxRowColIndex; ++k) { + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { +
pCtx[k].startTs = startTs; + + int32_t functionId = pCtx[k].functionId; + if (functionId != TSDB_FUNC_COL_DUMMY) { + continue; + } + + pCtx[k].minRowIndex = minRowIndex; + pCtx[k].maxRowIndex = maxRowIndex; + pCtx[k].updateIndex = updateIndex; + pCtx[k].minMaxRowType = minMaxRowType; + + aAggs[functionId].xFunction(&pCtx[k]); + + pCtx[k].minRowIndex = -1; + pCtx[k].maxRowIndex = -1; + pCtx[k].updateIndex = false; + pCtx[k].minMaxRowType = FUNC_NOT_VAL; + } + } + } } static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { @@ -1293,8 +1443,8 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, } SColIndex * pColIndex = &pExpr[k].base.colInfo; - int16_t index = pColIndex->colIndex; - SColumnInfoData *pColInfo = taosArrayGet(pDataBlock, index); + int16_t idx = pColIndex->colIndex; + SColumnInfoData *pColInfo = taosArrayGet(pDataBlock, idx); assert(pColInfo->info.colId <= TSDB_RES_COL_ID || (pColInfo->info.colId >= 0 && pColInfo->info.colId == pColIndex->colId)); double v1 = 0, v2 = 0, v = 0; @@ -1302,7 +1452,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, if (functionId == TSDB_FUNC_INTERP) { if (type == RESULT_ROW_START_INTERP) { if (prevRowIndex == -1) { - COPY_DATA(&pCtx[k].start.val, (char *)pRuntimeEnv->prevRow[index]); + COPY_DATA(&pCtx[k].start.val, (char *)pRuntimeEnv->prevRow[idx]); } else { COPY_DATA(&pCtx[k].start.val, (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes); } @@ -1311,7 +1461,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { if (prevRowIndex == -1) { - pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index]; + pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[idx]; } else { pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes; } @@ -1319,7 +1469,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, } else { if (curRowIndex == -1) { - COPY_DATA(&pCtx[k].end.val, pRuntimeEnv->prevRow[index]); + COPY_DATA(&pCtx[k].end.val, pRuntimeEnv->prevRow[idx]); } else { COPY_DATA(&pCtx[k].end.val, (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes); } @@ -1334,7 +1484,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, assert(curTs != windowKey); if (prevRowIndex == -1) { - GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pRuntimeEnv->prevRow[index]); + GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pRuntimeEnv->prevRow[idx]); } else { GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes); } @@ -1554,7 +1704,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul while (1) { int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &nextWin, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); - if (startPos < 0) { + if (startPos < 0 || startPos >= pSDataBlock->info.rows) { break; } @@ -1588,6 +1738,8 @@ static bool initGroupbyInfo(const SSDataBlock *pSDataBlock, const SGroupbyExpr * return true; } pInfo->pGroupbyDataInfo = taosArrayInit(pGroupbyExpr->numOfGroupCols, sizeof(SGroupbyDataInfo)); + // head put key length (int32_t type) + pInfo->totalBytes = sizeof(int32_t); for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) { SColIndex* pColIndex = 
taosArrayGet(pGroupbyExpr->columnInfo, k); @@ -1624,7 +1776,8 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI *buf = NULL; return; } - *buf = p; + *buf = p; + p += sizeof(int32_t); for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) { SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i); @@ -1646,26 +1799,22 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM)); p += strlen(MULTI_KEY_DELIM); } + + // calc keyLen and save + int32_t keyLen = (p - *buf) - sizeof(int32_t); + *(int32_t *)(*buf) = keyLen; } static bool isGroupbyKeyEqual(void *a, void *b, void *ext) { - SGroupbyOperatorInfo *pInfo = (SGroupbyOperatorInfo *)ext; - if (memcmp(a, b, pInfo->totalBytes) == 0) { - return true; + int32_t len1 = *(int32_t *)a; + int32_t len2 = *(int32_t *)b; + if (len1 != len2) { + return false; } - int32_t offset = 0; - for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) { - SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i); + char *a1 = (char *)a + sizeof(int32_t); + char *b1 = (char *)b + sizeof(int32_t); - char *k1 = (char *)a + offset; - char *k2 = (char *)b + offset; - if (getComparFunc(pDataInfo->type, 0)(k1, k2) != 0) { - return false; - } - offset += pDataInfo->bytes; - offset += (int32_t)strlen(MULTI_KEY_DELIM); - } - return true; + return memcmp(a1, b1, len1) == 0; } static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) { @@ -1708,12 +1857,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo); } - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex); + char *preKey = pInfo->prevData + sizeof(int32_t); + int32_t keyLen = *(int32_t *)pInfo->prevData; + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); } - doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + int32_t offset = QUERY_IS_ASC_QUERY(pQueryAttr) ? 
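A minimal sketch (not part of the patch) of the length-prefixed group-by key that buildGroupbyKeyBuf now produces and isGroupbyKeyEqual now compares: the buffer layout is [int32 keyLen][col1][DELIM][col2][DELIM]..., so equality is one length check plus one memcmp instead of a per-column comparator loop. Names are illustrative:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MULTI_KEY_DELIM "-"

/* build a key as [int32 keyLen][col1][DELIM][col2][DELIM]... */
static int32_t buildKey(char *buf, const char **cols, int32_t n) {
    char *p = buf + sizeof(int32_t);           /* reserve the length header */
    for (int32_t i = 0; i < n; ++i) {
        size_t len = strlen(cols[i]);
        memcpy(p, cols[i], len);
        p += len;
        memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
        p += strlen(MULTI_KEY_DELIM);
    }
    int32_t keyLen = (int32_t)(p - buf) - (int32_t)sizeof(int32_t);
    *(int32_t *)buf = keyLen;                  /* store the length header */
    return keyLen;
}

/* equality: compare lengths first, then one memcmp over the payload */
static int keyEqual(const void *a, const void *b) {
    int32_t la = *(const int32_t *)a, lb = *(const int32_t *)b;
    if (la != lb) return 0;
    return memcmp((const char *)a + sizeof(int32_t),
                  (const char *)b + sizeof(int32_t), la) == 0;
}

int main(void) {
    char k1[64], k2[64];
    const char *r1[] = {"beijing", "t1"}, *r2[] = {"beijing", "t1"};
    buildKey(k1, r1, 2);
    buildKey(k2, r2, 2);
    printf("equal=%d\n", keyEqual(k1, k2));    /* 1 */
    return 0;
}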
j - num : j - 1; + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, offset, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); num = 1; @@ -1729,11 +1881,14 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo); } - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex); + char *preKey = pInfo->prevData + sizeof(int32_t); + int32_t keyLen = *(int32_t *)pInfo->prevData; + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); } - doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + int32_t offset = QUERY_IS_ASC_QUERY(pQueryAttr) ? pSDataBlock->info.rows - num : pSDataBlock->info.rows - 1; + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, offset, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); } } @@ -1944,7 +2099,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { continue; } - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { //ts_select ts,top(col,2) + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) { //ts_select ts,top(col,2) tagLen += pCtx[i].outputBytes; pTagCtx[num++] = &pCtx[i]; } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { @@ -2019,6 +2174,9 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr pCtx->end.key = INT64_MIN; pCtx->startTs = INT64_MIN; + pCtx->minRowIndex = -1; + pCtx->maxRowIndex = -1; + pCtx->qWindow = pQueryAttr->window; pCtx->allocRows = numOfRows; @@ -2656,6 +2814,7 @@ static bool notContainSessionOrStateWindow(SQueryAttr *pQueryAttr) { return !(pQ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { bool hasFirstLastFunc = false; bool hasOtherFunc = false; + bool hasCount = false; if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) { return status; @@ -2671,6 +2830,8 @@ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { hasFirstLastFunc = true; + } else if(functionId == TSDB_FUNC_COUNT) { + hasCount = true; } else { hasOtherFunc = true; } @@ -2678,7 +2839,7 @@ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) { if(!hasOtherFunc) { - return BLK_DATA_DISCARD; + return hasCount ? 
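A minimal sketch (not part of the patch) of why the offset passed to doApplyFunctions now depends on scan direction: the callee expects the offset of the first row in scan order, which for an ascending scan is the lowest array index of the run (j - num) and for a descending scan the highest (j - 1); the callee then rebases by forwardStep - 1, as the hunk above in doApplyFunctions shows. Simplified stand-in code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* the callee rebases a scan-order offset to the lowest array index of the run */
static void applyRun(const int64_t *col, int32_t offset, int32_t num, bool asc) {
    int32_t pos = asc ? offset : offset - (num - 1);
    int64_t sum = 0;
    for (int32_t i = 0; i < num; ++i) sum += col[pos + i];
    printf("sum of run = %lld\n", (long long)sum);
}

int main(void) {
    int64_t col[] = {1, 2, 3, 4, 5};
    int32_t j = 5, num = 3;              /* a run of 3 rows ending before j */
    applyRun(col, j - num, num, true);   /* ascending: rows 2..4 -> 12 */
    applyRun(col, j - 1,   num, false);  /* descending: same rows via scan-order offset */
    return 0;
}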
BLK_DATA_NO_NEEDED : BLK_DATA_DISCARD; } else { return BLK_DATA_ALL_NEEDED; }
@@ -3499,7 +3660,7 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt continue; } - // todo use tag column index to optimize performance + // todo use tag column idx to optimize performance GET_JSON_KEY(pLocalExprInfo) doSetTagValueInParam(pTable, param, paramLen, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType, pLocalExprInfo->base.resBytes);
@@ -3517,7 +3678,7 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt offset += pLocalExprInfo->base.resBytes; } - //todo : use index to avoid iterator all possible output columns + //todo : use idx to avoid iterating over all possible output columns if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { setParamForStableStddev(pRuntimeEnv, pCtx, numOfOutput, pExprInfo); }
@@ -4310,14 +4471,15 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction // find colid in dataBlock int32_t bytes, offset = 0; char* val = NULL; + char* prevData = pInfo->prevData + sizeof(int32_t); // the buffer head stores the key length (int32_t) for (int32_t idx = 0; idx < taosArrayGetSize(pInfo->pGroupbyDataInfo); idx++) { SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, idx); if (pDataInfo->index == pExpr1->colInfo.colId) { bytes = pDataInfo->bytes; - val = pInfo->prevData + offset; + val = prevData + offset; break; } - offset += pDataInfo->bytes; + offset += pDataInfo->bytes + strlen(MULTI_KEY_DELIM); // multiple values are separated by MULTI_KEY_DELIM } if (val == NULL) { continue; }
@@ -4828,10 +4990,10 @@ void queryCostStatis(SQInfo *pQInfo) { // TSKEY key = pTableQueryInfo->win.skey; // // pWindowResInfo->prevSKey = tw.skey; -// int32_t index = pRuntimeEnv->resultRowInfo.curIndex; +// int32_t idx = pRuntimeEnv->resultRowInfo.curIndex; // // int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); -// pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index +// pRuntimeEnv->resultRowInfo.curIndex = idx; // restore the window idx // // qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, // GET_QID(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes,
@@ -5158,15 +5320,17 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr } static void doTableQueryInfoTimeWindowCheck(SQueryAttr* pQueryAttr, STableQueryInfo* pTableQueryInfo) { + // + // the subscribe path cannot guarantee pTableQueryInfo->lastKey >= pTableQueryInfo->win.skey, so that condition check is removed here: + // subscribe computes the query window skey as the smallest skey over all child tables, so the biggest child table's block->skey may be larger than this table's pTableQueryInfo->win.skey + // if (QUERY_IS_ASC_QUERY(pQueryAttr)) { assert( (pTableQueryInfo->win.skey <= pTableQueryInfo->win.ekey) && - (pTableQueryInfo->lastKey >= pTableQueryInfo->win.skey) && (pTableQueryInfo->win.skey >= pQueryAttr->window.skey && pTableQueryInfo->win.ekey <= pQueryAttr->window.ekey)); } else { assert( (pTableQueryInfo->win.skey >= pTableQueryInfo->win.ekey) && - (pTableQueryInfo->lastKey <= pTableQueryInfo->win.skey) && (pTableQueryInfo->win.skey <= pQueryAttr->window.skey && pTableQueryInfo->win.ekey >= pQueryAttr->window.ekey)); } }
@@ -5268,6 +5432,28 @@ static SSDataBlock* doTableScanImpl(void* param, bool* newgroup)
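A minimal sketch (not part of the patch) of the updateBlockLoadStatus decision amended above: with only first/last functions a block can be discarded once the needed rows are known, but count(*) still needs every block's row count, so the status must stay "not needed" (the block header is visited) rather than "discard". Simplified stand-in code:

#include <stdbool.h>
#include <stdio.h>

enum { BLK_DATA_NO_NEEDED, BLK_DATA_DISCARD, BLK_DATA_ALL_NEEDED };

/* decide block-load status from which function classes appear in the query */
static int blockStatus(bool hasFirstLast, bool hasCount, bool hasOther) {
    if (!hasFirstLast) return BLK_DATA_ALL_NEEDED;  /* not the optimized case */
    if (hasOther)      return BLK_DATA_ALL_NEEDED;  /* some function reads rows */
    /* count(*) still needs each block's row count, so the block header
       must be visited: "no needed" instead of "discard" */
    return hasCount ? BLK_DATA_NO_NEEDED : BLK_DATA_DISCARD;
}

int main(void) {
    printf("%d\n", blockStatus(true, false, false)); /* 1: DISCARD */
    printf("%d\n", blockStatus(true, true,  false)); /* 0: NO_NEEDED */
    return 0;
}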
{ break; } + // check window condition + if (pBlock->info.window.skey != INT64_MIN && pBlock->info.window.skey != INT64_MAX && + pBlock->info.window.ekey != INT64_MIN && pBlock->info.window.ekey != INT64_MAX) { + // normal block, not a special block like last_row + int64_t skey = (*pTableQueryInfo)->win.skey; + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + // ASC + if ( skey > pBlock->info.window.ekey ) { + qWarn(" pTableQueryInfo skey(%" PRId64 ") > pBlock ekey(%" PRId64 "), so skip this block. pBlock skey=%" PRId64 " tid=%d", + skey, pBlock->info.window.ekey, pBlock->info.window.skey, pBlock->info.tid); + continue; + } + } else { + // DESC + if ( skey < pBlock->info.window.skey ) { + qWarn(" pTableQueryInfo skey(%" PRId64 ") < pBlock skey(%" PRId64 "), so skip this block. pBlock ekey=%" PRId64 " tid=%d", + skey, pBlock->info.window.skey, pBlock->info.window.ekey, pBlock->info.tid); + continue; + } + } + } + pRuntimeEnv->current = *pTableQueryInfo; doTableQueryInfoTimeWindowCheck(pQueryAttr, *pTableQueryInfo);
@@ -5637,15 +5823,15 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) { { numOfCols = (int32_t) taosArrayGetSize(pOrderColumns); for(int32_t i = 0; i < numOfCols; ++i) { - SColIndex* index = taosArrayGet(pOrderColumns, i); + SColIndex* idx = taosArrayGet(pOrderColumns, i); for(int32_t j = 0; j < pQuery->numOfOutput; ++j) { SSqlExpr* pExpr = &pQuery->pExpr1[j].base; int32_t functionId = pExpr->functionId; - if (index->colId == pExpr->colInfo.colId && + if (idx->colId == pExpr->colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { - index->colIndex = j; - index->colId = pExpr->resColId; + idx->colIndex = j; + idx->colId = pExpr->resColId; } } }
@@ -5669,24 +5855,24 @@ SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) { } for (int32_t i = 0; i < numOfCols; ++i) { - SColIndex* index = taosArrayGet(pOrderColumns, i); + SColIndex* idx = taosArrayGet(pOrderColumns, i); bool found = false; for(int32_t j = 0; j < pQuery->numOfOutput; ++j) { SSqlExpr* pExpr = &pQuery->pExpr1[j].base; // TSDB_FUNC_TAG_DUMMY function needs to be ignored - if (index->colId == pExpr->colInfo.colId && + if (idx->colId == pExpr->colInfo.colId && ((TSDB_COL_IS_TAG(pExpr->colInfo.flag) && ((pExpr->functionId == TSDB_FUNC_TAG) || (pExpr->functionId == TSDB_FUNC_TAGPRJ))) || (TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && pExpr->functionId == TSDB_FUNC_PRJ))) { - index->colIndex = j; - index->colId = pExpr->resColId; + idx->colIndex = j; + idx->colId = pExpr->resColId; found = true; break; } } - assert(found && index->colIndex >= 0 && index->colIndex < pQuery->numOfOutput); + assert(found && idx->colIndex >= 0 && idx->colIndex < pQuery->numOfOutput); } return pOrderColumns; }
@@ -5768,8 +5954,8 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, for(int32_t i = 0; i < numOfCols; ++i) { pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; - SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); - offset += pExpr[index->colIndex].base.resBytes; + SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[idx->colIndex].base.resBytes; } numOfCols = (pInfo->groupColumnList != NULL)?
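A minimal sketch (not part of the patch) of the scan-side guard added in doTableScanImpl above: a block whose time window lies entirely before the table's current start key (for ascending scans; entirely after it, for descending) is skipped instead of being fed into the window check. Simplified stand-in code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* should a data block be skipped, given the table's remaining window skey? */
static bool skipBlock(int64_t tableSkey, int64_t blockSkey, int64_t blockEkey,
                      bool asc) {
    if (asc) return tableSkey > blockEkey;  /* block entirely in the past */
    else     return tableSkey < blockSkey;  /* block entirely in the future */
}

int main(void) {
    /* ascending scan starting at ts=100: a block covering [10, 50] is stale */
    printf("%d\n", skipBlock(100, 10, 50, true));   /* 1: skip */
    printf("%d\n", skipBlock(100, 40, 120, true));  /* 0: overlaps, keep */
    return 0;
}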
(int32_t)taosArrayGetSize(pInfo->groupColumnList):0; @@ -5783,8 +5969,8 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, for(int32_t i = 0; i < numOfCols; ++i) { pInfo->currentGroupColData[i] = (char*)pInfo->currentGroupColData + offset; - SColIndex* index = taosArrayGet(pInfo->groupColumnList, i); - offset += pExpr[index->colIndex].base.resBytes; + SColIndex* idx = taosArrayGet(pInfo->groupColumnList, i); + offset += pExpr[idx->colIndex].base.resBytes; } initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); @@ -5851,8 +6037,8 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx for(int32_t i = 0; i < numOfCols; ++i) { pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; - SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); - offset += pExpr[index->colIndex].base.colBytes; + SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[idx->colIndex].base.colBytes; } } @@ -6490,7 +6676,7 @@ static bool doEveryInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlo STimeEveryOperatorInfo* pEveryInfo = (STimeEveryOperatorInfo*)pOperatorInfo->info; SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); - int32_t gidx = pRuntimeEnv->current->groupIndex; + int32_t gindex = pRuntimeEnv->current->groupIndex; SQLFunctionCtx* pCtx = NULL; *needApply = false; @@ -6696,7 +6882,7 @@ static bool doEveryInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlo group_finished_exit: - qDebug("group idx[%d] interp finished", gidx); + qDebug("group index[%d] interp finished", gindex); if (pQueryAttr->needReverseScan) { pQueryAttr->range.skey = INT64_MIN; @@ -8126,8 +8312,8 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator for(int32_t i = 0; i < numOfCols; ++i) { pInfo->prevRow[i] = (char*)pInfo->prevRow + offset; - SColIndex* index = taosArrayGet(pInfo->orderColumnList, i); - offset += pExpr[index->colIndex].base.resBytes; + SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i); + offset += pExpr[idx->colIndex].base.resBytes; } pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); @@ -8882,7 +9068,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += tListLen(param->pGroupColIndex[i].name); } - //pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx); + //pQueryMsg->orderByIndex = htons(pQueryMsg->orderByIndex); pQueryMsg->groupOrderType = htons(pQueryMsg->groupOrderType); } @@ -9435,11 +9621,11 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t nu pExprs[i].base.resType = pExprs[i].pExpr->resultType; pExprs[i].base.interBytes = 0; } else { - int32_t index = pExprs[i].base.colInfo.colIndex; - assert(prevExpr[index].base.resColId == pExprs[i].base.colInfo.colId); + int32_t idx = pExprs[i].base.colInfo.colIndex; + assert(prevExpr[idx].base.resColId == pExprs[i].base.colInfo.colId); - type = prevExpr[index].base.resType; - bytes = prevExpr[index].base.resBytes; + type = prevExpr[idx].base.resType; + bytes = prevExpr[idx].base.resBytes; int32_t param = (int32_t)pExprs[i].base.param[0].i64; if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, @@ -9470,7 +9656,7 @@ SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pCo pGroupbyExpr->numOfGroupCols = pQueryMsg->numOfGroupCols; pGroupbyExpr->orderType = pQueryMsg->groupOrderType; - //pGroupbyExpr->orderIndex = 
pQueryMsg->orderByIdx; + //pGroupbyExpr->orderIndex = pQueryMsg->orderByIndex; pGroupbyExpr->columnInfo = taosArrayInit(pQueryMsg->numOfGroupCols, sizeof(SColIndex)); for(int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) { @@ -9780,7 +9966,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; STimeWindow window = pQueryAttr->window; - int32_t index = 0; + int32_t idx = 0; for(int32_t i = 0; i < numOfGroups; ++i) { SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); @@ -9796,7 +9982,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S STableKeyInfo* info = taosArrayGet(pa, j); window.skey = info->lastKey; - void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); + void* buf = (char*) pQInfo->pBuf + idx * sizeof(STableQueryInfo); STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf); if (item == NULL) { goto _cleanup; @@ -9807,7 +9993,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S STableId* id = TSDB_TABLEID(info->pTable); taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES); - index += 1; + idx += 1; } } diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 4a4ae3ca422a5e65e74d1c566dcd4c99de4e83a2..e2c649e99c01d5c8b9f6497eddbaf126e644ce09 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -485,9 +485,9 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, int32_t compare_aRv(SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order) { for (int32_t i = 0; i < numOfCols; ++i) { SColIndex* pColIndex = taosArrayGet(colIndex, i); - int32_t index = pColIndex->colIndex; + int32_t idx = pColIndex->colIndex; - SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index); + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx); assert(pColIndex->colId == pColInfo->info.colId); char* data = pColInfo->pData + rowIndex * pColInfo->info.bytes; @@ -1176,14 +1176,14 @@ void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxE } } -SSchema1* getColumnModelSchema(SColumnModel *pColumnModel, int32_t index) { - assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols); - return &pColumnModel->pFields[index].field; +SSchema1* getColumnModelSchema(SColumnModel *pColumnModel, int32_t idx) { + assert(pColumnModel != NULL && idx >= 0 && idx < pColumnModel->numOfCols); + return &pColumnModel->pFields[idx].field; } -int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t index) { - assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols); - return pColumnModel->pFields[index].offset; +int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t idx) { + assert(pColumnModel != NULL && idx >= 0 && idx < pColumnModel->numOfCols); + return pColumnModel->pFields[idx].offset; } void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t blockCapacity, int32_t s, int32_t e) { @@ -1257,17 +1257,17 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { tfree(pDesc); } -void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) { - assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols); +void taoscQSort(void** pCols, SSchema* 
pSchema, int32_t numOfCols, int32_t numOfRows, int32_t idx, __compar_fn_t compareFn) { + assert(numOfRows > 0 && numOfCols > 0 && idx >= 0 && idx < numOfCols); - int32_t bytes = pSchema[index].bytes; + int32_t bytes = pSchema[idx].bytes; int32_t size = bytes + sizeof(int32_t); char* buf = calloc(1, size * numOfRows); for(int32_t i = 0; i < numOfRows; ++i) { char* dest = buf + size * i; - memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes); + memcpy(dest, ((char*) pCols[idx]) + bytes * i, bytes); *(int32_t*)(dest+bytes) = i; } @@ -1279,7 +1279,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf for(int32_t i = 0; i < numOfCols; ++i) { int32_t bytes1 = pSchema[i].bytes; - if (i == index) { + if (i == idx) { for(int32_t j = 0; j < numOfRows; ++j){ char* src = buf + (j * size); char* dest = ((char*)pCols[i]) + (j * bytes1); diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index d83620c78fe0cc87a1fff61b6c58dff9852ecbec..c3524c84754091d988473bdaae78601723825ebf 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -63,8 +63,8 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); // set the primary timestamp column value - int32_t index = pFillInfo->numOfCurrent; - char* val = elePtrAt(data[0], TSDB_KEYSIZE, index); + int32_t idx = pFillInfo->numOfCurrent; + char* val = elePtrAt(data[0], TSDB_KEYSIZE, idx); *(TSKEY*) val = pFillInfo->currentKey; // set the other values @@ -78,11 +78,11 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData continue; } - char* output = elePtrAt(data[i], pCol->col.bytes, index); + char* output = elePtrAt(data[i], pCol->col.bytes, idx); assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); } } else { // no prev value yet, set the value for NULL - setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx); } } else if (pFillInfo->type == TSDB_FILL_NEXT) { char* p = next; @@ -94,11 +94,11 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData continue; } - char* output = elePtrAt(data[i], pCol->col.bytes, index); + char* output = elePtrAt(data[i], pCol->col.bytes, idx); assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); } } else { // no prev value yet, set the value for NULL - setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx); } } else if (pFillInfo->type == TSDB_FILL_LINEAR) { if (prev != NULL && !outOfBound) { @@ -111,7 +111,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData int16_t type = pCol->col.type; int16_t bytes = pCol->col.bytes; - char *val1 = elePtrAt(data[i], pCol->col.bytes, index); + char *val1 = elePtrAt(data[i], pCol->col.bytes, idx); if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) { setNull(val1, pCol->col.type, bytes); continue; @@ -128,7 +128,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData taosGetLinearInterpolationVal(&point, type, &point1, &point2, type, &exceedMax, &exceedMin); } } else { - setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx); } } else { // fill the default value */ for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { @@ -137,12 
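A minimal sketch (not part of the patch) of the tag-sort pattern taoscQSort uses: copy the sort column into a scratch buffer with each element's original row number appended, qsort the scratch, then permute every sibling column by the recovered row ids. Simplified to int32 columns:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* scratch element layout: [value][original row id]; compare by value only */
static int cmpInt32WithRowId(const void *a, const void *b) {
    return (*(const int32_t *)a > *(const int32_t *)b) -
           (*(const int32_t *)a < *(const int32_t *)b);
}

int main(void) {
    int32_t key[]   = {30, 10, 20};  /* column to sort by */
    int32_t other[] = {3, 1, 2};     /* sibling column */
    int32_t n = 3;

    /* pack value + row id side by side, sort the packed buffer */
    int32_t buf[3][2];
    for (int32_t i = 0; i < n; ++i) { buf[i][0] = key[i]; buf[i][1] = i; }
    qsort(buf, n, sizeof(buf[0]), cmpInt32WithRowId);

    /* permute every column using the row ids carried through the sort */
    int32_t key2[3], other2[3];
    for (int32_t i = 0; i < n; ++i) {
        key2[i]   = buf[i][0];
        other2[i] = other[buf[i][1]];
    }
    for (int32_t i = 0; i < n; ++i) printf("%d:%d ", key2[i], other2[i]);
    printf("\n");                    /* 10:1 20:2 30:3 */
    return 0;
}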
+137,12 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData continue; } - char* val1 = elePtrAt(data[i], pCol->col.bytes, index); + char* val1 = elePtrAt(data[i], pCol->col.bytes, idx); assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } } - setTagsValue(pFillInfo, data, index); + setTagsValue(pFillInfo, data, idx); pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision); pFillInfo->numOfCurrent++; } @@ -303,11 +303,11 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t numOfTags += 1; bool exists = false; - int32_t index = -1; + int32_t idx = -1; for (int32_t j = 0; j < k; ++j) { if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { exists = true; - index = j; + idx = j; break; } } @@ -323,7 +323,7 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t k += 1; } else { - pColInfo->tagIndex = index; + pColInfo->tagIndex = idx; } } @@ -462,8 +462,8 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, + pFillInfo->interval.intervalUnit, pFillInfo->precision); - numOfRes += 1; if(numOfRes < numOfRows || pFillInfo->currentKey < lastKey) { // set currentKey max pFillInfo->currentKey = tsList[0]; @@ -485,6 +485,15 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) { return false; } +bool validateAlignOfSlidingWindows(TSKEY lastKey, TSKEY currentKey, SFillInfo *pFillInfo) { + if (pFillInfo->interval.slidingUnit != 'n' && pFillInfo->interval.slidingUnit != 'y') { + assert((lastKey-currentKey)%pFillInfo->interval.sliding == 0 && + "Sliding windows not aligned." + "Most likely caused by mismatched timezones between client and/or dnodes"); + } + return true; +} + int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { int64_t* tsList = (int64_t*) pFillInfo->pData[0]; @@ -498,14 +507,16 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma int64_t numOfRes = -1; if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set. TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; + if(!validateAlignOfSlidingWindows(lastKey, pFillInfo->currentKey, pFillInfo)) return 0; numOfRes = taosTimeCountInterval( lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, + pFillInfo->interval.intervalUnit, pFillInfo->precision); - numOfRes += 1; - assert(numOfRes >= numOfRows); + assert(numOfRes >= numOfRows && + "Sliding windows to fill mismatched"); } else { // reach the end of data if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) || (ekey1 > pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) { @@ -516,8 +527,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, + pFillInfo->interval.intervalUnit, pFillInfo->precision); - numOfRes += 1; } return (numOfRes > maxNumOfRows) ? 
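A minimal sketch (not part of the patch) of the invariant that the new validateAlignOfSlidingWindows asserts: for fixed-length sliding units, the distance between two window keys must be a multiple of the sliding interval; month/year units ('n'/'y') are excluded because their length varies. Simplified stand-in code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* fixed-length units only: keys on the same sliding grid differ by a
   multiple of the sliding interval */
static bool windowsAligned(TSKEY lastKey, TSKEY currentKey,
                           int64_t sliding, char slidingUnit) {
    if (slidingUnit == 'n' || slidingUnit == 'y') return true; /* variable length */
    return (lastKey - currentKey) % sliding == 0;
}

int main(void) {
    printf("%d\n", windowsAligned(10000, 4000, 1000, 'a'));  /* 1: aligned */
    printf("%d\n", windowsAligned(10000, 4500, 1000, 'a'));  /* 0: misaligned,
        e.g. a timezone-like shift; this would trip the assert in the patch */
    return 0;
}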
maxNumOfRows : numOfRes; diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index a3421c801bc184a8c2018ee71b978e9ceea7c097..0d9fdb814a5af9df0901cf0c6d61e2a12554cfea 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c
@@ -975,7 +975,7 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) { return TSDB_CODE_SUCCESS; } -int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool tolower) { +int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool bTolower) { SBufferReader br = tbufInitReader(buf, len, false); uint32_t sType = tbufReadUint32(&br); SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(tType), true, false);
@@ -1158,7 +1158,7 @@ int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint3 t = varDataLen(tmp); pvar = varDataVal(tmp); - if (tolower) { + if (bTolower) { strntolower_s(pvar, pvar, (int32_t)t); } break;
@@ -2746,7 +2746,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t memset(info->blkUnitRes, 0, sizeof(*info->blkUnitRes) * info->unitNum); for (uint32_t k = 0; k < info->unitNum; ++k) { - int32_t index = -1; + int32_t idx = -1; SFilterComUnit *cunit = &info->cunits[k]; if (FILTER_NO_MERGE_DATA_TYPE(cunit->dataType)) {
@@ -2755,16 +2755,16 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t for(int32_t i = 0; i < numOfCols; ++i) { if (pDataStatis[i].colId == cunit->colId) { - index = i; + idx = i; break; } } - if (index == -1) { + if (idx == -1) { continue; } - if (pDataStatis[index].numOfNull <= 0) { + if (pDataStatis[idx].numOfNull <= 0) { if (cunit->optr == TSDB_RELATION_ISNULL) { info->blkUnitRes[k] = -1; rmUnit = 1;
@@ -2777,7 +2777,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t continue; } } else { - if (pDataStatis[index].numOfNull == numOfRows) { + if (pDataStatis[idx].numOfNull == numOfRows) { if (cunit->optr == TSDB_RELATION_ISNULL) { info->blkUnitRes[k] = 1; rmUnit = 1;
@@ -2796,7 +2796,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t continue; } - SDataStatis* pDataBlockst = &pDataStatis[index]; + SDataStatis* pDataBlockst = &pDataStatis[idx]; void *minVal, *maxVal; float minv = 0; float maxv = 0;
@@ -2868,17 +2868,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t for (uint32_t g = 0; g < info->groupNum; ++g) { SFilterGroup *group = &info->groups[g]; + // the first entry stores the block unit count for a group, followed by unitNum blkUnitIdx entries for this group *unitNum = group->unitNum; all = 0; empty = 0; - + + // save the start pointer of this group's unitIdx + uint32_t * pGroupIdx = unitIdx; for (uint32_t u = 0; u < group->unitNum; ++u) { uint32_t uidx = group->unitIdxs[u]; if (info->blkUnitRes[uidx] == 1) { + // blkUnitRes == 1 means the unit is always true, so it need not be compared every time; delete this unit from the group --(*unitNum); all = 1; continue; } else if (info->blkUnitRes[uidx] == -1) { + // blkUnitRes == -1 means the unit is always false, so the whole group is always false; delete this group from blkGroupNum *unitNum = 0; empty = 1; break;
@@ -2888,6 +2893,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t } if (*unitNum == 0) { + // if the unit num is zero, reset unitIdx to the start of this group + unitIdx = pGroupIdx; + --info->blkGroupNum; assert(empty || all);
@@ -3578,17 +3586,17 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
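A minimal sketch (not part of the patch) of the per-block pruning these comments describe: each filter unit is pre-classified against block statistics as always-true (drop the unit), always-false (drop the whole group), or undecided (keep for row-level checks). Simplified stand-in code:

#include <stdint.h>
#include <stdio.h>

/* pre-classification of a unit against block min/max statistics */
enum { UNDECIDED = 0, ALWAYS_TRUE = 1, ALWAYS_FALSE = -1 };

/* prune one group of AND-ed units; returns how many units remain,
   or -1 when the whole group is always false for this block */
static int32_t pruneGroup(const int8_t *unitRes, int32_t numOfUnits) {
    int32_t remain = numOfUnits;
    for (int32_t u = 0; u < numOfUnits; ++u) {
        if (unitRes[u] == ALWAYS_TRUE)  --remain;        /* no need to re-check */
        else if (unitRes[u] == ALWAYS_FALSE) return -1;  /* group can never match */
    }
    return remain;
}

int main(void) {
    int8_t g1[] = {ALWAYS_TRUE, UNDECIDED};   /* one unit left to check */
    int8_t g2[] = {UNDECIDED, ALWAYS_FALSE};  /* whole group dropped */
    printf("%d %d\n", pruneGroup(g1, 2), pruneGroup(g2, 2)); /* 1 -1 */
    return 0;
}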
num void *minVal, *maxVal; for (uint32_t k = 0; k < info->colRangeNum; ++k) { - int32_t index = -1; + int32_t idx = -1; SFilterRangeCtx *ctx = info->colRange[k]; for(int32_t i = 0; i < numOfCols; ++i) { if (pDataStatis[i].colId == ctx->colId) { - index = i; + idx = i; break; } } // no statistics data, load the true data block - if (index == -1) { + if (idx == -1) { break; } @@ -3597,13 +3605,13 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num break; } - if (pDataStatis[index].numOfNull <= 0) { + if (pDataStatis[idx].numOfNull <= 0) { if (ctx->isnull && !ctx->notnull && !ctx->isrange) { ret = false; break; } - } else if (pDataStatis[index].numOfNull > 0) { - if (pDataStatis[index].numOfNull == numOfRows) { + } else if (pDataStatis[idx].numOfNull > 0) { + if (pDataStatis[idx].numOfNull == numOfRows) { if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) { ret = false; break; @@ -3617,7 +3625,7 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num } } - SDataStatis* pDataBlockst = &pDataStatis[index]; + SDataStatis* pDataBlockst = &pDataStatis[idx]; SFilterRangeNode *r = ctx->rs; float minv = 0; diff --git a/src/query/src/qHistogram.c b/src/query/src/qHistogram.c index 8544224a647c0497677814ef448498bbf73fab04..752c7b96a5594ea49a2e11332d332ffa4e4aab37 100644 --- a/src/query/src/qHistogram.c +++ b/src/query/src/qHistogram.c @@ -45,15 +45,15 @@ //} // ////min heap -// void tHeapAdjust(SHeapEntry* pEntry, int32_t index, int32_t len) { +// void tHeapAdjust(SHeapEntry* pEntry, int32_t idx, int32_t len) { // SHeapEntry* ptr = NULL; // // int32_t end = len - 1; // -// SHeapEntry p1 = pEntry[index]; -// int32_t next = index; +// SHeapEntry p1 = pEntry[idx]; +// int32_t next = idx; // -// for(int32_t i=index; i<=(end-1)/2; ) { +// for(int32_t i=idx; i<=(end-1)/2; ) { // int32_t lc = (i<<1) + 1; // int32_t rc = (i+1) << 1; // @@ -119,7 +119,7 @@ // } //} -static int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val); +static int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t idx, double val); SHistogramInfo* tHistogramCreate(int32_t numOfEntries) { /* need one redundant slot */ @@ -191,7 +191,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { tSkipListNode* pResNode = SSkipListPut((*pHisto)->pList, entry, &key, 0); SHistBin* pEntry1 = (SHistBin*)pResNode->pData; - pEntry1->index = -1; + pEntry1->idx = -1; tSkipListNode* pLast = NULL; @@ -209,7 +209,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { SLoserTreeInfo* pTree = (*pHisto)->pLoserTree; (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].pData = pResNode; - pEntry1->index = (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].index; + pEntry1->idx = (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].index; // update the loser tree if ((*pHisto)->ordered) { @@ -390,39 +390,39 @@ static void histogramMergeImpl(SHistBin* pHistBin, int32_t* size) { int32_t oldSize = *size; double delta = DBL_MAX; - int32_t index = -1; + int32_t idx = -1; for (int32_t i = 1; i < oldSize; ++i) { double d = pHistBin[i].val - pHistBin[i - 1].val; if (d < delta) { delta = d; - index = i - 1; + idx = i - 1; } } - SHistBin* s1 = &pHistBin[index]; - SHistBin* s2 = &pHistBin[index + 1]; + SHistBin* s1 = &pHistBin[idx]; + SHistBin* s2 = &pHistBin[idx + 1]; double newVal = (s1->val * s1->num + s2->val * s2->num) / (s1->num + s2->num); s1->val = newVal; s1->num = s1->num + s2->num; - memmove(&pHistBin[index + 1], 
&pHistBin[index + 2], (oldSize - index - 2) * sizeof(SHistBin)); + memmove(&pHistBin[idx + 1], &pHistBin[idx + 2], (oldSize - idx - 2) * sizeof(SHistBin)); (*size) -= 1; #endif } /* optimize this procedure */ -int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { +int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t idx, double val) { #if defined(USE_ARRAYLIST) - int32_t remain = pHisto->numOfEntries - index; + int32_t remain = pHisto->numOfEntries - idx; if (remain > 0) { - memmove(&pHisto->elems[index + 1], &pHisto->elems[index], sizeof(SHistBin) * remain); + memmove(&pHisto->elems[idx + 1], &pHisto->elems[idx], sizeof(SHistBin) * remain); } - assert(index >= 0 && index <= pHisto->maxEntries); + assert(idx >= 0 && idx <= pHisto->maxEntries); - pHisto->elems[index].num = 1; - pHisto->elems[index].val = val; + pHisto->elems[idx].num = 1; + pHisto->elems[idx].val = val; pHisto->numOfEntries += 1; /* we need to merge the slot */ diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index 8428c339f4e89d6a5e988448f3aadadf522102b1..0210888b178c2b73c913cf28a45fa9eb11d8ad12 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -122,54 +122,54 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { int64_t v = 0; GET_TYPED_DATA(v, int64_t, pBucket->type, value); - int32_t index = -1; + int32_t idx = -1; if (v > pBucket->range.i64MaxVal || v < pBucket->range.i64MinVal) { - return index; + return idx; } // divide the value range into 1024 buckets uint64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal; if (span < pBucket->numOfSlots) { int64_t delta = v - pBucket->range.i64MinVal; - index = (delta % pBucket->numOfSlots); + idx = (delta % pBucket->numOfSlots); } else { double slotSpan = (double)span / pBucket->numOfSlots; - index = (int32_t)(((double)v - pBucket->range.i64MinVal) / slotSpan); - if (index == pBucket->numOfSlots) { - index -= 1; + idx = (int32_t)(((double)v - pBucket->range.i64MinVal) / slotSpan); + if (idx == pBucket->numOfSlots) { + idx -= 1; } } - assert(index >= 0 && index < pBucket->numOfSlots); - return index; + assert(idx >= 0 && idx < pBucket->numOfSlots); + return idx; } int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) { uint64_t v = 0; GET_TYPED_DATA(v, uint64_t, pBucket->type, value); - int32_t index = -1; + int32_t idx = -1; if (v > pBucket->range.u64MaxVal || v < pBucket->range.u64MinVal) { - return index; + return idx; } // divide the value range into 1024 buckets uint64_t span = pBucket->range.u64MaxVal - pBucket->range.u64MinVal; if (span < pBucket->numOfSlots) { int64_t delta = v - pBucket->range.u64MinVal; - index = (int32_t) (delta % pBucket->numOfSlots); + idx = (int32_t) (delta % pBucket->numOfSlots); } else { double slotSpan = (double)span / pBucket->numOfSlots; - index = (int32_t)(((double)v - pBucket->range.u64MinVal) / slotSpan); - if (index == pBucket->numOfSlots) { - index -= 1; + idx = (int32_t)(((double)v - pBucket->range.u64MinVal) / slotSpan); + if (idx == pBucket->numOfSlots) { + idx -= 1; } } - assert(index >= 0 && index < pBucket->numOfSlots); - return index; + assert(idx >= 0 && idx < pBucket->numOfSlots); + return idx; } int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { @@ -180,27 +180,27 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { v = GET_DOUBLE_VAL(value); } - int32_t index = -1; + int32_t idx = -1; if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { - return index; + return idx; } 
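A minimal sketch (not part of the patch) of the merge step shown in histogramMergeImpl above: find the adjacent pair of bins whose centers are closest and fuse them into one weighted bin, shrinking the histogram by one slot:

#include <float.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct { double val; int64_t num; } HistBin;

/* merge the two adjacent bins whose centers are closest */
static void mergeClosest(HistBin *bins, int32_t *size) {
    double delta = DBL_MAX;
    int32_t idx = -1;
    for (int32_t i = 1; i < *size; ++i) {
        double d = bins[i].val - bins[i - 1].val;
        if (d < delta) { delta = d; idx = i - 1; }
    }
    HistBin *s1 = &bins[idx], *s2 = &bins[idx + 1];
    s1->val = (s1->val * s1->num + s2->val * s2->num) / (s1->num + s2->num);
    s1->num += s2->num;
    memmove(&bins[idx + 1], &bins[idx + 2], (*size - idx - 2) * sizeof(HistBin));
    *size -= 1;
}

int main(void) {
    HistBin bins[] = {{1.0, 2}, {1.2, 2}, {9.0, 1}};
    int32_t size = 3;
    mergeClosest(bins, &size);       /* fuses the 1.0 and 1.2 bins */
    printf("%d bins, first = %.2f (n=%lld)\n", size, bins[0].val,
           (long long)bins[0].num);  /* 2 bins, first = 1.10 (n=4) */
    return 0;
}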
// divide a range of [dMinVal, dMaxVal] into 1024 buckets double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (span < pBucket->numOfSlots) { int32_t delta = (int32_t)(v - pBucket->range.dMinVal); - index = (delta % pBucket->numOfSlots); + idx = (delta % pBucket->numOfSlots); } else { double slotSpan = span / pBucket->numOfSlots; - index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); - if (index == pBucket->numOfSlots) { - index -= 1; + idx = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); + if (idx == pBucket->numOfSlots) { + idx -= 1; } } - assert(index >= 0 && index < pBucket->numOfSlots); - return index; + assert(idx >= 0 && idx < pBucket->numOfSlots); + return idx; } static __perc_hash_func_t getHashFunc(int32_t type) { @@ -332,18 +332,18 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { for (int32_t i = 0; i < size; ++i) { char *d = (char *) data + i * bytes; - int32_t index = (pBucket->hashFunc)(pBucket, d); - if (index < 0) { + int32_t idx = (pBucket->hashFunc)(pBucket, d); + if (idx < 0) { continue; } count += 1; - tMemBucketSlot *pSlot = &pBucket->pSlots[index]; + tMemBucketSlot *pSlot = &pBucket->pSlots[idx]; tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type); // ensure available memory pages to allocate - int32_t groupId = getGroupId(pBucket->numOfSlots, index, pBucket->times); + int32_t groupId = getGroupId(pBucket->numOfSlots, idx, pBucket->times); int32_t pageId = -1; if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) { @@ -387,7 +387,7 @@ static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int3 return pMemBucket->pSlots[j].range; } -static bool isIdenticalData(tMemBucket *pMemBucket, int32_t index); +static bool isIdenticalData(tMemBucket *pMemBucket, int32_t idx); static double getIdenticalDataVal(tMemBucket* pMemBucket, int32_t slotIndex) { assert(isIdenticalData(pMemBucket, slotIndex)); @@ -532,8 +532,8 @@ double getPercentile(tMemBucket *pMemBucket, double percent) { /* * check if data in one slot are all identical only need to compare with the bounding box */ -bool isIdenticalData(tMemBucket *pMemBucket, int32_t index) { - tMemBucketSlot *pSeg = &pMemBucket->pSlots[index]; +bool isIdenticalData(tMemBucket *pMemBucket, int32_t idx) { + tMemBucketSlot *pSeg = &pMemBucket->pSlots[idx]; if (IS_FLOAT_TYPE(pMemBucket->type)) { return fabs(pSeg->range.dMaxVal - pSeg->range.dMinVal) < DBL_EPSILON; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index 95c7f81ed68d0ef8f303ee45deda89e347d163d9..eda920063f503d1795f4a5cf86cd925c67501140 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -126,9 +126,9 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo* for (int32_t i = 0; i < numOfCols; ++i) { SColumn* pCol = taosArrayGetP(tableCols, i); - SColumnIndex index = {.tableIndex = 0, .columnIndex = pCol->columnIndex}; - STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, index.tableIndex); - SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes, + SColumnIndex idx = {.tableIndex = 0, .columnIndex = pCol->columnIndex}; + STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, idx.tableIndex); + SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &idx, pCol->info.type, pCol->info.bytes, pCol->info.colId, 0, TSDB_COL_NORMAL); strncpy(p->base.aliasName, pSchema[pCol->columnIndex].name, tListLen(p->base.aliasName)); diff --git 
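A minimal sketch (not part of the patch) of the slot mapping shared by the tBucket*Hash functions: values outside the recorded range map to -1, otherwise the span is divided into numOfSlots equal sub-ranges, with the top edge clamped into the last slot. Simplified to the double case:

#include <stdint.h>
#include <stdio.h>

/* map v in [minVal, maxVal] to one of numOfSlots buckets */
static int32_t slotOf(double v, double minVal, double maxVal, int32_t numOfSlots) {
    if (v < minVal || v > maxVal) return -1;  /* outside the recorded range */
    double slotSpan = (maxVal - minVal) / numOfSlots;
    int32_t idx = (int32_t)((v - minVal) / slotSpan);
    if (idx == numOfSlots) idx -= 1;          /* clamp v == maxVal */
    return idx;
}

int main(void) {
    /* range [0, 1024] split into 1024 slots, like the percentile bucket */
    printf("%d\n", slotOf(0.0,    0, 1024, 1024));  /* 0 */
    printf("%d\n", slotOf(1024.0, 0, 1024, 1024));  /* 1023 (clamped) */
    printf("%d\n", slotOf(-1.0,   0, 1024, 1024));  /* -1 */
    return 0;
}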
a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index cee5130651c6c8ff9e2db321592dc73503454da3..fe459ee460bbbcc0072f647b88f9a9ef51117a2a 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -857,8 +857,8 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) { return pList; } -SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { - if (pList == NULL || pVar == NULL || index >= taosArrayGetSize(pList)) { +SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t idx) { + if (pList == NULL || pVar == NULL || idx >= taosArrayGetSize(pList)) { return tVariantListAppend(NULL, pVar, sortOrder); } @@ -867,7 +867,7 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int item.pVar = *pVar; item.sortOrder = sortOrder; - taosArrayInsert(pList, index, &item); + taosArrayInsert(pList, idx, &item); return pList; } @@ -878,7 +878,8 @@ SRelationInfo *setTableNameList(SRelationInfo* pRelationInfo, SStrToken *pName, } pRelationInfo->type = SQL_NODE_FROM_TABLELIST; - SRelElementPair p = {.tableName = *pName}; + SRelElementPair p; + p.tableName = *pName; if (pAlias != NULL) { p.aliasName = *pAlias; } else { @@ -917,7 +918,8 @@ SRelationInfo* addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrT pRelationInfo->type = SQL_NODE_FROM_SUBQUERY; - SRelElementPair p = {.pSubquery = pSub}; + SRelElementPair p; + p.pSubquery = pSub; if (pAlias != NULL) { p.aliasName = *pAlias; } else { @@ -1181,6 +1183,10 @@ void destroySqlNode(SSqlNode *pSqlNode) { pSqlNode->fillType = NULL; tSqlExprDestroy(pSqlNode->pHaving); + + tSqlExprDestroy(pSqlNode->pRange.start); + tSqlExprDestroy(pSqlNode->pRange.end); + free(pSqlNode); } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 1628c2d5111268111ca88cbe511095e8334453ed..60ae900eb75d14e1d84100898154c593a00c1020 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -183,9 +183,9 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16 } // TODO refactor: use macro -SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t* offset) { - assert(index >= 0 && offset != NULL); - return (SResultRowCellInfo*)((char*) pRow->pCellInfo + offset[index]); +SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t idx, int32_t* offset) { + assert(idx >= 0 && offset != NULL); + return (SResultRowCellInfo*)((char*) pRow->pCellInfo + offset[idx]); } size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) { diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index b2941a8fe00511902165403bd0e36f6cad8219af..7187637b705a0adb39b1da08a90989964139c746 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -320,7 +320,6 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex pthread_mutex_lock(&pQInfo->lock); - assert(pQInfo->rspContext == NULL); if (pQInfo->dataReady == QUERY_RESULT_READY) { *buildRes = true; qDebug("QInfo:0x%"PRIx64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQueryAttr->resultRowSize, @@ -597,7 +596,7 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { //kill by qid int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) { - int32_t error = TSDB_CODE_SUCCESS; + int32_t err = TSDB_CODE_SUCCESS; void** handle = qAcquireQInfo(pMgmt, qId); if(handle == NULL) return terrno; @@ -613,13 +612,13 @@ int32_t 
qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo while (pQInfo->owner != 0) { taosMsleep(waitMs); if(loop++ > waitCount){ - error = TSDB_CODE_FAILED; + err = TSDB_CODE_FAILED; break; } } qReleaseQInfo(pMgmt, (void **)&handle, true); - return error; + return err; } // local struct @@ -643,7 +642,7 @@ static int compareLongQuery(const void* p1, const void* p2) { } // callback for taosCacheRefresh -static void cbFoundItem(void* handle, void* param1) { +static void cbFoundLongQuery(void* handle, void* param1) { SQInfo * qInfo = *(SQInfo**) handle; if(qInfo == NULL) return ; SArray* qids = (SArray*) param1; @@ -655,7 +654,7 @@ static void cbFoundItem(void* handle, void* param1) { SMemTable* imem = qInfo->query.memRef.snapshot.imem; if(mem == NULL || T_REF_VAL_GET(mem) == 0) usedMem = false; - if(imem == NULL || T_REF_VAL_GET(mem) == 0) + if(imem == NULL || T_REF_VAL_GET(imem) == 0) usedIMem = false ; if(!usedMem && !usedIMem) @@ -676,7 +675,7 @@ void* qObtainLongQuery(void* param){ SArray* qids = taosArrayInit(4, sizeof(int64_t*)); if(qids == NULL) return NULL; // Get each item - taosCacheRefresh(qMgmt->qinfoPool, cbFoundItem, qids); + taosCacheRefresh(qMgmt->qinfoPool, cbFoundLongQuery, qids); size_t cnt = taosArrayGetSize(qids); if(cnt == 0) { diff --git a/src/rpc/inc/rpcTcp.h b/src/rpc/inc/rpcTcp.h index 6ef8fc2d921a3379532bbc0efd2f226ef3389fc5..a47fa39ceb3b3a642c7839902885cbc8728abe16 100644 --- a/src/rpc/inc/rpcTcp.h +++ b/src/rpc/inc/rpcTcp.h @@ -32,6 +32,8 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin void taosCloseTcpConnection(void *chandle); int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chandle); +SOCKET taosGetFdID(void *chandle); + #ifdef __cplusplus } #endif diff --git a/src/rpc/src/rpcCache.c b/src/rpc/src/rpcCache.c index 60a12c26b78626ed81cbc182d76c836c6ee74498..d18aa12c13ef30b62dc286008008a54f0de5778e 100644 --- a/src/rpc/src/rpcCache.c +++ b/src/rpc/src/rpcCache.c @@ -49,7 +49,7 @@ static int rpcHashConn(void *handle, char *fqdn, uint16_t port, int8_t connType static void rpcLockCache(int64_t *lockedBy); static void rpcUnlockCache(int64_t *lockedBy); static void rpcCleanConnCache(void *handle, void *tmrId); -static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t time); +static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t timeStamp); void *rpcOpenConnCache(int maxSessions, void (*cleanFp)(void *), void *tmrCtrl, int64_t keepTimer) { SConnHash **connHashList; @@ -118,7 +118,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in SConnHash * pNode; SConnCache *pCache; - uint64_t time = taosGetTimestampMs(); + uint64_t timeStamp = taosGetTimestampMs(); pCache = (SConnCache *)handle; assert(pCache); @@ -131,7 +131,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in pNode->connType = connType; pNode->data = data; pNode->prev = NULL; - pNode->time = time; + pNode->time = timeStamp; rpcLockCache(pCache->lockedBy+hash); @@ -140,7 +140,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in pCache->connHashList[hash] = pNode; pCache->count[hash]++; - rpcRemoveExpiredNodes(pCache, pNode->next, hash, time); + rpcRemoveExpiredNodes(pCache, pNode->next, hash, timeStamp); rpcUnlockCache(pCache->lockedBy+hash); @@ -159,15 +159,15 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy pCache = (SConnCache 
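A minimal sketch (not part of the patch) of the wait pattern qKillQueryByQId uses above: after setting the kill flag, poll the owner field with a bounded number of sleeps and report failure if the query never releases ownership. The function pointers stand in for taosMsleep and the owner read; all names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CODE_SUCCESS 0
#define CODE_FAILED  (-1)

/* poll until the query releases ownership or the wait budget runs out */
static int32_t waitQueryStop(uint64_t (*ownerFn)(void), void (*sleepFn)(int),
                             int32_t waitMs, int32_t waitCount) {
    int32_t loop = 0;
    while (ownerFn() != 0) {
        sleepFn(waitMs);
        if (loop++ > waitCount) return CODE_FAILED;  /* still running: give up */
    }
    return CODE_SUCCESS;
}

/* trivial test doubles: the "query" lets go after three sleeps */
static int calls = 0;
static uint64_t fakeOwner(void) { return calls < 3 ? 1 : 0; }
static void fakeSleep(int ms) { (void)ms; ++calls; }

int main(void) {
    printf("%d\n", waitQueryStop(fakeOwner, fakeSleep, 10, 100)); /* 0 */
    return 0;
}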
*)handle; assert(pCache); - uint64_t time = taosGetTimestampMs(); + uint64_t timeStamp = taosGetTimestampMs(); hash = rpcHashConn(pCache, fqdn, port, connType); rpcLockCache(pCache->lockedBy+hash); pNode = pCache->connHashList[hash]; while (pNode) { - if (time >= pCache->keepTimer + pNode->time) { - rpcRemoveExpiredNodes(pCache, pNode, hash, time); + if (timeStamp >= pCache->keepTimer + pNode->time) { + rpcRemoveExpiredNodes(pCache, pNode, hash, timeStamp); pNode = NULL; break; } @@ -178,7 +178,7 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy } if (pNode) { - rpcRemoveExpiredNodes(pCache, pNode->next, hash, time); + rpcRemoveExpiredNodes(pCache, pNode->next, hash, timeStamp); if (pNode->prev) { pNode->prev->next = pNode->next; @@ -217,12 +217,12 @@ static void rpcCleanConnCache(void *handle, void *tmrId) { if (pCache->pTimer != tmrId) return; pthread_mutex_lock(&pCache->mutex); - uint64_t time = taosGetTimestampMs(); + uint64_t timeStamp = taosGetTimestampMs(); for (hash = 0; hash < pCache->maxSessions; ++hash) { rpcLockCache(pCache->lockedBy+hash); pNode = pCache->connHashList[hash]; - rpcRemoveExpiredNodes(pCache, pNode, hash, time); + rpcRemoveExpiredNodes(pCache, pNode, hash, timeStamp); rpcUnlockCache(pCache->lockedBy+hash); } @@ -231,8 +231,8 @@ static void rpcCleanConnCache(void *handle, void *tmrId) { pthread_mutex_unlock(&pCache->mutex); } -static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t time) { - if (pNode == NULL || (time < pCache->keepTimer + pNode->time) ) return; +static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t timeStamp) { + if (pNode == NULL || (timeStamp < pCache->keepTimer + pNode->time) ) return; SConnHash *pPrev = pNode->prev, *pNext; diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 95931fcbc6f46bac1e535e4684750ab1874e8f0c..6566bf9e79f3f58557a80c037e99027cd19753ba 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -69,6 +69,13 @@ typedef struct { struct SRpcConn *connList; // connection list } SRpcInfo; +typedef struct SSendInfo { + void *pContext; + void *pConn; + void *pFdObj; + SOCKET fd; +} SSendInfo; + typedef struct { SRpcInfo *pRpc; // associated SRpcInfo SRpcEpSet epSet; // ip list provided by app @@ -86,6 +93,7 @@ typedef struct { SRpcMsg *pRsp; // for synchronous API tsem_t *pSem; // for synchronous API SRpcEpSet *pSet; // for synchronous API + SSendInfo sendInfo; // save last send information char msg[0]; // RpcHead starts from here } SRpcReqContext; @@ -124,6 +132,7 @@ typedef struct SRpcConn { int8_t connType; // connection type int64_t lockedBy; // lock for connection SRpcReqContext *pContext; // request context + int64_t rid; // probe msg use rid get pContext } SRpcConn; int tsRpcMaxUdpSize = 15000; // bytes @@ -193,10 +202,10 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc); static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv); static SRpcConn *rpcGetConnObj(SRpcInfo *pRpc, int sid, SRecvInfo *pRecv); -static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext); +static TBOOL rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext); static void rpcSendQuickRsp(SRpcConn *pConn, int32_t code); static void rpcSendErrorMsgToPeer(SRecvInfo *pRecv, int32_t code); -static void rpcSendMsgToPeer(SRpcConn *pConn, void *data, int dataLen); +static bool rpcSendMsgToPeer(SRpcConn *pConn, void *data, int dataLen); static void rpcSendReqHead(SRpcConn *pConn); static void 
*rpcProcessMsgFromPeer(SRecvInfo *pRecv);
@@ -361,8 +370,8 @@ void *rpcMallocCont(int contLen) { void rpcFreeCont(void *cont) { if (cont) { char *temp = ((char *)cont) - sizeof(SRpcHead) - sizeof(SRpcReqContext); - free(temp); tTrace("free mem: %p", temp); + free(temp); } }
@@ -385,7 +394,7 @@ void *rpcReallocCont(void *ptr, int contLen) { return start + sizeof(SRpcReqContext) + sizeof(SRpcHead); } -void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { +TBOOL rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { SRpcInfo *pRpc = (SRpcInfo *)shandle; SRpcReqContext *pContext;
@@ -415,7 +424,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 pContext->rid = taosAddRef(tsRpcRefId, pContext); if (pRid) *pRid = pContext->rid; - rpcSendReqToServer(pRpc, pContext); + return rpcSendReqToServer(pRpc, pContext); } void rpcSendResponse(const SRpcMsg *pRsp) {
@@ -573,8 +582,8 @@ void rpcCancelRequest(int64_t rid) { static void rpcFreeMsg(void *msg) { if ( msg ) { char *temp = (char *)msg - sizeof(SRpcReqContext); - free(temp); tTrace("free mem: %p", temp); + free(temp); } }
@@ -980,6 +989,10 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont return NULL; } + if (pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN || pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN_RSP) { + return pConn; + } + rpcLockConn(pConn); if (rpcIsReq(pHead->msgType)) {
@@ -1076,6 +1089,58 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) { rpcUnlockConn(pConn); } +// process a probe msg: reply to TSDB_MSG_TYPE_PROBE_CONN and dispatch TSDB_MSG_TYPE_PROBE_CONN_RSP +static void rpcProcessProbeMsg(SRecvInfo *pRecv, SRpcConn *pConn) { + SRpcHead *pHead = (SRpcHead *)pRecv->msg; + uint64_t ahandle = pHead->ahandle; + if (pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN) { + // respond to the probe + char msg[RPC_MSG_OVERHEAD]; + SRpcHead *pRspHead; + + // set msg header + memset(msg, 0, sizeof(SRpcHead)); + pRspHead = (SRpcHead *)msg; + + pRspHead->msgType = TSDB_MSG_TYPE_PROBE_CONN_RSP; + pRspHead->version = 1; + pRspHead->ahandle = pHead->ahandle; + pRspHead->tranId = pHead->tranId; + pRspHead->code = 0; + pRspHead->linkUid = pHead->linkUid; + + rpcLockConn(pConn); + pRspHead->sourceId = pConn->ownId; + pRspHead->destId = pConn->peerId; + memcpy(pRspHead->user, pHead->user, tListLen(pHead->user)); + + bool ret = rpcSendMsgToPeer(pConn, pRspHead, sizeof(SRpcHead)); + tInfo("PROBE 0x%" PRIx64 " recv probe msg and do response. ret=%d", ahandle, ret); + + rpcUnlockConn(pConn); + rpcFreeMsg(pRecv->msg); + } else if (pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN_RSP) { + if(pConn) { + rpcLockConn(pConn); + // get the request context + SRpcReqContext *pContext = taosAcquireRef(tsRpcRefId, pConn->rid); + + if (pContext) { + rpcProcessIncomingMsg(pConn, pHead, pContext); + taosReleaseRef(tsRpcRefId, pConn->rid); + } else { + tInfo("PROBE 0x%" PRIx64 " recv response probe msg but pContext is NULL. pConn->rid=0x%" PRIX64, ahandle, pConn->rid); + rpcFreeMsg(pRecv->msg); + } + + rpcUnlockConn(pConn); + } else { + tInfo("PROBE 0x%" PRIx64 " recv response probe msg but pConn is NULL.", ahandle); + rpcFreeMsg(pRecv->msg); + } + } +} + static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { SRpcHead *pHead = (SRpcHead *)pRecv->msg; SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle;
@@ -1095,6 +1160,12 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { SRpcReqContext *pContext; pConn = rpcProcessMsgHead(pRpc, pRecv, &pContext); + // handle probe msg + if (pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN || pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN_RSP) { + rpcProcessProbeMsg(pRecv, pConn); + return pConn; + } + if (pHead->msgType >= 1 && pHead->msgType < TSDB_MSG_TYPE_MAX) { tDebug("%s %p %p, %s received from 0x%x:%hu, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType], pRecv->ip, pRecv->port, terrno, pRecv->msgLen,
@@ -1147,7 +1218,10 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { } // free the request message - taosRemoveRef(tsRpcRefId, pContext->rid); + if(pMsg->msgType != TSDB_MSG_TYPE_PROBE_CONN && pMsg->msgType != TSDB_MSG_TYPE_PROBE_CONN_RSP) { + taosRemoveRef(tsRpcRefId, pContext->rid); + } + } static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte
@@ -1185,6 +1259,14 @@ // it's a response rpcMsg.handle = pContext; rpcMsg.ahandle = pContext->ahandle; + + if (pHead->msgType == TSDB_MSG_TYPE_PROBE_CONN_RSP) { + // probe msg, just notify the client + rpcNotifyClient(pContext, &rpcMsg); + return ; + } + + // reset pConn to NULL + pContext->pConn = NULL; // for UDP, port may be changed by server, the port in epSet shall be used for cache
@@ -1302,7 +1384,7 @@ static void rpcSendErrorMsgToPeer(SRecvInfo *pRecv, int32_t code) { return; } -static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { +static TBOOL rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { SRpcHead *pHead = rpcHeadFromCont(pContext->pCont); char *msg = (char *)pHead; int msgLen = rpcMsgLenFromCont(pContext->contLen);
@@ -1312,8 +1394,9 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { SRpcConn *pConn = rpcSetupConnToServer(pContext); if (pConn == NULL) { pContext->code = terrno; + // in rpcProcessConnError, if numOfTry exceeds the limit, rpcNotifyClient may be called to stop the query taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl); - return; + return BOOL_ASYNC; } pContext->pConn = pConn;
@@ -1341,17 +1424,35 @@ pConn->pReqMsg = msg; pConn->reqMsgLen = msgLen; pConn->pContext = pContext; + if(pContext) + pConn->rid = pContext->rid; - rpcSendMsgToPeer(pConn, msg, msgLen); + // save the send info for later probing + pContext->sendInfo.pConn = pConn; + pContext->sendInfo.pFdObj = pConn->chandle; + pContext->sendInfo.fd = taosGetFdID(pConn->chandle); + + bool ret = rpcSendMsgToPeer(pConn, msg, msgLen); if (pConn->connType != RPC_CONN_TCPC) taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer); rpcUnlockConn(pConn); + + if(ret == BOOL_FALSE) { + // try the next ip + pContext->code = terrno; + // in rpcProcessConnError, if numOfTry exceeds the limit, rpcNotifyClient may be called to stop the query + taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl); + return BOOL_ASYNC; + } + + return BOOL_TRUE; } -static void 
rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { +static bool rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { int writtenLen = 0; SRpcHead *pHead = (SRpcHead *)msg; + bool ret = true; msgLen = rpcAddAuthPart(pConn, msg, msgLen); @@ -1371,9 +1472,11 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { if (writtenLen != msgLen) { tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno)); + ret = false; } tDump(msg, msgLen); + return ret; } static void rpcProcessConnError(void *param, void *id) { @@ -1385,8 +1488,6 @@ static void rpcProcessConnError(void *param, void *id) { return; } - tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); - if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; @@ -1394,9 +1495,11 @@ static void rpcProcessConnError(void *param, void *id) { rpcMsg.pCont = NULL; rpcMsg.contLen = 0; + tWarn("%s %p, connection error. notify client query over. numOfTry=%d msgType=%d", pRpc->label, pContext->ahandle, pContext->numOfTry, pContext->msgType); rpcNotifyClient(pContext, &rpcMsg); } else { // move to next IP + tWarn("%s %p, connection error. retry to send request again. numOfTry=%d msgType=%d", pRpc->label, pContext->ahandle, pContext->numOfTry, pContext->msgType); pContext->epSet.inUse++; pContext->epSet.inUse = pContext->epSet.inUse % pContext->epSet.numOfEps; rpcSendReqToServer(pRpc, pContext); @@ -1683,4 +1786,110 @@ int32_t rpcUnusedSession(void * rpcInfo, bool bLock) { if(info == NULL) return 0; return taosIdPoolNumOfFree(info->idPool, bLock); +} + +bool doRpcSendProbe(SRpcConn *pConn) { + char msg[RPC_MSG_OVERHEAD]; + SRpcHead *pHead; + int code = 0; + + // set msg header + memset(msg, 0, sizeof(SRpcHead)); + pHead = (SRpcHead *)msg; + pHead->version = 1; + pHead->msgVer = htonl(tsVersion >> 8); + pHead->msgType = TSDB_MSG_TYPE_PROBE_CONN; + pHead->spi = pConn->spi; + pHead->encrypt = 0; + pHead->tranId = (uint16_t)(taosRand() & 0xFFFF); // rand + pHead->sourceId = pConn->ownId; + pHead->destId = pConn->peerId; + pHead->linkUid = pConn->linkUid; + pHead->ahandle = (uint64_t)pConn->ahandle; + memcpy(pHead->user, pConn->user, tListLen(pHead->user)); + pHead->code = htonl(code); + + bool ret = rpcSendMsgToPeer(pConn, msg, sizeof(SRpcHead) + sizeof(int32_t)); + + return ret; +} + +// send server syn +bool rpcSendProbe(int64_t rpcRid, void* pPrevContext, bool *pReqOver) { + // return false can kill query + bool ret = false; + if(rpcRid < 0) { + tError("PROBE rpcRid=0x%" PRIx64 " less than zero, invalid.", rpcRid); + return true; + } + + // get req content + SRpcReqContext *pContext = taosAcquireRef(tsRpcRefId, rpcRid); + if (pContext == NULL) { + tError("PROBE rpcRid=0x%" PRIx64 " get context NULL. sql finished no need send probe.", rpcRid); + return true; + } + + // context same + if(pContext != pPrevContext) { + tError("PROBE rpcRid=0x%" PRIx64 " context diff. pContext=%p pPreContent=%p", rpcRid, pContext, pPrevContext); + goto _END; + } + + // conn same + if(pContext->pConn == NULL) { + tInfo("PROBE rpcRid=0x%" PRIx64 " reqContext->pConn is NULL. The req is finished.", rpcRid); + if (pReqOver) + *pReqOver = true; + + ret = true; + goto _END; + } else if (pContext->pConn != pContext->sendInfo.pConn) { + tInfo("PROBE rpcRid=0x%" PRIx64 " connect obj diff. 
pContext->pConn=%p pPreConn=%p", rpcRid, pContext->pConn, pContext->sendInfo.pConn); + goto _END; + } + + // fdObj same + if (pContext->pConn->chandle != pContext->sendInfo.pFdObj) { + tInfo("PROBE rpcRid=0x%" PRIx64 " connect fdObj diff. pContext->pConn->chandle=%p pPrevFdObj=%p", rpcRid, pContext->pConn->chandle, pContext->sendInfo.pFdObj); + goto _END; + } + + // fd same + SOCKET fd = taosGetFdID(pContext->pConn->chandle); + if (fd != pContext->sendInfo.fd) { + tInfo("PROBE rpcRid=0x%" PRIx64 " connect fd diff.fd=%d prevFd=%d", rpcRid, fd, pContext->sendInfo.fd); + goto _END; + } + + // send syn + if (!doRpcSendProbe(pContext->pConn)) { + tError("PROBE rpcRid=0x%" PRIx64 " fd=%d rpc send probe data error.", rpcRid, fd); + } + ret = true; + +_END: + // put back req context + taosReleaseRef(tsRpcRefId, rpcRid); + return ret; +} + +// after sql request send , save conn info +bool rpcSaveSendInfo(int64_t rpcRid, void** ppContext) { + if(rpcRid < 0) { + tError("PROBE saveSendInfo rpcRid=0x%" PRIx64 " less than zero, invalid.", rpcRid); + return false; + } + // get req content + SRpcReqContext *pContext = taosAcquireRef(tsRpcRefId, rpcRid); + if (pContext == NULL) { + tError("PROBE saveSendInfo rpcRid=0x%" PRIx64 " get context NULL.", rpcRid); + return false; + } + + if (ppContext) + *ppContext = pContext; + + taosReleaseRef(tsRpcRefId, rpcRid); + return true; } \ No newline at end of file diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 740a1e2b7d2784347b19be328319fc19f417f25d..43c386b8b50ab79aa51403b1e71d109619cdb935 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -32,6 +32,7 @@ typedef struct SFdObj { struct SThreadObj *pThreadObj; struct SFdObj *prev; struct SFdObj *next; + uint64_t ctime; // create time } SFdObj; typedef struct SThreadObj { @@ -280,6 +281,7 @@ static void *taosAcceptTcpConnection(void *arg) { if (pFdObj) { pFdObj->ip = caddr.sin_addr.s_addr; pFdObj->port = htons(caddr.sin_port); + pFdObj->ctime = taosGetTimestampMs(); tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); } else { @@ -392,9 +394,9 @@ void taosCleanUpTcpClient(void *chandle) { void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) { SClientObj * pClientObj = shandle; - int32_t index = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads; - atomic_store_32(&pClientObj->index, index + 1); - SThreadObj *pThreadObj = pClientObj->pThreadObj[index]; + int32_t idx = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads; + atomic_store_32(&pClientObj->index, idx + 1); + SThreadObj *pThreadObj = pClientObj->pThreadObj[idx]; SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip); #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) @@ -403,12 +405,12 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin if (fd <= 0) return NULL; #endif - struct sockaddr_in sin; + struct sockaddr_in sockin; uint16_t localPort = 0; - unsigned int addrlen = sizeof(sin); - if (getsockname(fd, (struct sockaddr *)&sin, &addrlen) == 0 && - sin.sin_family == AF_INET && addrlen == sizeof(sin)) { - localPort = (uint16_t)ntohs(sin.sin_port); + unsigned int addrlen = sizeof(sockin); + if (getsockname(fd, (struct sockaddr *)&sockin, &addrlen) == 0 && + sockin.sin_family == AF_INET && addrlen == sizeof(sockin)) { + localPort = (uint16_t)ntohs(sockin.sin_port); } SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd); 
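The probe path above rests on one invariant: a TCP connection can be torn down and its slot, fd object, or fd number recycled while a long-running query is still waiting, so the sender records the identity of the channel at send time and a later probe goes out only when the context, connection object, fd object, and raw fd all still match. A minimal standalone sketch of that pattern (hypothetical Channel/SendInfo types and names, not the structs from this patch):

#include <stdbool.h>
#include <stdio.h>

typedef struct Channel { int fd; } Channel;

typedef struct SendInfo {      // captured when the request is sent
  void    *pConn;              // connection object in use at send time
  Channel *pChan;              // fd wrapper in use at send time
  int      fd;                 // raw fd in use at send time
} SendInfo;

// Returns true only if every identity captured at send time still matches,
// i.e. the original channel was not closed and recycled in the meantime.
static bool channelUnchanged(const SendInfo *saved, void *curConn, Channel *curChan) {
  if (curConn == NULL) return false;          // request already finished
  if (curConn != saved->pConn) return false;  // reconnected on another conn
  if (curChan != saved->pChan) return false;  // fd object was replaced
  if (curChan->fd != saved->fd) return false; // fd number was recycled
  return true;
}

int main(void) {
  Channel ch = { .fd = 7 };
  void *conn = &ch;  // stand-in connection object
  SendInfo saved = { .pConn = conn, .pChan = &ch, .fd = ch.fd };

  printf("probe allowed: %d\n", channelUnchanged(&saved, conn, &ch)); // 1
  ch.fd = 9;  // simulate the fd being closed and reused
  printf("probe allowed: %d\n", channelUnchanged(&saved, conn, &ch)); // 0
  return 0;
}

Any mismatch means the original channel is gone and a probe written to the current fd could reach an unrelated peer, so the probe is skipped and only the identity drift is logged.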
@@ -417,6 +419,7 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin pFdObj->thandle = thandle; pFdObj->port = port; pFdObj->ip = ip; + pFdObj->ctime = taosGetTimestampMs(); tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", pThreadObj->label, thandle, ip, port, localPort, pFdObj, pThreadObj->numOfFds); } else { @@ -432,7 +435,7 @@ void taosCloseTcpConnection(void *chandle) { if (pFdObj == NULL || pFdObj->signature != pFdObj) return; SThreadObj *pThreadObj = pFdObj->pThreadObj; - tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj); + tDebug("DEEP %s shutdown3 fd=%d ip=0x%x port=%d ctime=%" PRId64, pThreadObj->label, pFdObj->fd, pFdObj->ip, pFdObj->port, pFdObj->ctime); // pFdObj->thandle = NULL; pFdObj->closedByApp = 1; @@ -441,11 +444,28 @@ int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chandle) { SFdObj *pFdObj = chandle; - if (pFdObj == NULL || pFdObj->signature != pFdObj) return -1; + if (pFdObj == NULL) { + tError("DEEP TCP send data failed(chandle null). data=0x%p len=%d ip=0x%0x port=%d", data, len, ip, port); + return -1; + } + if(pFdObj->signature != pFdObj) { + tError("DEEP TCP send data failed(sig diff). pFdObj=0x%p sig=0x%p data=%p len=%d ip=0x%x port=%d", pFdObj, pFdObj->signature, data, len, ip, port); + return -2; + } + SThreadObj *pThreadObj = pFdObj->pThreadObj; int ret = taosWriteMsg(pFdObj->fd, data, len); tTrace("%s %p TCP data is sent, FD:%p fd:%d bytes:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, ret); + if(ret < 0) { + tError("DEEP %s %p TCP send failed, retrying once, FD:%p fd:%d ctime=%" PRId64 " ret=%d len=%d ip=0x%x port=%d threadId=%d numOfFds=%d", + pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, pFdObj->ctime, ret, len, ip, port, pThreadObj->threadId, pThreadObj->numOfFds); + ret = taosWriteMsg(pFdObj->fd, data, len); + if(ret < 0) { + tError("DEEP %s %p second TCP send also failed, FD:%p fd:%d ctime=%" PRId64 " ret=%d len=%d ip=0x%x port=%d threadId=%d numOfFds=%d", + pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, pFdObj->ctime, ret, len, ip, port, pThreadObj->threadId, pThreadObj->numOfFds); + } + } return ret; } @@ -457,6 +477,7 @@ static void taosReportBrokenLink(SFdObj *pFdObj) { // notify the upper layer, so it will clean the associated context if (pFdObj->closedByApp == 0) { shutdown(pFdObj->fd, SHUT_WR); + tDebug("DEEP %s shutdown2 fd=%d ip=0x%x port=%d ctime=%" PRId64, pThreadObj->label, pFdObj->fd, pFdObj->ip, pFdObj->port, pFdObj->ctime); SRecvInfo recvInfo; recvInfo.msg = NULL; @@ -574,6 +595,7 @@ static void *taosProcessTcpData(void *param) { } if (taosReadTcpData(pFdObj, &recvInfo) < 0) { + tDebug("DEEP %s shutdown1 fd=%d ip=0x%x port=%d ctime=%" PRId64, pThreadObj->label, pFdObj->fd, pFdObj->ip, pFdObj->port, pFdObj->ctime); shutdown(pFdObj->fd, SHUT_WR); continue; } @@ -650,7 +672,10 @@ static void taosFreeFdObj(SFdObj *pFdObj) { pFdObj->signature = NULL; epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL); + tDebug("DEEP %s close1 fd=%d pFdObj=%p ip=0x%x port=%d ctime=%" PRId64 " thandle=%p", + pThreadObj->label, pFdObj->fd, pFdObj, pFdObj->ip, pFdObj->port, pFdObj->ctime, pFdObj->thandle); taosCloseSocket(pFdObj->fd); + pThreadObj->numOfFds--; if (pThreadObj->numOfFds < 0) @@ -674,3 +699,12 @@ static void taosFreeFdObj(SFdObj *pFdObj) { tfree(pFdObj); } + +SOCKET taosGetFdID(void *chandle) { + SFdObj * pFdObj = 
chandle; + if(pFdObj == NULL) + return -1; + if (pFdObj->signature != pFdObj) + return -1; + return pFdObj->fd; +} \ No newline at end of file diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 46313543d861ab1a2b56a236b0416cb373295bb7..7b0f27a3d63baf3479ade0579f7203b3774c370e 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -97,11 +97,11 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads break; } - struct sockaddr_in sin; - unsigned int addrlen = sizeof(sin); - if (getsockname(pConn->fd, (struct sockaddr *)&sin, &addrlen) == 0 && - sin.sin_family == AF_INET && addrlen == sizeof(sin)) { - pConn->localPort = (uint16_t)ntohs(sin.sin_port); + struct sockaddr_in sockin; + unsigned int addrlen = sizeof(sockin); + if (getsockname(pConn->fd, (struct sockaddr *)&sockin, &addrlen) == 0 && + sockin.sin_family == AF_INET && addrlen == sizeof(sockin)) { + pConn->localPort = (uint16_t)ntohs(sockin.sin_port); } tstrncpy(pConn->label, label, sizeof(pConn->label)); diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 2f4433f1bb32e965de66a40d7d6ae36c6804a06c..42b77e624ed9bc599034b705f7557a79a9232380 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -70,7 +70,7 @@ static void *sendRequest(void *param) { } int main(int argc, char *argv[]) { - SRpcInit rpcInit; + SRpcInit rpcInitial; SRpcEpSet epSet; int msgSize = 128; int numOfReqs = 0; @@ -90,18 +90,18 @@ int main(int argc, char *argv[]) { strcpy(epSet.fqdn[1], "192.168.0.1"); // client info - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.localPort = 0; - rpcInit.label = "APP"; - rpcInit.numOfThreads = 1; - rpcInit.cfp = processResponse; - rpcInit.sessions = 100; - rpcInit.idleTime = tsShellActivityTimer*1000; - rpcInit.user = "michael"; - rpcInit.secret = secret; - rpcInit.ckey = "key"; - rpcInit.spi = 1; - rpcInit.connType = TAOS_CONN_CLIENT; + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.localPort = 0; + rpcInitial.label = "APP"; + rpcInitial.numOfThreads = 1; + rpcInitial.cfp = processResponse; + rpcInitial.sessions = 100; + rpcInitial.idleTime = tsShellActivityTimer*1000; + rpcInitial.user = "michael"; + rpcInitial.secret = secret; + rpcInitial.ckey = "key"; + rpcInitial.spi = 1; + rpcInitial.connType = TAOS_CONN_CLIENT; for (int i=1; ireplica; ++index) { - const SNodeInfo *pNodeInfo = pCfg->nodeInfo + index; - pNode->peerInfo[index] = syncAddPeer(pNode, pNodeInfo); - if (pNode->peerInfo[index] == NULL) { + for (int32_t idx = 0; idx < pCfg->replica; ++idx) { + const SNodeInfo *pNodeInfo = pCfg->nodeInfo + idx; + pNode->peerInfo[idx] = syncAddPeer(pNode, pNodeInfo); + if (pNode->peerInfo[idx] == NULL) { sError("vgId:%d, node:%d fqdn:%s port:%u is not configured, stop taosd", pNode->vgId, pNodeInfo->nodeId, pNodeInfo->nodeFqdn, pNodeInfo->nodePort); syncStop(pNode->rid); @@ -210,7 +210,7 @@ int64_t syncStart(const SSyncInfo *pInfo) { } if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) { - pNode->selfIndex = index; + pNode->selfIndex = idx; } } @@ -256,8 +256,8 @@ int64_t syncStart(const SSyncInfo *pInfo) { } syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb - for (int32_t index = 0; index < pNode->replica; ++index) { - syncStartCheckPeerConn(pNode->peerInfo[index]); + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + syncStartCheckPeerConn(pNode->peerInfo[idx]); } return pNode->rid; @@ -277,8 +277,8 @@ void syncStop(int64_t rid) { if (pNode->pFwdTimer) taosTmrStop(pNode->pFwdTimer); if 
(pNode->pRoleTimer) taosTmrStop(pNode->pRoleTimer); - for (int32_t index = 0; index < pNode->replica; ++index) { - pPeer = pNode->peerInfo[index]; + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + pPeer = pNode->peerInfo[idx]; if (pPeer) syncRemovePeer(pPeer); } @@ -303,8 +303,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { pthread_mutex_lock(&pNode->mutex); syncStopCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb - for (int32_t index = 0; index < pNode->replica; ++index) { - syncStopCheckPeerConn(pNode->peerInfo[index]); + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + syncStopCheckPeerConn(pNode->peerInfo[idx]); } for (i = 0; i < pNode->replica; ++i) { @@ -364,8 +364,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { } syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb - for (int32_t index = 0; index < pNode->replica; ++index) { - syncStartCheckPeerConn(pNode->peerInfo[index]); + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + syncStartCheckPeerConn(pNode->peerInfo[idx]); } pthread_mutex_unlock(&pNode->mutex); @@ -629,16 +629,16 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { } void syncBroadcastStatus(SSyncNode *pNode) { - for (int32_t index = 0; index < pNode->replica; ++index) { - if (index == pNode->selfIndex) continue; - SSyncPeer *pPeer = pNode->peerInfo[index]; + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + if (idx == pNode->selfIndex) continue; + SSyncPeer *pPeer = pNode->peerInfo[idx]; syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_BROADCAST, syncGenTranId()); } } static void syncResetFlowCtrl(SSyncNode *pNode) { - for (int32_t index = 0; index < pNode->replica; ++index) { - pNode->peerInfo[index]->numOfRetrieves = 0; + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + pNode->peerInfo[idx]->numOfRetrieves = 0; } if (pNode->notifyFlowCtrlFp) { @@ -649,7 +649,7 @@ static void syncResetFlowCtrl(SSyncNode *pNode) { static void syncChooseMaster(SSyncNode *pNode) { SSyncPeer *pPeer; int32_t onlineNum = 0; - int32_t index = -1; + int32_t idx = -1; int32_t replica = pNode->replica; for (int32_t i = 0; i < pNode->replica; ++i) { @@ -660,13 +660,13 @@ static void syncChooseMaster(SSyncNode *pNode) { if (onlineNum == pNode->replica) { // if all peers are online, peer with highest version shall be master - index = 0; + idx = 0; for (int32_t i = 1; i < pNode->replica; ++i) { - if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) { - index = i; + if (pNode->peerInfo[i]->version > pNode->peerInfo[idx]->version) { + idx = i; } } - sDebug("vgId:%d, master:%s may be choosed, index:%d", pNode->vgId, pNode->peerInfo[index]->id, index); + sDebug("vgId:%d, master:%s may be choosed, index:%d", pNode->vgId, pNode->peerInfo[idx]->id, idx); } else { sDebug("vgId:%d, no master election since onlineNum:%d replica:%d", pNode->vgId, onlineNum, pNode->replica); } @@ -683,26 +683,26 @@ static void syncChooseMaster(SSyncNode *pNode) { } } - if (index < 0 && onlineNum > replica / 2.0) { + if (idx < 0 && onlineNum > replica / 2.0) { // over half of nodes are online for (int32_t i = 0; i < pNode->replica; ++i) { // slave with highest version shall be master pPeer = pNode->peerInfo[i]; if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) { - if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) { - index = i; + if (idx < 0 || pPeer->version > pNode->peerInfo[idx]->version) { + idx = i; } } } - if (index >= 0) { + if (idx >= 0) 
{ sDebug("vgId:%d, master:%s may be choosed, index:%d onlineNum(arb):%d replica:%d", pNode->vgId, - pNode->peerInfo[index]->id, index, onlineNum, replica); + pNode->peerInfo[idx]->id, idx, onlineNum, replica); } } - if (index >= 0) { - if (index == pNode->selfIndex) { + if (idx >= 0) { + if (idx == pNode->selfIndex) { sInfo("vgId:%d, start to work as master", pNode->vgId); nodeRole = TAOS_SYNC_ROLE_MASTER; @@ -712,7 +712,7 @@ static void syncChooseMaster(SSyncNode *pNode) { syncResetFlowCtrl(pNode); (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } else { - pPeer = pNode->peerInfo[index]; + pPeer = pNode->peerInfo[idx]; sInfo("%s, it shall work as master", pPeer->id); } } else { @@ -725,8 +725,8 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { int32_t masterIndex = -1; int32_t replica = pNode->replica; - for (int32_t index = 0; index < pNode->replica; ++index) { - if (pNode->peerInfo[index]->role != TAOS_SYNC_ROLE_OFFLINE) { + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + if (pNode->peerInfo[idx]->role != TAOS_SYNC_ROLE_OFFLINE) { onlineNum++; } } @@ -751,19 +751,19 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } } else { - for (int32_t index = 0; index < pNode->replica; ++index) { - SSyncPeer *pTemp = pNode->peerInfo[index]; + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + SSyncPeer *pTemp = pNode->peerInfo[idx]; if (pTemp->role != TAOS_SYNC_ROLE_MASTER) continue; if (masterIndex < 0) { - masterIndex = index; - sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, index); + masterIndex = idx; + sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, idx); } else { // multiple masters, it shall not happen if (masterIndex == pNode->selfIndex) { sError("%s, peer is master, work as slave instead", pTemp->id); nodeRole = TAOS_SYNC_ROLE_SLAVE; (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } else { - sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, index); + sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, idx); } } } @@ -783,9 +783,9 @@ static int32_t syncValidateMaster(SSyncPeer *pPeer) { (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); code = -1; - for (int32_t index = 0; index < pNode->replica; ++index) { - if (index == pNode->selfIndex) continue; - syncRestartPeer(pNode->peerInfo[index]); + for (int32_t idx = 0; idx < pNode->replica; ++idx) { + if (idx == pNode->selfIndex) continue; + syncRestartPeer(pNode->peerInfo[idx]); } } @@ -825,15 +825,15 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new } else { // master not there, if all peer's state and version are consistent, choose the master int32_t consistent = 0; - int32_t index = 0; + int32_t idx = 0; if (peersStatus != NULL) { - for (index = 0; index < pNode->replica; ++index) { - SSyncPeer *pTemp = pNode->peerInfo[index]; - if (pTemp->role != peersStatus[index].role) break; - if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[index].version)) break; + for (idx = 0; idx < pNode->replica; ++idx) { + SSyncPeer *pTemp = pNode->peerInfo[idx]; + if (pTemp->role != peersStatus[idx].role) break; + if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[idx].version)) break; } - if (index >= pNode->replica) consistent = 1; + if (idx >= pNode->replica) consistent = 1; } else { if (pNode->replica == 2) consistent = 1; } @@ -1331,7 +1331,7 @@ static 
void syncProcessBrokenLink(int64_t rid, int32_t closedByApp) { static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandle) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; - int64_t time = taosGetTimestampMs(); + int64_t lastTime = taosGetTimestampMs(); if (pSyncFwds->fwds >= SYNC_MAX_FWDS) { // pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS; @@ -1348,7 +1348,7 @@ static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandl memset(pFwdInfo, 0, sizeof(SFwdInfo)); pFwdInfo->version = _version; pFwdInfo->mhandle = mhandle; - pFwdInfo->time = time; + pFwdInfo->time = lastTime; pSyncFwds->fwds++; sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, _version, pSyncFwds->fwds); @@ -1400,10 +1400,10 @@ static void syncMonitorNodeRole(void *param, void *tmrId) { SSyncNode *pNode = syncAcquireNode(rid); if (pNode == NULL) return; - for (int32_t index = 0; index < pNode->replica; index++) { - if (index == pNode->selfIndex) continue; + for (int32_t idx = 0; idx < pNode->replica; idx++) { + if (idx == pNode->selfIndex) continue; - SSyncPeer *pPeer = pNode->peerInfo[index]; + SSyncPeer *pPeer = pNode->peerInfo[idx]; if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue; if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue; @@ -1425,16 +1425,16 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; if (pSyncFwds) { - int64_t time = taosGetTimestampMs(); + int64_t lastTime = taosGetTimestampMs(); if (pSyncFwds->fwds > 0) { pthread_mutex_lock(&pNode->mutex); for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS; - if (ABS(time - pFwdInfo->time) < 10000) break; + if (ABS(lastTime - pFwdInfo->time) < 10000) break; sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId, - pFwdInfo->version, time, pFwdInfo->time); + pFwdInfo->version, lastTime, pFwdInfo->time); syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_SYN_CONFIRM_EXPIRED); } diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 623d6e3cc0c20ef3c69b4ebfb6752616c1ff56b0..f0fcf6d6dd34e50a9810878aa7fbed2905a7f615 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -228,7 +228,7 @@ static int64_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi return code; } -static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { +static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t idx) { SSyncNode *pNode = pPeer->pSyncNode; int32_t once = 0; // last WAL has once ever been processed int64_t offset = 0; @@ -290,12 +290,12 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) { char wname[TSDB_FILENAME_LEN * 2]; int32_t size; int64_t code = -1; - int64_t index = 0; + int64_t idx = 0; while (1) { // retrieve wal info wname[0] = 0; - code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index); + code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &idx); if (code < 0) { sError("%s, failed to get wal info since:%s, code:0x%" PRIx64, pPeer->id, strerror(errno), code); break; @@ -308,7 +308,7 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) { } if (code == 0) { // last wal - code = syncProcessLastWal(pPeer, wname, index); + code = syncProcessLastWal(pPeer, wname, idx); sInfo("%s, last wal processed, code:%" PRId64, pPeer->id, code); break; } @@ 
-317,14 +317,14 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) { snprintf(fname, sizeof(fname), "%s/%s", pNode->path, wname); // send wal file, old wal file won't be modified, even remove is ok - struct stat fstat; - if (stat(fname, &fstat) < 0) { + struct stat fstatus; + if (stat(fname, &fstatus) < 0) { code = -1; sInfo("%s, failed to stat wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } - size = fstat.st_size; + size = fstatus.st_size; sInfo("%s, retrieve wal:%s size:%d", pPeer->id, fname, size); int32_t sfd = open(fname, O_RDONLY | O_BINARY); diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index 75e95631513e354960df5119b25ac3b6620a29d8..8436786157364ee3c4df717dfcb9f4b49c5a4561 100644 --- a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -257,7 +257,13 @@ static FORCE_INLINE int tsdbAppendDFile(SDFile* pDFile, void* buf, int64_t nbyte return -1; } - ASSERT(pDFile->info.size == toffset); + //bug fix. To avoid data corruption, + //the end offset of current file should be checked with file size, + //if not equal, known as file corrupted and return error. + if (pDFile->info.size != toffset) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return -1; + } if (offset) { *offset = toffset; diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6b19fdf3c1ffad31efeb06497c05fa2740636c01..3abc3e9acc6c8f6e909d4d6ef5f043dc2ee3e156 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -275,7 +275,7 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) { // =================== Commit Meta Data -static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) { +static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool bOpen) { STsdbFS * pfs = REPO_FS(pRepo); SMFile * pOMFile = pfs->cstatus->pmf; SDiskID did; @@ -287,7 +287,7 @@ static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) { did.id = TFS_PRIMARY_ID; tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); - if (open && tsdbCreateMFile(pMf, true) < 0) { + if (bOpen && tsdbCreateMFile(pMf, true) < 0) { tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } @@ -295,7 +295,7 @@ static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) { tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf)); } else { tsdbInitMFileEx(pMf, pOMFile); - if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) { + if (bOpen && tsdbOpenMFile(pMf, O_WRONLY) < 0) { tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } @@ -1813,4 +1813,4 @@ int tsdbCommitControl(STsdbRepo* pRepo, SControlDataInfo* pCtlDataInfo) { tsem_post(&pRepo->readyToCommit); return ret; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index b3d52e8fad1c81c55003470d024fd48278384f8c..bfeb61e4f1ab3141a8d05cba1a84bac6f4c72669 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -1217,13 +1217,13 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { bool isOneFSetFinish = true; int lastFType = -1; // one fileset ends when (1) the array ends or (2) encounter different fid - for (size_t index = 0; index < fArraySize; ++index) { + for (size_t idx = 0; idx < fArraySize; ++idx) { int tvid = -1, tfid = -1; TSDB_FILE_T ttype = TSDB_FILE_MAX; uint32_t tversion = -1; char bname[TSDB_FILENAME_LEN] = "\0"; - pf = 
taosArrayGet(fArray, index); + pf = taosArrayGet(fArray, idx); tfsbasename(pf, bname); tsdbParseDFilename(bname, &tvid, &tfid, &ttype, &tversion); ASSERT(tvid == REPO_ID(pRepo)); @@ -1237,7 +1237,7 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { lastFType = ttype; - if (index == 0) { + if (idx == 0) { memset(&fset, 0, sizeof(SDFileSet)); TSDB_FSET_SET_CLOSED(&fset); nDFiles = 1; @@ -1249,7 +1249,7 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { ++nDFiles; pDFile->f = *pf; // (1) the array ends - if (index == fArraySize - 1) { + if (idx == fArraySize - 1) { if (tsdbIsDFileSetValid(nDFiles)) { tsdbInfo("vgId:%d DFileSet %d is fetched, nDFiles=%" PRIu8, REPO_ID(pRepo), fset.fid, nDFiles); isOneFSetFinish = true; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 2ae215ad36288349c41d18ad000823063da37805..63ea4ab6df0e84e81a9308de509ccd24c933c54e 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -128,7 +128,7 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { tsdbStopStream(pRepo); if(pRepo->pthread){ - taosDestoryThread(pRepo->pthread); + taosDestroyThread(pRepo->pthread); pRepo->pthread = NULL; } @@ -344,7 +344,7 @@ int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg) { #endif } -uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) { +uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *idx, uint32_t eindex, int64_t *size) { // TODO return 0; #if 0 @@ -356,16 +356,16 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t struct stat fState; - tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *index, eindex); - ASSERT(*index <= eindex); + tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *idx, eindex); + ASSERT(*idx <= eindex); if (name[0] == 0) { // get the file from index or after, but not larger than eindex - int fid = (*index) / TSDB_FILE_TYPE_MAX; + int fid = (*idx) / TSDB_FILE_TYPE_MAX; if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { - if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) { + if (*idx <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) { fname = tsdbGetMetaFileName(pRepo->rootDir); - *index = TSDB_META_FILE_INDEX; + *idx = TSDB_META_FILE_INDEX; magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta); sprintf(name, "tsdb/%s", TSDB_META_FILE_NAME); } else { @@ -375,7 +375,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t SFileGroup *pFGroup = taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE); if (pFGroup->fileId == fid) { - SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX]; + SFile *pFile = &pFGroup->files[(*idx) % TSDB_FILE_TYPE_MAX]; fname = strdup(TSDB_FILE_NAME(pFile)); magic = pFile->info.magic; char *tfname = strdup(fname); @@ -385,7 +385,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < (int)eindex) { SFile *pFile = &pFGroup->files[0]; fname = strdup(TSDB_FILE_NAME(pFile)); - *index = pFGroup->fileId * TSDB_FILE_TYPE_MAX; + *idx = pFGroup->fileId * TSDB_FILE_TYPE_MAX; magic = pFile->info.magic; char *tfname = strdup(fname); sprintf(name, "tsdb/%s/%s", TSDB_DATA_DIR_NAME, basename(tfname)); @@ -402,7 +402,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t tfree(fname); return 0; } - if (*index == 
TSDB_META_FILE_INDEX) { // get meta file + if (*idx == TSDB_META_FILE_INDEX) { // get meta file tsdbGetStoreInfo(fname, &magic, size); } else { char tfname[TSDB_FILENAME_LEN] = "\0"; diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index a095bff61e05822f6389a51671f98fc5a33e0bbe..a7aa310152a4bd445ba732b6ae7c8e671263afd4 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -28,13 +28,13 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rm static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper); static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable); static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid); -static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup); -static int tsdbTableSetName(STableCfg *config, char *name, bool dup); -static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup); -static int tsdbTableSetSName(STableCfg *config, char *sname, bool dup); +static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool duplicate); +static int tsdbTableSetName(STableCfg *config, char *name, bool duplicate); +static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool duplicate); +static int tsdbTableSetSName(STableCfg *config, char *sname, bool duplicate); static int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid); -static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup); -static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup); +static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool duplicate); +static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool duplicate); static int tsdbEncodeTableName(void **buf, tstr *name); static void * tsdbDecodeTableName(void *buf, tstr **name); static int tsdbEncodeTable(void **buf, STable *pTable); @@ -1236,8 +1236,8 @@ static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, in return 0; } -static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup) { - if (dup) { +static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool duplicate) { + if (duplicate) { config->schema = tdDupSchema(pSchema); if (config->schema == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1249,8 +1249,8 @@ static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup) { return 0; } -static int tsdbTableSetName(STableCfg *config, char *name, bool dup) { - if (dup) { +static int tsdbTableSetName(STableCfg *config, char *name, bool duplicate) { + if (duplicate) { config->name = strdup(name); if (config->name == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1263,13 +1263,13 @@ static int tsdbTableSetName(STableCfg *config, char *name, bool dup) { return 0; } -static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup) { +static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool duplicate) { if (config->type != TSDB_CHILD_TABLE) { terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; return -1; } - if (dup) { + if (duplicate) { config->tagSchema = tdDupSchema(pSchema); if (config->tagSchema == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1281,13 +1281,13 @@ static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup) return 0; } -static int tsdbTableSetSName(STableCfg *config, char *sname, bool dup) { +static int tsdbTableSetSName(STableCfg *config, char *sname, bool duplicate) { if 
(config->type != TSDB_CHILD_TABLE) { terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; return -1; } - if (dup) { + if (duplicate) { config->sname = strdup(sname); if (config->sname == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1309,13 +1309,13 @@ static int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid) { return 0; } -static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup) { +static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool duplicate) { if (config->type != TSDB_CHILD_TABLE) { terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; return -1; } - if (dup) { + if (duplicate) { config->tagValues = tdKVRowDup(row); if (config->tagValues == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1328,13 +1328,13 @@ static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup) { return 0; } -static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup) { +static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool duplicate) { if (config->type != TSDB_STREAM_TABLE) { terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; return -1; } - if (dup) { + if (duplicate) { config->sql = strdup(sql); if (config->sql == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 56e215e0b28828ec18255f36e516fc39e59f0b4a..3d72a7bde3bb87eca567819503ceb9746968768b 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -986,7 +986,9 @@ static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, return rmem; } else { pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH; - extraRow = rimem; + if (extraRow) { + *extraRow = rimem; + } return rmem; } } else { @@ -1298,7 +1300,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6 range.from = i; } } - range.to = 0; + range.to = sblock; taosArrayPush(pArray, &range); range.from = -1; break; @@ -1314,7 +1316,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6 if(range.from == -1) { range.from = i; } else { - if(range.to + 1 != i) { + if(range.to - 1 != i) { // add the previous taosArrayPush(pArray, &range); range.from = i; @@ -1359,16 +1361,17 @@ static void shrinkBlocksByQuery(STsdbQueryHandle *pQueryHandle, STableCheckInfo SBlockIdx *compIndex = pQueryHandle->rhelper.pBlkIdx; bool order = ASCENDING_TRAVERSE(pQueryHandle->order); + TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL; if (order) { assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey); + s = pQueryHandle->window.skey; + e = pQueryHandle->window.ekey; } else { assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey); + e = pQueryHandle->window.skey; + s = pQueryHandle->window.ekey; } - TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL; - s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey); - e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey); - // discard the unqualified data block based on the query time window int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC); if (s > pCompInfo->blocks[start].keyLast) { @@ -1653,7 +1656,9 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, if (asc) { // query ended in/started from current block - if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { + if ((pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > 
pBlock->keyFirst ) + && pCheckInfo->lastKey <= pBlock->keyLast) { + // if the mem lastKey is beyond the block's keyLast, handleDataMergeIfNeeded must handle it instead if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { *exists = false; return code; @@ -1669,10 +1674,9 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, cur->pos = 0; } - assert(pCheckInfo->lastKey <= pBlock->keyLast); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { // the whole block is loaded in to buffer - cur->pos = asc? 0:(pBlock->numOfRows - 1); + cur->pos = 0; code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } else { //desc order, query ended in current block @@ -1692,7 +1696,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, assert(pCheckInfo->lastKey >= pBlock->keyFirst); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { - cur->pos = asc? 0:(pBlock->numOfRows-1); + cur->pos = pBlock->numOfRows - 1; code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } @@ -2587,10 +2591,10 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO while (numOfTotal < cnt) { int32_t pos = pTree->pNode[0].index; - int32_t index = sup.blockIndexArray[pos]++; + int32_t idx = sup.blockIndexArray[pos]++; STableBlockInfo* pBlocksInfo = sup.pDataBlockInfo[pos]; - pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfo[index]; + pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfo[idx]; // set data block index overflow, in order to disable the offset comparator if (sup.blockIndexArray[pos] >= sup.numOfBlocksPerTable[pos]) { @@ -3329,7 +3333,7 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) { size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); assert(numOfTables > 0); - int64_t stime = taosGetTimestampUs(); + int64_t lastTime = taosGetTimestampUs(); while(pQueryHandle->activeIndex < numOfTables) { if (loadBlockOfActiveTable(pQueryHandle)) { @@ -3347,7 +3351,7 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) { terrno = TSDB_CODE_SUCCESS; - int64_t elapsedTime = taosGetTimestampUs() - stime; + int64_t elapsedTime = taosGetTimestampUs() - lastTime; pQueryHandle->cost.checkForNextTime += elapsedTime; } @@ -3366,8 +3370,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) { return false; } - int64_t stime = taosGetTimestampUs(); - int64_t elapsedTime = stime; + int64_t lastTime = taosGetTimestampUs(); + int64_t elapsedTime = lastTime; // TODO refactor: remove "type" if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST) { @@ -3394,7 +3398,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) { } if (exists) { - pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime); + pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - lastTime); return exists; } @@ -3406,7 +3410,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) { bool ret = doHasDataInBuffer(pQueryHandle); terrno = TSDB_CODE_SUCCESS; - elapsedTime = taosGetTimestampUs() - stime; + elapsedTime = taosGetTimestampUs() - lastTime; pQueryHandle->cost.checkForNextTime += elapsedTime; return ret; } @@ -3755,7 +3759,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta return TSDB_CODE_SUCCESS; } - int64_t stime = taosGetTimestampUs(); + int64_t lastTime = taosGetTimestampUs(); int statisStatus = tsdbLoadBlockStatis(&pHandle->rhelper, pBlockInfo->compBlock); if (statisStatus < TSDB_STATIS_OK) { 
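// tsdbLoadBlockStatis could not read the block statistics; terrno carries the cause up to the caller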
return terrno; @@ -3789,7 +3793,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta } } - int64_t elapsed = taosGetTimestampUs() - stime; + int64_t elapsed = taosGetTimestampUs() - lastTime; pHandle->cost.statisInfoLoadTime += elapsed; *pBlockStatis = pHandle->statis; diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fd9a340a25a752b18ab07a8fbb2691038af3b71b..872da82a8e16549facd03fb3249b03b150a8f842 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 131 +#define TSDB_CFG_MAX_NUM 134 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h index 7443ad706dcbef529d857fe823cddd0cc1efbdd3..9ef1c230359c154d54f7c577a3387cea0d57c551 100644 --- a/src/util/inc/tthread.h +++ b/src/util/inc/tthread.h @@ -26,7 +26,7 @@ extern "C" { // create new thread pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param); // destory thread -bool taosDestoryThread(pthread_t* pthread); +bool taosDestroyThread(pthread_t* pthread); // thread running return true bool taosThreadRunning(pthread_t* pthread); diff --git a/src/util/src/hash.c b/src/util/src/hash.c index e2fd37fdc41479743d21e43f451c4fc4270b01d8..d4d42976155cb1e11b4abdf6c1d6fa6855921971 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -47,7 +47,7 @@ typedef struct SHashEntry { SHashNode *next; } SHashEntry; -typedef struct SHashObj { +struct SHashObj { SHashEntry **hashList; size_t capacity; // number of slots size_t size; // number of elements in hash table @@ -58,7 +58,7 @@ typedef struct SHashObj { SHashLockTypeE type; // lock type bool enableUpdate; // enable update SArray *pMemBlock; // memory block allocated for SHashEntry -} SHashObj; +}; /* * Function definition @@ -303,7 +303,7 @@ int32_t taosHashGetSize(const SHashObj *pHashObj) { if (pHashObj == NULL) { return 0; } - return (int32_t)atomic_load_64(&pHashObj->size); + return (int32_t)atomic_load_64((int32_t *) &pHashObj->size); } static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) { diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 20f6d5b250264e61b4be87061370e3f48ed2a924..efccf7dff8f1730a29d5306c7ff1ff8d46ece341 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -180,15 +180,15 @@ void* taosArrayPop(SArray* pArray) { return TARRAY_GET_ELEM(pArray, pArray->size); } -void* taosArrayGet(const SArray* pArray, size_t index) { - assert(index < pArray->size); - return TARRAY_GET_ELEM(pArray, index); +void* taosArrayGet(const SArray* pArray, size_t idx) { + assert(idx < pArray->size); + return TARRAY_GET_ELEM(pArray, idx); } -void* taosArrayGetP(const SArray* pArray, size_t index) { - assert(index < pArray->size); +void* taosArrayGetP(const SArray* pArray, size_t idx) { + assert(idx < pArray->size); - void* d = TARRAY_GET_ELEM(pArray, index); + void* d = TARRAY_GET_ELEM(pArray, idx); return *(void**)d; } @@ -204,12 +204,12 @@ void taosArraySetSize(SArray* pArray, size_t size) { pArray->size = size; } -void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { +void* taosArrayInsert(SArray* pArray, size_t idx, void* pData) { if (pArray == NULL || pData == NULL) { return NULL; } - if (index >= pArray->size) { + if (idx >= pArray->size) { return taosArrayPush(pArray, pData); } @@ -221,9 +221,9 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { } } - void* dst = TARRAY_GET_ELEM(pArray, 
index); + void* dst = TARRAY_GET_ELEM(pArray, idx); - int32_t remain = (int32_t)(pArray->size - index); + int32_t remain = (int32_t)(pArray->size - idx); memmove((char*)dst + pArray->elemSize, (char*)dst, pArray->elemSize * remain); memcpy(dst, pData, pArray->elemSize); @@ -232,21 +232,21 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { return dst; } -void taosArraySet(SArray* pArray, size_t index, void* pData) { - assert(index < pArray->size); - memcpy(TARRAY_GET_ELEM(pArray, index), pData, pArray->elemSize); +void taosArraySet(SArray* pArray, size_t idx, void* pData) { + assert(idx < pArray->size); + memcpy(TARRAY_GET_ELEM(pArray, idx), pData, pArray->elemSize); } -void taosArrayRemove(SArray* pArray, size_t index) { - assert(index < pArray->size); +void taosArrayRemove(SArray* pArray, size_t idx) { + assert(idx < pArray->size); - if (index == pArray->size - 1) { + if (idx == pArray->size - 1) { taosArrayPop(pArray); return; } - size_t remain = pArray->size - index - 1; - memmove((char*)pArray->pData + index * pArray->elemSize, (char*)pArray->pData + (index + 1) * pArray->elemSize, remain * pArray->elemSize); + size_t remain = pArray->size - idx - 1; + memmove((char*)pArray->pData + idx * pArray->elemSize, (char*)pArray->pData + (idx + 1) * pArray->elemSize, remain * pArray->elemSize); pArray->size -= 1; } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 39b674fe4f23b1e5f4ef034df7d5bb1f654d80e7..6fac32e22df06007ae7b90eb02ceb5a6fdb9e1ec 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -624,9 +624,9 @@ void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force) { return; } - const char* stat[] = {"false", "true"}; + const char* status[] = {"false", "true"}; uDebug("cache:%s start to cleanup trashcan, numOfElem in trashcan:%d, free:%s", pCacheObj->name, - pCacheObj->numOfElemsInTrash, (force? stat[1]:stat[0])); + pCacheObj->numOfElemsInTrash, (force? 
status[1]:status[0])); STrashElem *pElem = pCacheObj->pTrash; while (pElem) { @@ -683,10 +683,10 @@ bool travHashTableFn(void* param, void* data) { return true; } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) { +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t timeStamp, __cache_trav_fn_t fp, void* param1) { assert(pCacheObj != NULL); - SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1}; + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = timeStamp, .param1 = param1}; taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 2ab5ddbbe0ac1025be72e69fe88050d2aa3f73ad..565878f3e151e71a0fcf9b5b3b4b182249f51a79 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -436,9 +436,9 @@ int WCSPatternMatch(const uint32_t *patterStr, const uint32_t *str, size_t size, return TSDB_PATTERN_MATCH; } - uint32_t accept[3] = {towupper(c), towlower(c), 0}; + uint32_t accept_array[3] = {towupper(c), towlower(c), 0}; while (1) { - size_t n = taosWcscspn(str, accept); + size_t n = taosWcscspn(str, accept_array); str += n; if (str[0] == 0 || (n >= size)) { diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 334207022d61fdcf80f26fe626edcb7de9628944..f66152988ee31543cd559c050fba82d3353aa9fa 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -68,6 +68,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, "Client and server's t TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, "Database not ready") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VERSION, "Invalid app version") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SHORTCUT, "Shortcut") //common & util TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, "Operation not supported") @@ -123,6 +124,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE, "Invalid line protocol type") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE, "Invalid timestamp precision type") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY, "Result set too large to be output") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_TOO_MANY_SML_LINES, "Too many lines in batch") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SEND_DATA_FAILED, "Client send request data failed") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") diff --git a/src/util/src/tmempool.c b/src/util/src/tmempool.c index 678c965eb1a7315977616778c0e4b39ceb4c7525..b580f9d9ab3cfafa4e957e26334cd1e728387eb9 100644 --- a/src/util/src/tmempool.c +++ b/src/util/src/tmempool.c @@ -89,19 +89,19 @@ char *taosMemPoolMalloc(mpool_h handle) { } void taosMemPoolFree(mpool_h handle, char *pMem) { - int index; + int idx; pool_t *pool_p = (pool_t *)handle; if (pMem == NULL) return; - index = (int)(pMem - pool_p->pool) % pool_p->blockSize; - if (index != 0) { + idx = (int)(pMem - pool_p->pool) % pool_p->blockSize; + if (idx != 0) { uError("invalid free address:%p\n", pMem); return; } - index = (int)((pMem - pool_p->pool) / pool_p->blockSize); - if (index < 0 || index >= pool_p->numOfBlock) { + idx = (int)((pMem - pool_p->pool) / pool_p->blockSize); + if (idx < 0 || idx >= pool_p->numOfBlock) { uError("mempool: error, invalid address:%p\n", pMem); return; } @@ -110,7 +110,7 @@ void taosMemPoolFree(mpool_h handle, char *pMem) { pthread_mutex_lock(&pool_p->mutex); - 
pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = index; + pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = idx; pool_p->numOfFree++; pthread_mutex_unlock(&pool_p->mutex); diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 2094c3d4be59c5636c6f987a790c163bd61c2227..407884759a633c8947f763fd5d5d167a4eb264c8 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -314,27 +314,27 @@ static void taosNetCheckPort(uint32_t hostIp, int32_t startPort, int32_t endPort } void *taosNetInitRpc(char *secretEncrypt, char spi) { - SRpcInit rpcInit; + SRpcInit rpcInitial; void * pRpcConn = NULL; char user[] = "nettestinternal"; char pass[] = "nettestinternal"; taosEncryptPass((uint8_t *)pass, strlen(pass), secretEncrypt); - memset(&rpcInit, 0, sizeof(rpcInit)); - rpcInit.localPort = 0; - rpcInit.label = "NT"; - rpcInit.numOfThreads = 1; // every DB connection has only one thread - rpcInit.cfp = NULL; - rpcInit.sessions = 16; - rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.user = user; - rpcInit.idleTime = 2000; - rpcInit.ckey = "key"; - rpcInit.spi = spi; - rpcInit.secret = secretEncrypt; - - pRpcConn = rpcOpen(&rpcInit); + memset(&rpcInitial, 0, sizeof(rpcInitial)); + rpcInitial.localPort = 0; + rpcInitial.label = "NT"; + rpcInitial.numOfThreads = 1; // every DB connection has only one thread + rpcInitial.cfp = NULL; + rpcInitial.sessions = 16; + rpcInitial.connType = TAOS_CONN_CLIENT; + rpcInitial.user = user; + rpcInitial.idleTime = 2000; + rpcInitial.ckey = "key"; + rpcInitial.spi = spi; + rpcInitial.secret = secretEncrypt; + + pRpcConn = rpcOpen(&rpcInitial); return pRpcConn; } diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index 1ffa94b0df6b63dac914649c7003d37bbedbdb24..7b23b708b1ea9b480d8b7fecfb5e41bbdbafe9b7 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -86,9 +86,8 @@ void taosCloseQueue(taos_queue param) { } pthread_mutex_destroy(&queue->mutex); - free(queue); - uTrace("queue:%p is closed", queue); + free(queue); } void *taosAllocateQitem(int size) { diff --git a/src/util/src/tref.c b/src/util/src/tref.c index 33323889c68162219b3c6faf886ac29b2a975ffa..bff8b12aaefc1734318e891efab7a9b02e6557f4 100644 --- a/src/util/src/tref.c +++ b/src/util/src/tref.c @@ -54,7 +54,7 @@ static void taosLockList(int64_t *lockedBy); static void taosUnlockList(int64_t *lockedBy); static void taosIncRsetCount(SRefSet *pSet); static void taosDecRsetCount(SRefSet *pSet); -static int taosDecRefCount(int rsetId, int64_t rid, int remove); +static int taosDecRefCount(int rsetId, int64_t rid, int rm); int taosOpenRef(int max, void (*fp)(void *)) { @@ -389,7 +389,7 @@ int taosListRef() { return num; } -static int taosDecRefCount(int rsetId, int64_t rid, int remove) { +static int taosDecRefCount(int rsetId, int64_t rid, int rm) { int hash; SRefSet *pSet; SRefNode *pNode; @@ -428,7 +428,7 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) { if (pNode) { pNode->count--; - if (remove) pNode->removed = 1; + if (rm) pNode->removed = 1; if (pNode->count <= 0) { if (pNode->prev) { diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 0ebe9c8f8af23186fb4cd60824bfa5641d2090a9..19c72d2e0b2b4e059883a506e3c4ad1f1b4cf809 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -129,15 +129,18 @@ int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) { if (nwritten <= 0) { if (errno == EINTR /* || errno == EAGAIN || errno == EWOULDBLOCK */) continue; - else - return -1; + else { + 
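+ // EINTR is retried by the loop above; any other errno aborts the write, so log it and return a distinct negative code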
uError("DEEP write socket failed3. errno=%d fd=%d writeten=%d left=%d", errno, fd, nwritten, nleft); + return -3; + } } else { nleft -= nwritten; ptr += nwritten; } if (errno == SIGPIPE || errno == EPIPE) { - return -1; + uError("DEEP write socket failed4. errno=%d fd=%d writeten=%d left=%d", errno, fd, nwritten, nleft); + return -4; } } diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c index 043b2de2f241297d209041294428dde2c55e974e..f77dea592e8454dcc15e05f5c03c9db56e0ccc6b 100644 --- a/src/util/src/tthread.c +++ b/src/util/src/tthread.c @@ -38,7 +38,7 @@ pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) { } // destory thread -bool taosDestoryThread(pthread_t* pthread) { +bool taosDestroyThread(pthread_t* pthread) { if(pthread == NULL) return false; if(taosThreadRunning(pthread)) { pthread_cancel(*pthread); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 81ab56ccea1a585ae7bf89a57244edd25818c0d5..f215453f740b979e5b71a4d59a2698b6dd569ff7 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -461,7 +461,7 @@ void vnodeStopWaitingThread(SVnodeObj* pVnode) { if(loop == 0) { vInfo("vgId:%d :SDEL force kill thread to quit. pthread=%p pWrite=%p", pVnode->vgId, pWaitThread->pthread, pWaitThread->param); // thread not stop , so need kill - taosDestoryThread(pWaitThread->pthread); + taosDestroyThread(pWaitThread->pthread); // write msg need remove from queue SVWriteMsg* pWrite = (SVWriteMsg* )pWaitThread->param; if (pWrite) @@ -586,9 +586,9 @@ void vnodeCleanUp(SVnodeObj *pVnode) { // stop replication module if (pVnode->sync > 0) { - int64_t sync = pVnode->sync; + int64_t syncRid = pVnode->sync; pVnode->sync = -1; - syncStop(sync); + syncStop(syncRid); } vDebug("vgId:%d, vnode is cleaned, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); @@ -692,4 +692,4 @@ bool vnodeWaitTooMany(void* vparam) { tsem_t* vnodeSemWait(void* vparam) { SVnodeObj* pVnode = (SVnodeObj* )vparam; return &pVnode->semWait; -} \ No newline at end of file +} diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 2bdfd2ead3a31d8c2cba94d93239de965d2e07dc..6edcadcf715907bc69090c0e6b3a396020057c8a 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -22,7 +22,7 @@ #include "vnodeMain.h" #include "vnodeStatus.h" -uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fver) { +uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *idx, uint32_t eindex, int64_t *size, uint64_t *fver) { SVnodeObj *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) { vError("vgId:%d, vnode not found while get file info", vgId); @@ -30,7 +30,7 @@ uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t ei } *fver = pVnode->fversion; - uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size); + uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, idx, eindex, size); vnodeRelease(pVnode); return ret; diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c index 505728fbe4c4a6fbc126aa18ff6db93a28388173..ffb9767bb4333f52237af2ddb1893b129bb2c1af 100644 --- a/src/wal/test/waltest.c +++ b/src/wal/test/waltest.c @@ -113,17 +113,17 @@ int main(int argc, char *argv[]) { printf("%d wal files are written\n", total); - int64_t index = 0; + int64_t idx = 0; char name[256]; while (1) { - int code = walGetWalFile(pWal, name, &index); + int code = walGetWalFile(pWal, name, &idx); if (code == -1) { - printf("failed to get wal file, index:%" 
PRId64 "\n", index); + printf("failed to get wal file, index:%" PRId64 "\n", idx); break; } - printf("index:%" PRId64 " wal:%s\n", index, name); + printf("index:%" PRId64 " wal:%s\n", idx, name); if (code == 0) break; } diff --git a/tests/develop-test/3-connectors/R/test.sh b/tests/develop-test/3-connectors/R/test.sh index 90b94893659f04328d7eaef810018bb6a13c8c09..dd4577a35678b8d3435dc7835c21a926bb00e12d 100644 --- a/tests/develop-test/3-connectors/R/test.sh +++ b/tests/develop-test/3-connectors/R/test.sh @@ -22,7 +22,9 @@ cd ../../ WKC=`pwd` #echo "WKC:${WKC}" -JDBC_PATH=${WKC}'/src/connector/jdbc/' +git clone git@github.com:taosdata/taos-connector-jdbc.git --branch 2.0 --single-branch --depth 1 + +JDBC_PATH=${WKC}'/taos-connector-jdbc/' CASE_PATH=${WKC}'/tests/examples/R/' cd ${JDBC_PATH} #echo "JDBC_PATH:${JDBC_PATH}" diff --git a/tests/develop-test/3-connectors/c#/test.sh b/tests/develop-test/3-connectors/c#/test.sh index 8cfb3fe4fcff6ab820b53698e508189e557676ca..f77536c1fd8ba6595788fbcbcb5288bd72764e45 100755 --- a/tests/develop-test/3-connectors/c#/test.sh +++ b/tests/develop-test/3-connectors/c#/test.sh @@ -15,25 +15,45 @@ rm -rf /var/lib/taos/* rm -rf /var/log/taos/* nohup taosd -c /etc/taos/ > /dev/null 2>&1 & sleep 10 + +# define fun to check if execute correct. +check(){ +if [ $1 -eq 0 ] +then + echo "===================$2 succeed===================" +else + echo "===================$2 failed===================" + exit 1 +fi +} + cd ../../ WKC=`pwd` -cd ${WKC}/src/connector/C# -dotnet test -# run example under Driver -cd ${WKC}/src/connector/C#/examples -dotnet run - -#dotnet run --project src/test/Cases/Cases.csproj +echo "WKC:${WKC}" # run example with neuget package cd ${WKC}/tests/examples/C# + dotnet run --project C#checker/C#checker.csproj +check $? C#checker.csproj + dotnet run --project TDengineTest/TDengineTest.csproj +check $? TDengineTest.csproj + dotnet run --project schemaless/schemaless.csproj +check $? schemaless.csproj + dotnet run --project jsonTag/jsonTag.csproj +check $? jsonTag.csproj + dotnet run --project stmt/stmt.csproj +check $? stmt.csproj + +dotnet run --project insertCn/insertCn.csproj +check $? insertCn.csproj cd ${WKC}/tests/examples/C#/taosdemo dotnet build -c Release tree | true ./bin/Release/net5.0/taosdemo -c /etc/taos -y +check $? 
taosdemo diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py index bcba5d8cc248348b9575fec9ab8f52f5d5fc8182..c86d5300f59e66eb680579a63e075de099aabff7 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py @@ -93,8 +93,6 @@ class TDTestCase: tdSql.checkData(27, 1, "SMALLINT UNSIGNED") tdSql.checkData(28, 1, "BINARY") tdSql.checkData(28, 2, 19) - tdSql.query("select count(*) from db.stb where c0 >= 0 and c0 <= 10") - tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb where c1 >= 0 and c1 <= 10") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb where c2 >= 0 and c2 <= 10") @@ -119,8 +117,6 @@ class TDTestCase: tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb where c13 = 'b1' or c13 = 'b2'") tdSql.checkData(0, 0, 160) - tdSql.query("select count(*) from db.stb where t0 >= 0 and t0 <= 10") - tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb where t1 >= 0 and t1 <= 10") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb where t2 >= 0 and t2 <= 10") @@ -328,4 +324,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json index 8f652b9d73f5d9f4cb72e2d146a8c66b49dd3533..6558212816f8ad936b1fc74f0159955c7de96895 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, @@ -88,4 +87,4 @@ }] }] }] -} \ No newline at end of file +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json index a4706bf47dab35992e7b82a46f6ceff332f9da2d..cebabf95b6e72b5d7f970906b18374c5b3749d5e 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, @@ -84,4 +83,4 @@ "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] }] -} \ No newline at end of file +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json index 01e3950502ff363a42ab2837d0fd24938e54c743..0a10458283e047c98c2dfcddb4469c9c748fe5eb 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json index 62846bf2b67015cb4f7a4c6b69dd3df2149cbcad..5cbd67e1543d207a57a7572217b9aebc59827d88 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, @@ -84,4 +83,4 @@ "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] }] -} \ No newline at end of file +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json index 8722d124d69bf630266e85456623f49ed9fed2ac..0cf152604107f536c9cad4cfb2ac420bd725e917 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json index 1aa5b093486e682afe251c331884d9a0bf2f8a79..a0eec1c9afcc00a5525b5409589c2465cff3742f 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json index 8806b52a1e63ea209196f194c62378d63982dc18..4e7f6472f69f32ca2e163255a90f4b168f41bc38 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json index 1e9e28d4e847cd351c1ce9fee3699e14b3eb77f3..2737f6e631564e3f00f66aee7dd7614859ff0481 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp": 2, - "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json index deef77fdefa994487607ddc1edb3bf3a5cd83d32..f19f3734ef603ce2d7c6e0ccf200985ac4b46a2c 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp": 2, - "walLevel": 1, "cachelast": 0, "quorum": 1, 
"fsync": 3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json index 8893a73467bf36a1687cc25a0815a232a1ae86e9..f083782c07adbdffcdcc3c383c62dc27e53e2d0e 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp": 2, - "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json index 0bf363e6734cdffdede500965f7c7fdc51f3c4a3..9c590ae83c0fdd974598dc474adc6df828726ccd 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json index 84419760c1f29bc5b69ce54ed0a55557703c5384..60af5dfe546c83453a1fed2c933be272e3ab586d 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json index 2e5965c14b6c6882bb90a2e8248d5f6605817085..5d74e960e2207bd6ab2b0b1b4b1ebf073c1971ad 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, @@ -84,4 +83,4 @@ "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] }] -} \ No newline at end of file +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json index 48f4e231669b150e5823e2dceafd429cd6af8b3c..f7266ee71f4062ab435b6b02a8ac323bf1db7af9 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json index 06c5be47bf7ca2effd70a0a8ee51915e57cf1fe8..ba7660658f632d8fd7d2f973567ce7b75ad8a5c9 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json +++ 
b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json index 1bb03b4fab5640fb98289384466b84307ea8ceb0..62685eb3c7105d31f541b8d2d788bb7b335b96aa 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json index 893b203aa85c49ea7bd069fa3a5b30e8c7ea6bc6..ef0b1d27840c1e315e1457abe0d9a30f04187b74 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json index b0e903347d5df00a229391144e77ee57b743b3f0..546885db7d2283be34b7dc65d2f461924023e9b7 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json index 4d42ed63fa81660a8ca84d40089f1d35c0afa54a..5869410c03a12dabb5631d3b480e365c9a32656d 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":1, "quorum":1, "fsync":3000, @@ -59,4 +58,4 @@ "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR"}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY"}] }] }] -} \ No newline at end of file +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json index 22bd13d5be32a9c35b6abed6fc632da3432a2a57..d9eb17ea1f52b5d911b630871b17d42fe0b64b14 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py index b7f3fcd8262d171ee16f4e7b8f0f5ebb7aa84b38..e2160f3c9c23db2e27d54b1559a4b808a9968d31 100644 --- 
a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py @@ -79,8 +79,6 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 40) - tdSql.query("select distinct(c1) from db.stb") - tdSql.checkData(0, 0, None) tdSql.query("select distinct(c3) from db.stb") tdSql.checkData(0, 0, None) tdSql.query("select distinct(c4) from db.stb") diff --git a/tests/docs-examples-test/test_R.sh b/tests/docs-examples-test/test_R.sh index 8d2db4546f47f55a61f2a96d37ec7d4dbd725a5b..d59daf5d34a8d5fa8713c0a41a5c4c90967757d9 100755 --- a/tests/docs-examples-test/test_R.sh +++ b/tests/docs-examples-test/test_R.sh @@ -5,9 +5,9 @@ set -e pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/R +cd ../../docs/examples/R -jar_path=`find ../../../debug/build -name taos-jdbcdriver-*-dist.jar` +jar_path=`find ../../../../debug/build -name taos-jdbcdriver-*-dist.jar` echo jar_path=$jar_path R -f connect_native.r --args $jar_path # R -f connect_rest.r --args $jar_path # bug 14704 diff --git a/tests/docs-examples-test/test_c.sh b/tests/docs-examples-test/test_c.sh index 2d47eff585b26659cb90a1669317efe074f1adde..8d4c51e817f54332e963a6a637f3c3e8a8be0be6 100755 --- a/tests/docs-examples-test/test_c.sh +++ b/tests/docs-examples-test/test_c.sh @@ -5,7 +5,7 @@ set -e taosd >> /dev/null 2>&1 & taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/c +cd ../../docs/examples/c # 1 gcc connect_example.c -o connect_example -ltaos diff --git a/tests/docs-examples-test/test_csharp.sh b/tests/docs-examples-test/test_csharp.sh index 64d28d945f811db160c073643262f766e67fa59b..f26fa22d191d574338f71b5d038342464a5438ee 100755 --- a/tests/docs-examples-test/test_csharp.sh +++ b/tests/docs-examples-test/test_csharp.sh @@ -4,7 +4,7 @@ set -e pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/csharp +cd ../../docs/examples/csharp dotnet run --project connect.csproj diff --git a/tests/docs-examples-test/test_go.sh b/tests/docs-examples-test/test_go.sh index d959d8c1ed646a130b5ffd8c3387e7abe965621e..dc2332f237b81b87c3f9ce1cfac260a6fd91a814 100755 --- a/tests/docs-examples-test/test_go.sh +++ b/tests/docs-examples-test/test_go.sh @@ -5,7 +5,7 @@ set -e taosd >> /dev/null 2>&1 & taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/go +cd ../../docs/examples/go go mod tidy diff --git a/tests/docs-examples-test/test_java.sh b/tests/docs-examples-test/test_java.sh index 0e8e8266a18e3c1e403750f4a33c0fb765f8030f..12ebda79369c1047a1d52e336cc72fa133aba261 100755 --- a/tests/docs-examples-test/test_java.sh +++ b/tests/docs-examples-test/test_java.sh @@ -4,6 +4,6 @@ set -e taosd >> /dev/null 2>&1 & taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/java +cd ../../docs/examples/java mvn test \ No newline at end of file diff --git a/tests/docs-examples-test/test_node.sh b/tests/docs-examples-test/test_node.sh index 14aab236f9f96b372f864384adf92cbc9c55559f..2dd4d5735dd81fcf02e9a9f7d18983d431e0da03 100755 --- a/tests/docs-examples-test/test_node.sh +++ b/tests/docs-examples-test/test_node.sh @@ -5,7 +5,7 @@ set -e pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/node +cd ../../docs/examples/node npm install cd restexample; diff --git a/tests/docs-examples-test/test_python.sh b/tests/docs-examples-test/test_python.sh index 
2b96311b29736951e71851af49f84f074428be72..0886b2b3efa74531e18b799a5e95c80039a3a820 100755 --- a/tests/docs-examples-test/test_python.sh +++ b/tests/docs-examples-test/test_python.sh @@ -5,7 +5,7 @@ set -e taosd >> /dev/null 2>&1 & taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/python +cd ../../docs/examples/python # 1 taos -s "create database if not exists log" diff --git a/tests/docs-examples-test/test_rust.sh b/tests/docs-examples-test/test_rust.sh index 6bf0fe457af81d577cbc33c6a809717847b40c86..394d3d22c7a492d78483e0356a4bc49afe3b48f1 100755 --- a/tests/docs-examples-test/test_rust.sh +++ b/tests/docs-examples-test/test_rust.sh @@ -5,7 +5,7 @@ set -e pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & -cd ../../docs-examples/rust +cd ../../docs/examples/rust cargo run -p nativeexample --example connect cargo run -p restexample --example connect diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a5b683d4c884d80b54642332ca211fe419f2f67e..c2c462324e0cdc02b248ac704064d7c2ec76dfeb 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1,4 +1,5 @@ # 20,,pytest,python3 insert/retentionpolicy.py change date time +500,,docs-examples-test,./test_node.sh 299,,pytest,python3 test.py -f update/merge_commit_data-0.py 290,,pytest,python3 test.py -f update/merge_commit_data.py 241,,pytest,python3 test.py -f update/merge_commit_data2.py @@ -236,6 +237,7 @@ 30,,script,./test.sh -f general/import/commit.sim 30,,script,./test.sh -f general/compute/diff2.sim 30,,develop-test,bash 3-connectors/R/test.sh +30,,develop-test,bash 3-connectors/c#/test.sh 29,,system-test,python3 ./test.py -f 0-others/create_col_tag.py 29,,script,./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim 29,,script,./test.sh -f general/wal/maxtables.sim @@ -592,7 +594,6 @@ 8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeInt.py 8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeDouble.py 8,,pytest,python3 test.py -f update/update2.py -7,,docs-examples-test,./test_node.sh 7,,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonSml-otherPara.py 7,,pytest,python3 test.py -f tools/taosdumpTest2.py 7,,pytest,python3 test.py -f tools/taosdemoTestdatatype.py @@ -807,6 +808,14 @@ 4,,pytest,python3 test.py -f insert/line_insert.py 3,,pytest,python3 test.py -f tag_lite/binary.py 3,,pytest,python3 test.py -f query/filterAllIntTypes.py +3,,pytest,python3 test.py -f dbmgmt/dbNameCaseSensitive.py +3,,pytest,python3 test.py -f insert/schemalessCaseSensitive.py +3,,pytest,python3 test.py -f table/columnNameCaseSensitive.py +3,,pytest,python3 test.py -f table/columnNameValidation.py +3,,pytest,python3 test.py -f table/tagNameCaseSensitive.py +3,,pytest,python3 test.py -f table/tbNameCaseSensitive.py +3,,pytest,python3 test.py -f functions/function_max_row.py +3,,pytest,python3 test.py -f functions/function_min_row.py 3,,develop-test,python3 ./test.py -f 2-query/ts_hidden_column.py 3,,develop-test,python3 ./test.py -f 2-query/ts_shortcut.py 3,,develop-test,python3 ./test.py -f 2-query/nchar_funcs.py diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh index 221b70b22a7b17586be9ed04895e716b391f3b0e..457e339d89a5e1da665ea96d9821d7dacf15f99f 100755 --- a/tests/parallel_test/run.sh +++ b/tests/parallel_test/run.sh @@ -7,10 +7,11 @@ function usage() { echo -e "\t -b branch" echo -e "\t -l log dir" echo -e "\t -o default timeout value" + echo 
-e "\t -w log web server" echo -e "\t -h help" } -while getopts "m:t:b:l:o:h" opt; do +while getopts "m:t:b:l:o:w:h" opt; do case $opt in m) config_file=$OPTARG @@ -27,6 +28,9 @@ while getopts "m:t:b:l:o:h" opt; do o) timeout_param="-o $OPTARG" ;; + w) + web_server=$OPTARG + ;; h) usage exit 0 @@ -59,10 +63,11 @@ if [ ! -f $t_file ]; then exit 1 fi date_tag=`date +%Y%m%d-%H%M%S` +test_log_dir=${branch}_${date_tag} if [ -z $log_dir ]; then - log_dir="log/${branch}_${date_tag}" + log_dir="log/${test_log_dir}" else - log_dir="$log_dir/${branch}_${date_tag}" + log_dir="$log_dir/${test_log_dir}" fi hosts=() @@ -134,14 +139,14 @@ function build_src() { echo "$cmd" ${cmd} if [ $? -ne 0 ]; then - flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>$log_dir/failed.log" + flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>${failed_case_file}" return fi script=". ~/.bashrc;cd ${workdirs[index]}/taos-tools;git submodule update --init --recursive;mkdir -p build;cd build;cmake ..;make -j4" cmd="${ssh_script} sh -c \"$script\"" ${cmd} if [ $? -ne 0 ]; then - flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>$log_dir/failed.log" + flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>${failed_case_file}" return fi script="cp -rf ${workdirs[index]}/taos-tools/build/build/bin/* ${workdirs[index]}/TDinternal/debug/build/bin/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib64/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/TDinternal/debug/build/bin/taosBenchmark ${workdirs[index]}/TDinternal/debug/build/bin/taosdemo" @@ -191,6 +196,10 @@ function run_thread() { local exec_dir=`echo "$line"|cut -d, -f3` local case_cmd=`echo "$line"|cut -d, -f4` local case_file="" + echo "$case_cmd"|grep -q "\.sh" + if [ $? -eq 0 ]; then + case_file=`echo "$case_cmd"|grep -o ".*\.sh"|awk '{print $NF}'` + fi echo "$case_cmd"|grep -q "^python3" if [ $? -eq 0 ]; then case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'` @@ -215,44 +224,54 @@ function run_thread() { # echo "$thread_no $count $cmd" local ret=0 local redo_count=1 + local case_log_file=$log_dir/${case_file}.txt start_time=`date +%s` + local case_index=`flock -x $lock_file -c "sh -c \"echo \\\$(( \\\$( cat $index_file ) + 1 )) | tee $index_file\""` + case_index=`printf "%5d" $case_index` + local case_info=`echo "$line"|cut -d, -f 3,4` while [ ${redo_count} -lt 6 ]; do - if [ -f $log_dir/$case_file.log ]; then - cp $log_dir/$case_file.log $log_dir/$case_file.${redo_count}.redolog + if [ -f $case_log_file ]; then + cp $case_log_file $log_dir/$case_file.${redo_count}.redotxt fi - echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log - echo -e "\e[33m >>>>> \e[0m ${case_cmd}" - date >>$log_dir/$case_file.log - # $cmd 2>&1 | tee -a $log_dir/$case_file.log + echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$case_log_file + local current_time=`date "+%Y-%m-%d %H:%M:%S"` + echo -e "$case_index \e[33m START >>>>> \e[0m ${case_info} \e[33m[$current_time]\e[0m" + echo "$current_time" >>$case_log_file + local real_start_time=`date +%s` + # $cmd 2>&1 | tee -a $case_log_file # ret=${PIPESTATUS[0]} - $cmd >>$log_dir/$case_file.log 2>&1 + $cmd >>$case_log_file 2>&1 ret=$? 
- echo "${hosts[index]} `date` ret:${ret}" >>$log_dir/$case_file.log + local real_end_time=`date +%s` + local time_elapsed=$(( real_end_time - real_start_time )) + echo "execute time: ${time_elapsed}s" >>$case_log_file + current_time=`date "+%Y-%m-%d %H:%M:%S"` + echo "${hosts[index]} $current_time exit code:${ret}" >>$case_log_file if [ $ret -eq 0 ]; then break fi redo=0 - grep -q "wait too long for taosd start" $log_dir/$case_file.log + grep -q "wait too long for taosd start" $case_log_file if [ $? -eq 0 ]; then redo=1 fi - grep -q "kex_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log + grep -q "kex_exchange_identification: Connection closed by remote host" $case_log_file if [ $? -eq 0 ]; then redo=1 fi - grep -q "ssh_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log + grep -q "ssh_exchange_identification: Connection closed by remote host" $case_log_file if [ $? -eq 0 ]; then redo=1 fi - grep -q "kex_exchange_identification: read: Connection reset by peer" $log_dir/$case_file.log + grep -q "kex_exchange_identification: read: Connection reset by peer" $case_log_file if [ $? -eq 0 ]; then redo=1 fi - grep -q "Database not ready" $log_dir/$case_file.log + grep -q "Database not ready" $case_log_file if [ $? -eq 0 ]; then redo=1 fi - grep -q "Unable to establish connection" $log_dir/$case_file.log + grep -q "Unable to establish connection" $case_log_file if [ $? -eq 0 ]; then redo=1 fi @@ -265,11 +284,18 @@ function run_thread() { redo_count=$(( redo_count + 1 )) done end_time=`date +%s` - echo >>$log_dir/$case_file.log - echo "${hosts[index]} execute time: $(( end_time - start_time ))s" >>$log_dir/$case_file.log + echo >>$case_log_file + total_time=$(( end_time - start_time )) + echo "${hosts[index]} total time: ${total_time}s" >>$case_log_file # echo "$thread_no ${line} DONE" - if [ $ret -ne 0 ]; then - flock -x $lock_file -c "echo \"${hosts[index]} ret:${ret} ${line}\" >>$log_dir/failed.log" + if [ $ret -eq 0 ]; then + echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[32m success\e[0m" + else + if [ ! -z ${web_server} ]; then + flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}" + else + flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}" + fi mkdir -p $log_dir/${case_file}.coredump local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump" local scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}" @@ -278,14 +304,16 @@ function run_thread() { fi cmd="$scpcmd:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/" $cmd # 2>/dev/null - local case_info=`echo "$line"|cut -d, -f 3,4` local corefile=`ls $log_dir/${case_file}.coredump/` - corefile=`find $log_dir/${case_file}.coredump/ -name "core.*"` - echo -e "$case_info \e[31m failed\e[0m" + corefile=`find $log_dir/${case_file}.coredump/ -name "core*"` + echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[31m failed\e[0m" echo "=========================log============================" - cat $log_dir/$case_file.log + cat $case_log_file echo "=====================================================" - echo -e "\e[34m log file: $log_dir/$case_file.log \e[0m" + echo -e "\e[34m log file: $case_log_file \e[0m" + if [ ! 
-z "${web_server}" ]; then + echo "${web_server}/$test_log_dir/${case_file}.txt" + fi if [ ! -z "$corefile" ]; then echo -e "\e[34m corefiles: $corefile \e[0m" local build_dir=$log_dir/build_${hosts[index]} @@ -320,6 +348,10 @@ mkdir -p $log_dir rm -rf $log_dir/* task_file=$log_dir/$$.task lock_file=$log_dir/$$.lock +index_file=$log_dir/case_index.txt +stat_file=$log_dir/stat.txt +failed_case_file=$log_dir/failed.txt +echo "0" >$index_file i=0 while [ $i -lt ${#hosts[*]} ]; do @@ -328,10 +360,6 @@ while [ $i -lt ${#hosts[*]} ]; do i=$(( i + 1 )) done wait -# if [ -f "$log_dir/failed.log" ]; then -# cat $log_dir/failed.log -# exit 1 -# fi i=0 j=0 @@ -357,15 +385,45 @@ rm -f $lock_file rm -f $task_file # docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f +echo "=====================================================================" +echo "log dir: $log_dir" +total_cases=`cat $index_file` +failed_cases=0 +if [ -f $failed_case_file ]; then + if [ ! -z "$web_server" ]; then + failed_cases=`grep -v "$web_server" $failed_case_file|wc -l` + else + failed_cases=`grep -v "log file:" $failed_case_file|wc -l` + fi +fi +success_cases=$(( total_cases - failed_cases )) +echo "Total Cases: $total_cases" >$stat_file +echo "Successful: $success_cases" >>$stat_file +echo "Failed: $failed_cases" >>$stat_file +cat $stat_file + RET=0 i=1 -if [ -f "$log_dir/failed.log" ]; then +if [ -f "${failed_case_file}" ]; then echo "=====================================================" while read line; do + if [ ! -z "${web_server}" ]; then + echo "$line"|grep -q "${web_server}" + if [ $? -eq 0 ]; then + echo " $line" + continue + fi + else + echo "$line"|grep -q "log file:" + if [ $? -eq 0 ]; then + echo " $line" + continue + fi + fi line=`echo "$line"|cut -d, -f 3,4` echo -e "$i. 
$line \e[31m failed\e[0m" >&2 i=$(( i + 1 )) - done <$log_dir/failed.log + done <${failed_case_file} RET=1 fi diff --git a/tests/parallel_test/run_container.sh b/tests/parallel_test/run_container.sh index 6ba16ee6467671dffda3b83f766f8fdb7247aeba..31e0ccd51b9271a42134fbd3194227b79dfdc17f 100755 --- a/tests/parallel_test/run_container.sh +++ b/tests/parallel_test/run_container.sh @@ -95,7 +95,7 @@ docker run \ -v $REPDIR/packaging/cfg/taos.cfg:/etc/taos/taos.cfg:ro \ -v $REPDIR/packaging:$CONTAINER_TESTDIR/packaging:ro \ -v $REPDIR/README.md:$CONTAINER_TESTDIR/README.md:ro \ - -v $REPDIR/docs-examples:$CONTAINER_TESTDIR/docs-examples \ + -v $REPDIR/docs:$CONTAINER_TESTDIR/docs \ -v $REPDIR/src/connector/python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \ -e LD_LIBRARY_PATH=/home/debug/build/lib:/home/debug/build/lib64 \ --rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd" $timeout_param diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py index 9a155a4df9ec1f4b6b1ce4860a75938c5edc7731..308ffab848dc5afd94027c910ae4db565393defe 100644 --- a/tests/pytest/client/client.py +++ b/tests/pytest/client/client.py @@ -15,6 +15,7 @@ import sys from util.log import * from util.cases import * from util.sql import * +import os from datetime import timedelta @@ -49,7 +50,7 @@ class TDTestCase: ret = tdSql.query('show mnodes') tdSql.checkRows(1) - tdSql.checkData(0, 2, "master") + tdSql.checkData(0, 2, "leader") role_time = tdSql.getData(0, 3) create_time = tdSql.getData(0, 4) @@ -72,7 +73,15 @@ class TDTestCase: ret = tdSql.query('show vnodes "{}"'.format(dnodeEndpoint)) tdSql.checkRows(1) tdSql.checkData(0, 0, 2) - tdSql.checkData(0, 1, "master") + tdSql.checkData(0, 1, "leader") + + cmd = "taos -h 127.0.0.1 -s 'show databases'" + r = os.popen(cmd) + text = r.read() + r.close() + + if 'Unable to establish connection' in text: + tdLog.exit("%s failed: command 'taos -h 127.0.0.1' Unable to establish connection" % __file__) def stop(self): tdSql.close() diff --git a/tests/pytest/cluster/TD-3693/insert1Data.json b/tests/pytest/cluster/TD-3693/insert1Data.json index 3ac289a63a846c7de117ce6171ad023ca3f56211..43aa789e48ad7e7075ba5d9ca5b44dd8b7ae7f43 100644 --- a/tests/pytest/cluster/TD-3693/insert1Data.json +++ b/tests/pytest/cluster/TD-3693/insert1Data.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/cluster/TD-3693/insert2Data.json b/tests/pytest/cluster/TD-3693/insert2Data.json index 25717df4c76f59e8ef7d638c8793a391ff338a7c..e2c31717986bec0b1c16d889b4c9e355329648cd 100644 --- a/tests/pytest/cluster/TD-3693/insert2Data.json +++ b/tests/pytest/cluster/TD-3693/insert2Data.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/cluster/changeReplicaTest.py b/tests/pytest/cluster/changeReplicaTest.py index 7fa68edbfee2db599076befdf9bed5f4b4be3c83..a7b357cc579a4dfb82eceb149861bc00388adf99 100644 --- a/tests/pytest/cluster/changeReplicaTest.py +++ b/tests/pytest/cluster/changeReplicaTest.py @@ -30,22 +30,22 @@ class ClusterTestcase: tdSql.execute("use %s" % ctest.dbName) tdSql.query("show vgroups") for i in range(10): - tdSql.checkData(i, 5, "master") + tdSql.checkData(i, 5, "leader") tdSql.execute("alter database %s replica 2" % ctest.dbName) tdLog.sleep(30) tdSql.query("show vgroups") for i in range(10): - tdSql.checkData(i, 5, "master")
- tdSql.checkData(i, 7, "slave") + tdSql.checkData(i, 5, "leader") + tdSql.checkData(i, 7, "follower") tdSql.execute("alter database %s replica 3" % ctest.dbName) tdLog.sleep(30) tdSql.query("show vgroups") for i in range(10): - tdSql.checkData(i, 5, "master") - tdSql.checkData(i, 7, "slave") - tdSql.checkData(i, 9, "slave") + tdSql.checkData(i, 5, "leader") + tdSql.checkData(i, 7, "follower") + tdSql.checkData(i, 9, "follower") ct = ClusterTestcase() ct.run() \ No newline at end of file diff --git a/tests/pytest/compress/insertDataDb1.json b/tests/pytest/compress/insertDataDb1.json index 65cec71a65ff4ef3814bee4949def151c32945ee..67006c4d1faaebace73b1fa63abed1c902afaa33 100644 --- a/tests/pytest/compress/insertDataDb1.json +++ b/tests/pytest/compress/insertDataDb1.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/dbmgmt/dbNameCaseSensitive.py b/tests/pytest/dbmgmt/dbNameCaseSensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..80d0614c3f4268630f0e06d6b4a14e5833e84ea5 --- /dev/null +++ b/tests/pytest/dbmgmt/dbNameCaseSensitive.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self._conn = conn + + def run(self): + + # database name + tdSql.execute("create database db") + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.error("create database Db") + tdSql.error("create database `db`") + tdSql.execute("create database `Db`") + tdSql.query("show databases") + tdSql.checkRows(2) + + tdSql.execute("alter database db cachelast 1") + tdSql.execute("alter database `Db` cachelast 1") + + tdSql.execute("use db") + tdSql.query("select database()") + tdSql.checkData(0, 0, 'db'); + tdSql.query("show db.vgroups") + tdSql.checkRows(0) + + tdSql.execute("use `Db`") + tdSql.query("select database()") + tdSql.checkData(0, 0, 'Db'); + tdSql.query("show `Db`.vgroups") + tdSql.checkRows(0) + tdSql.query("show create database `Db`") + tdSql.checkRows(1) + sql = tdSql.getData(0, 1) + tdSql.checkEqual(True, sql.startswith("CREATE DATABASE `Db`")) + + + tdSql.execute("drop database db") + tdSql.execute("drop database `Db`") + + tdSql.query("show databases") + tdSql.checkRows(0) + + # corner cases + tdSql.execute("create database `电力系统`") + tdSql.query("show `电力系统`.vgroups") + tdSql.checkRows(0) + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "电力系统") + tdSql.query("show create database `电力系统`") + sql = tdSql.getData(0, 1) + tdSql.checkEqual(True, sql.startswith("CREATE DATABASE `电力系统`")) + + tdSql.error("create database ``") + tdSql.execute("create database ` `") + tdSql.error("create database ` `") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) 
+tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/functions/data.tar.gz b/tests/pytest/functions/data.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9b0fd32993cb2654e9b6c2a9546903436da43f27 Binary files /dev/null and b/tests/pytest/functions/data.tar.gz differ diff --git a/tests/pytest/functions/function_diff.py b/tests/pytest/functions/function_diff.py index 5995b821d1a42e75d7b80e0d564e4281da9b3979..9742518886b93ec644e994f211583a37e61cf4e3 100644 --- a/tests/pytest/functions/function_diff.py +++ b/tests/pytest/functions/function_diff.py @@ -16,6 +16,7 @@ import taos from util.log import * from util.cases import * from util.sql import * +from util.dnodes import * import numpy as np @@ -156,7 +157,25 @@ class TDTestCase: tdSql.error("select diff(col) from st group by dev") tdSql.error("select diff(col) from st group by col") - + + # TS-1612 + os.system("tar -zxf %s/functions/data.tar.gz" % os.getcwd()) + tdSql.execute("create database radb") + tdSql.execute("use radb") + tdSql.execute("CREATE TABLE `vehicle_automode` (`time` TIMESTAMP,`auto_ctl_odom` INT) TAGS (`mac_address` BINARY(30))") + tdSql.execute("CREATE TABLE `va_00545a230327` USING `vehicle_automode` TAGS ('00545a230327')") + tdSql.execute("insert into va_00545a230327 file 'data/va_00545a230327.csv' ") + tdSql.query("select * from vehicle_automode") + rows = tdSql.queryRows + tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname") + tdSql.checkRows(rows - 1) + os.system("rm -rf data") + + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname") + tdSql.checkRows(rows - 1) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/functions/function_first.py b/tests/pytest/functions/function_first.py index f1a916b168271e67d0001c5d4444966f8c07a2d1..df9056acfe8fc5d3617511c3cdb12470903fa71b 100644 --- a/tests/pytest/functions/function_first.py +++ b/tests/pytest/functions/function_first.py @@ -17,6 +17,7 @@ from util.log import * from util.cases import * from util.sql import * import numpy as np +import re class TDTestCase: @@ -142,6 +143,29 @@ class TDTestCase: # TD-2607 first,last + where none exist condition + interval tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)") tdSql.checkRows(0) + + # TS-1601 + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute("create table tb01(ts timestamp, c1 double, c2 int)") + tdSql.execute("insert into tb01 values(now, 2.3987401, 20)(now + 2s, 4.58123, 11)") + + r = os.popen("taos -s 'select first(c1) + last(c1) from test.tb01'") + text = r.read() + r.close() + result = float(re.split('\n |\|', text)[3]) + + tdSql.query("select first(c1) + last(c1) from tb01") + tdSql.checkData(0, 0, result) + + r = os.popen("taos -s 'select first(c1) - last(c1) from test.tb01'") + text = r.read() + r.close() + result = float(re.split('\n |\|', text)[3]) + tdSql.query("select first(c1) - last(c1) from tb01") + tdSql.checkData(0, 0, result) + + def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_last.py b/tests/pytest/functions/function_last.py index 991ac96a800803440a2e662c163622af95c556e3..1b89e2f5e4df83c338221b92cc35a367a7358335 100644 --- a/tests/pytest/functions/function_last.py +++ b/tests/pytest/functions/function_last.py @@ -127,6 +127,10 @@ class TDTestCase: tdSql.checkRows(1) 
tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col6)/10 from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.91) + tdSql.query("select last(col7) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, True) diff --git a/tests/pytest/functions/function_max_row.py b/tests/pytest/functions/function_max_row.py new file mode 100644 index 0000000000000000000000000000000000000000..7ffa9858b93ba5f7bddcee55ebaabf138201c172 --- /dev/null +++ b/tests/pytest/functions/function_max_row.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.tables = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + + tdSql.execute("create table stb (ts timestamp, c1 int, c2 double, c3 float) tags(t1 int)") + for i in range(self.tables): + tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) + sql = "insert into tb%d values" % i + for j in range(self.rowNum): + sql += "(%d, %d, %f, %f)" % (self.ts + j * 3000, j, j + 0.1, j + 0.1) + intData.append(j) + floatData.append(j + 0.1) + tdSql.execute(sql) + + tdSql.error("select max_row(ts) from stb") + tdSql.error("select max_row(t1) from stb") + + tdSql.query("select max_row(c1) from stb") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max_row(c1), * from stb") + tdSql.checkData(0, 0, np.max(intData)) + tdSql.checkData(0, 2, np.max(intData)) + tdSql.checkData(0, 3, np.max(floatData)) + tdSql.checkData(0, 4, np.max(floatData)) + + tdSql.query("select max_row(c1), * from stb group by tbname") + for i in range(self.tables): + tdSql.checkData(i, 0, np.max(intData)) + tdSql.checkData(i, 2, np.max(intData)) + tdSql.checkData(i, 3, np.max(floatData)) + tdSql.checkData(i, 4, np.max(floatData)) + + tdSql.query("select max_row(c1), * from stb interval(6s)") + tdSql.checkRows(5) + + tdSql.query("select max_row(c1), * from tb1 interval(6s)") + tdSql.checkRows(5) + + tdSql.query("select max_row(c1), * from stb interval(6s) group by tbname") + tdSql.checkRows(50) + + tdSql.query("select max_row(c1), * from (select min_row(c1) c1, * from stb group by tbname)") + tdSql.checkData(0, 0, np.min(intData)) + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/functions/function_min_row.py b/tests/pytest/functions/function_min_row.py new file mode 100644 index 0000000000000000000000000000000000000000..9acc0eee5b638eb7c3312b0afe4bfe96a87f5746 --- /dev/null +++ b/tests/pytest/functions/function_min_row.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.tables = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + + tdSql.execute("create table stb (ts timestamp, c1 int, c2 double, c3 float) tags(t1 int)") + for i in range(self.tables): + tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) + sql = "insert into tb%d values" % i + for j in range(self.rowNum): + sql += "(%d, %d, %f, %f)" % (self.ts + j * 3000, j, j + 0.1, j + 0.1) + intData.append(j) + floatData.append(j + 0.1) + tdSql.execute(sql) + + tdSql.error("select min_row(ts) from stb") + tdSql.error("select min_row(t1) from stb") + + tdSql.query("select min_row(c1) from stb") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min_row(c1), * from stb") + tdSql.checkData(0, 0, np.min(intData)) + tdSql.checkData(0, 2, np.min(intData)) + tdSql.checkData(0, 3, np.min(floatData)) + tdSql.checkData(0, 4, np.min(floatData)) + + tdSql.query("select min_row(c1), * from stb group by tbname") + for i in range(self.tables): + tdSql.checkData(i, 0, np.min(intData)) + tdSql.checkData(i, 2, np.min(intData)) + tdSql.checkData(i, 3, np.min(floatData)) + tdSql.checkData(i, 4, np.min(floatData)) + + tdSql.query("select min_row(c1), * from stb interval(6s)") + tdSql.checkRows(5) + + tdSql.query("select min_row(c1), * from tb1 interval(6s)") + tdSql.checkRows(5) + + tdSql.query("select min_row(c1), * from stb interval(6s) group by tbname") + tdSql.checkRows(50) + + tdSql.query("select min_row(c1), * from (select max_row(c1) c1, * from stb group by tbname)") + tdSql.checkData(0, 0, np.max(intData)) + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py index 4c873ec4a4efa6d1877a83d5b3942f5eb990a714..79c318c3448a0a77b02143ac5689952d9e498d3c 100644 --- a/tests/pytest/insert/line_insert.py +++ b/tests/pytest/insert/line_insert.py @@ -196,12 +196,19 @@ class TDTestCase: self._conn.schemaless_insert([ "sts,t1=abc,t2=ab\"c,t3=ab\\,c,t4=ab\\=c,t5=ab\\ c c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=\"abc\" 1626006833640000000", - "sts,t1=abc c1=3i64,c2=false,c3=L\"{\\\"date\\\":\\\"2020-01-01 08:00:00.000\\\",\\\"temperature\\\":20}\",c6=\"ab\\\\c\" 1626006833640000000" + "sts,t1=abc c1=3i64,c2=false,c3=L\"{\\\"date\\\":\\\"2020-01-01 08:00:00.000\\\",\\\"temperature\\\":20}\",c6=\"ab\\\\c\" 1626006833640000000", + "type_json5,__deviceId__=10 index=0,jsonAttri$j=\"{\\\"jsonC\\\":\\\"0\\\"}\" 1626006833640000001" ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) tdSql.query('select tbname from sts') tdSql.checkRows(2) + tdSql.query("select * from sts") + tdSql.checkData(1, 2, '''{"date":"2020-01-01 
08:00:00.000","temperature":20}''') + + tdSql.query("select * from type_json5") + tdSql.checkData(0, 2, '''{"jsonC":"0"}''') + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/insert/schemalessCaseSensitive.py b/tests/pytest/insert/schemalessCaseSensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cf133f9d9cfd30aaf266e128ff147500c38a47 --- /dev/null +++ b/tests/pytest/insert/schemalessCaseSensitive.py @@ -0,0 +1,150 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * +from util.types import TDSmlProtocolType, TDSmlTimestampType +import json + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self._conn = conn + + def run(self): + + # influxDB Line Protocol + self.influxDBLineProtocol() + + # OpenTSDB Line Protocol + self.openTSDBLineProtocol() + + # OpenTSDB JSON Protocol + self.openTSDBJSONProtocol() + + def influxDBLineProtocol(self): + print("===== influxDB Line Protocol Case Sensitive Test =====\n") + tdSql.execute("create database influxdb precision 'ns' ") + tdSql.execute("use influxdb") + lines = [ + "St,deviceId=1i voltage=1,phase=\"Test\" 1626006833639000000", + "St,DeviceId=3i voltage=2,phase=\"Test\" 1626006833639000000", + "St,deviceId=2i,DeviceId=3 Voltage=2,Phase=\"Test2\" 1626006833639000000", + "St,deviceId=4i,DeviceId=3 voltage=1,phase=\"Test\",Voltage=2,Phase=\"Test1\" 1626006833639000000", + "tbl,deviceId=\"sensor0\" Hello=3i 1646053743694400029", + "tbl,deviceId=\"sensor0\" n=3i,N=4i 1646053743694400030", + "tbl,deviceId=\"sensor0\" g=3i 1646053743694400031", + "tbl,deviceId=\"sensor0\" G=3i 1646053743694400032", + "tbl,deviceId=\"sensor0\" nice=2i,Nice=3i 1646053743694400033", + "tbl,deviceId=\"sensor0\" hello=3i 1646053743694400034", + "超级表,deviceId=\"sensor0\" 电压=3i 1646053743694400035", + ] + + self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + tdSql.query("show stables") + tdSql.checkRows(3) + + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("describe `St`") + tdSql.checkRows(7) + + tdSql.query("select * from `St`") + tdSql.checkRows(4) + + tdSql.query("select * from tbl") + tdSql.checkRows(6) + + tdSql.query("select * from `超级表`") + tdSql.checkRows(1) + + def openTSDBLineProtocol(self): + print("===== OpenTSDB Line Protocol Case Sensitive Test =====\n") + tdSql.execute("create database opentsdbline") + tdSql.execute("use opentsdbline") + + # format: =[ =] + lines = [ + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.Current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.Current 1648432611249 10.8 Location=California.LosAngeles groupid=3", + "meters.Current 1648432611249 10.8 Location=California.LosAngeles location=California.SanFrancisco groupid=3", + "Meters.current 1648432611250 11.3 location=California.LosAngeles Groupid=3", + "电表 
1648432611250 11.3 位置=California.LosAngeles Groupid=3" + ] + + self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None) + tdSql.query("show stables") + tdSql.checkRows(4) + + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("describe `meters.Current`") + tdSql.checkRows(5) + tdSql.checkData(2, 0, "groupid") + tdSql.checkData(3, 0, "location") + tdSql.checkData(4, 0, "Location") + + tdSql.query("describe `Meters.current`") + tdSql.checkRows(4) + tdSql.checkData(2, 0, "Groupid") + tdSql.checkData(3, 0, "location") + + tdSql.query("describe `电表`") + tdSql.checkRows(4) + tdSql.checkData(2, 0, "Groupid") + tdSql.checkData(3, 0, "位置") + + def openTSDBJSONProtocol(self): + print("===== OpenTSDB JSON Protocol Case Sensitive Test =====\n") + tdSql.execute("create database opentsdbjson") + tdSql.execute("use opentsdbjson") + + lines = [ + {"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"Location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.Current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "Location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "电压", "timestamp": 1648432611250, "value": 221, "tags": {"位置": "California.LosAngeles", "groupid": 1}} + ] + + self._conn.schemaless_insert([json.dumps(lines)], TDSmlProtocolType.JSON.value, None) + tdSql.query("show stables") + tdSql.checkRows(4) + + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("describe `meters.Current`") + tdSql.checkRows(4) + + tdSql.query("describe `meters.voltage`") + tdSql.checkRows(5) + tdSql.checkData(3, 0, "Location") + tdSql.checkData(4, 0, "location") + + tdSql.query("describe `电压`") + tdSql.checkRows(4) + tdSql.checkData(3, 0, "位置") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/manualTest/TD-5114/checkClusterStatus.py b/tests/pytest/manualTest/TD-5114/checkClusterStatus.py index c6bff305a5b3317d03a793c9634b1ea19b3b7217..213d6b87abb9294c637039f3a2ce44f84461e937 100644 --- a/tests/pytest/manualTest/TD-5114/checkClusterStatus.py +++ b/tests/pytest/manualTest/TD-5114/checkClusterStatus.py @@ -53,7 +53,7 @@ class TwoClients: tdSql.query("show mnodes") tdSql.checkRows(3) - roles = "master slave" + roles = "leader follower" for i in range(tdSql.queryRows): if (tdSql.queryResult[i][2] in roles ): ep = tdSql.queryResult[i][1] diff --git a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json index 4f32b700d8d042134e9edc374abf57e7cf5674b5..14cc9848610d517c2db1417d81526aefe051b49a 100644 --- a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json +++ b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py index 
2f17e0bd3ada7ece5f8544033192758fd4747b52..dbf7b9ad9e05d77c7f910992a226e34652749e5b 100644 --- a/tests/pytest/perfbenchmark/bug3433.py +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -74,7 +74,6 @@ class TDTestCase: "minRows": 100, "maxRows": 4096, "comp": 2, - "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json index 149a4b56acb69ec9a35b1c05a54d6d08803f8080..1518da8c9d9311f26b5fc75ee8d575ed70c63691 100644 --- a/tests/pytest/query/nestedQuery/insertData.json +++ b/tests/pytest/query/nestedQuery/insertData.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/query/nestedQuery/nestedQuery.py b/tests/pytest/query/nestedQuery/nestedQuery.py index 89751bb7b808002b42e09d4a6bee2ef16e7ac775..9f9f56660cacd2b38d6c4f20b5da2cdc07e462dd 100755 --- a/tests/pytest/query/nestedQuery/nestedQuery.py +++ b/tests/pytest/query/nestedQuery/nestedQuery.py @@ -2233,10 +2233,10 @@ class TDTestCase: sql = "select * from ( select ts , " for i in range(4094): sql += "c%d , " % (i) - sql += "c4094 from d0 " + sql += "c4094 from d0 " sql += " %s )" % random.choice(order_where) sql += " %s ;" % random.choice(order_desc_where) - tdLog.info(len(sql)) + tdLog.info(len(sql)) tdSql.query(sql) tdSql.checkCols(4096) tdSql.checkRows(1000) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index a365369b21fc7429216f5c1e8c624bf856a744c1..62b435fbef9cb132e349e0f8da91eb5c795f3b0b 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -65,7 +65,6 @@ class TDTestCase: "minRows": 100, "maxRows": 4096, "comp": 2, - "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, diff --git a/tests/pytest/query/queryBase.py b/tests/pytest/query/queryBase.py index 4544fab3adcb6e760dcbc05ab56cd22edd35b3e2..9be950df49e7b2e34f88edaafd91fac37aa8a009 100644 --- a/tests/pytest/query/queryBase.py +++ b/tests/pytest/query/queryBase.py @@ -171,6 +171,11 @@ class TDTestCase: tdSql.waitedQuery(sql, 1, WAITS) tdSql.checkData(0, 1, 229400) + # TS-1664 + tdSql.error("create database string") + tdSql.error("create table string(ts timestamp, c1 int)") + tdSql.error("select * from string") + # # add case with filename # diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index e5c468600ba56a251057f204971084fe2844a85e..7d752a9863312e42e303e5db34e18ad740bc5a19 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -68,6 +68,16 @@ class TDTestCase: # TD-6006 tdSql.error("select * from dev_001 where 'name' is not null") tdSql.error("select * from dev_001 where \"name\" = 'first'") + + # TS-1577 + tdSql.query("show databases") + rows = tdSql.queryRows + + for i in range(1000): + tdSql.execute("create database test%d" % i) + + tdSql.query("show databases") + tdSql.checkRows(rows + 1000) def stop(self): tdSql.close() diff --git a/tests/pytest/query/queryGroupbySort.py b/tests/pytest/query/queryGroupbySort.py index 6439fc6560d7f74b6d27ba3847f2459918fd94dc..c5ca1efacd6b4d29ea4c4b9212873ea85aa435ed 100644 --- a/tests/pytest/query/queryGroupbySort.py +++ b/tests/pytest/query/queryGroupbySort.py @@ -88,6 +88,23 @@ class TDTestCase: tdSql.query("select count(*) from tb group by c1") tdSql.checkRows(0) + # TS-1619 + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute("create table stb(ts 
timestamp, c1 int, c2 nchar(30)) tags(t1 int)") + for i in range(3): + tdSql.execute("create table t%d using stb tags(%d)" % (i, i)) + sql = "insert into t%d values " % i + for j in range(16): + if j % 4 == 0: + s = '00' + else: + s = str(j % 4 * 15) + sql += "(%d, %d, '2022-06-01 0%d:%s')" % (self.ts + j, i, int(j / 4), s) + tdSql.execute(sql) + + tdSql.query("select c2, sum(c1) from stb group by c2") + tdSql.checkRows(16) def stop(self): tdSql.close() diff --git a/tests/pytest/subscribe/singlemeter.py b/tests/pytest/subscribe/singlemeter.py index 879e0a75ebdf29022990b5e2e250370620c74636..ff182f70b57241bbb73d67044e5ff0cc87fb72c5 100644 --- a/tests/pytest/subscribe/singlemeter.py +++ b/tests/pytest/subscribe/singlemeter.py @@ -68,8 +68,25 @@ class TDTestCase: tdSub.consume() tdSub.checkRows(11) + + # TS-1788: Subscribe a case sensitive table + tdLog.info("create a table and insert 10 rows.") + now = int(time.time() * 1000) + tdSql.execute("create table `T1`(ts timestamp, a int, b int);") + for i in range(0, 10): + tdSql.execute("insert into `T1` values (%d, %d, %d);" % (now + i, i, i)) + + sqlstr = "select * from `T1`" + topic = "topic1" +
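+ # TS-1788: the subscription must resolve the backtick-quoted, case sensitive name `T1` and consume the 10 rows just inserted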
+ tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0)) + tdSub.consume() + tdSub.checkRows(10) + tdSub.close(True) + def stop(self): tdSub.close(False) tdSql.close() diff --git a/tests/pytest/table/columnNameCaseSensitive.py b/tests/pytest/table/columnNameCaseSensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..cbf69c6bf1adba057c47f884f2ccbf99b30b088b --- /dev/null +++ b/tests/pytest/table/columnNameCaseSensitive.py @@ -0,0 +1,184 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + # column + tdSql.execute("create table tb(ts timestamp, c1 int)") + tdSql.execute("create table `TB`(ts timestamp, c1 int)") + tdSql.error("alter table tb add column C1 int") + tdSql.execute("alter table tb add column `C1` int") + tdSql.error("alter table `TB` add column C1 int") + tdSql.execute("alter table `TB` add column `C1` int") + + tdSql.error("create table tb2(ts timestamp, c1 int, C1 int)") + tdSql.execute("create table tb2(ts timestamp, c1 int, `C1` int)") + tdSql.query("describe tb2") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 'ts') + tdSql.checkData(1, 0, 'c1') + tdSql.checkData(2, 0, 'C1') + + tdSql.execute("insert into tb2(ts, c1) values(now, 1)") + tdSql.execute("insert into tb2(ts, `C1`) values(now + 1s, 1)") + tdSql.execute("insert into tb2(ts, c1, `C1`) values(now + 2s, 1, 2)") + tdSql.query("select * from tb2") + tdSql.checkRows(3) + + tdSql.query("select * from tb2 where c1 = 1") + tdSql.checkRows(2) + + tdSql.query("select * from tb2 where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 `C1` from tb2 where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 as `C1` from tb2 where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` a from tb2 where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` as a from tb2 where `C1` = 1") + tdSql.checkRows(1) + + tdSql.execute("alter table tb2 drop column c1") + tdSql.query("describe tb2") + tdSql.checkRows(2) + + tdSql.error("create table `TB2`(ts timestamp, c1 int, C1 int)") + tdSql.execute("create table `TB2`(ts timestamp, c1 int, `C1` int)") + tdSql.query("describe `TB2`") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 'ts') + tdSql.checkData(1, 0, 'c1') + tdSql.checkData(2, 0, 'C1') + + tdSql.execute("insert into `TB2`(ts, c1) values(now, 1)") + tdSql.execute("insert into `TB2`(ts, `C1`) values(now + 1s, 1)") + tdSql.execute("insert into `TB2`(ts, c1, `C1`) values(now + 2s, 1, 2)") + tdSql.query("select * from `TB2`") + tdSql.checkRows(3) + + tdSql.query("select * from `TB2` where c1 = 1") + tdSql.checkRows(2) + + tdSql.query("select * from `TB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 `C1` from `TB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 as `C1` from `TB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` a from `TB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` as a from `TB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.execute("alter table `TB2` drop column `C1`") + tdSql.query("describe `TB2`") + tdSql.checkRows(2) + + tdSql.error("create table `STB2`(ts timestamp, c1 int, C1 int) tags (t1 int)") + tdSql.execute("create table `STB2`(ts timestamp, c1 int, `C1` int) tags (t1 int)") + tdSql.query("describe `STB2`") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 'ts') + tdSql.checkData(1, 0, 'c1') + tdSql.checkData(2, 0, 'C1') + tdSql.checkData(3, 0, 't1') +
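+ # unquoted column names fold to lower case while backtick-quoted names keep their case, so c1 and `C1` are distinct columns of `STB2`; the inserts below write through each of them in turn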
+ tdSql.execute("insert into tt2(ts, c1) using `STB2` tags(1) values(now, 1)") + tdSql.execute("insert into tt2(ts, `C1`) using `STB2` tags(1) values(now + 1s, 1)") + tdSql.execute("insert into tt2(ts, c1, `C1`) using `STB2` tags(1) values(now + 2s, 1, 2)") + tdSql.query("select * from `STB2`") + tdSql.checkRows(3) + + tdSql.query("select * from `STB2` where c1 = 1") + tdSql.checkRows(2) + + tdSql.query("select * from `STB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 `C1` from `STB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select c1 as `C1` from `STB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` a from `STB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("select `C1` as a from `STB2` where `C1` = 1") + tdSql.checkRows(1) + + tdSql.query("show create table `STB2`") + tdSql.checkData(0, 1, "CREATE TABLE `STB2` (`ts` TIMESTAMP,`c1` INT,`C1` INT) TAGS (`t1` INT)") + + tdSql.execute("alter table `STB2` drop column `C1`") + tdSql.query("describe `STB2`") + tdSql.checkRows(3) + + # corner cases + tdSql.execute("alter table `STB2` add column `数量` int") + tdSql.execute("insert into tt3(ts, `数量`) using `STB2` tags(2) values(now + 3s, 1)") + tdSql.query("show create table `STB2`") + tdSql.checkData(0, 1, "CREATE TABLE `STB2` (`ts` TIMESTAMP,`c1` INT,`数量` INT) TAGS (`t1` INT)") + tdSql.query("select * from tt3") + tdSql.checkRows(1) + tdSql.query("select ts `TS` from tt3") + tdSql.checkRows(1) + tdSql.query("select ts as `TS` from tt3") + tdSql.checkRows(1) + tdSql.query("select ts as `时间戳` from tt3") + tdSql.checkRows(1) + tdSql.query("select ts `时间戳` from tt3") + tdSql.checkRows(1) + + tdSql.error("create table tt4(`` timestamp, c1 int)") + tdSql.error("create table tt4(` ` timestamp, ` ` int)") + tdSql.error("create table tt4(`tb1` timestamp, `tb1` int)") + + ts = 1656040651000 + tdSql.execute("create table `T4`(` ` timestamp, c1 int, `C1` int)") + tdSql.execute("insert into `T4`(` `, `C1`) values(%d, 1)" % ts) + tdSql.query("select * from `T4`") + tdSql.checkRows(1) + tdSql.execute("delete from `T4` where ` ` = '2022-06-24 11:17:31.000'") + tdSql.query("select * from `T4`") + tdSql.checkRows(0) + + tdSql.error("alter table `T4` add column `` double") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/table/columnNameValidation.py b/tests/pytest/table/columnNameValidation.py new file mode 100644 index 0000000000000000000000000000000000000000..a2968a2ea5053f2f9dd488456ebc62ae37513c27 --- /dev/null +++ b/tests/pytest/table/columnNameValidation.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +import sys +import string +import random +import subprocess +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.query('show tables') + tdSql.checkRows(0) + + # uniqueness + tdSql.error("create table t (t timestamp, f int, F int)") + tdSql.error("create table t (t timestamp, `f` int, F int)") + tdSql.error("create table t (t timestamp, `f` int, `f` int)") + tdSql.execute("create table t (t timestamp, `f` int, `F` int)") + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.execute("drop table t") +
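+ # tag names share one namespace with column names: an unquoted T folds to t and collides with the first column, and duplicate quoted names are rejected as well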
tdSql.execute("create table t (t timestamp, f int, `F` int) tags (`T` int)") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.execute("drop table t") + + # non-emptiness + tdSql.error("create table t (t timestamp, `` int)") + tdSql.error("create table t (t timestamp, `f` int) tags (`` int)") + tdSql.query("show tables") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/tagNameCaseSensitive.py b/tests/pytest/table/tagNameCaseSensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..c9ee64fa242484e64c9fa6cfdb6ed468436f2199 --- /dev/null +++ b/tests/pytest/table/tagNameCaseSensitive.py @@ -0,0 +1,89 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self._conn = conn + + def run(self): + tdSql.prepare() + + # tag + tdSql.error("create table `STB3`(ts timesatmp, c1 int) tags(t1 int, T1 int)") + tdSql.execute("create table `STB3`(ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("alter table `STB3` add tag `T1` int") + tdSql.execute("create table `STB4`(ts timestamp, c1 int) tags(t1 int, `T1` int)") + tdSql.execute("create table tt3 using `STB3`(t1) tags(1)") + tdSql.execute("create table tt4 using `STB3`(`T1`) tags(1)") + tdSql.query("select t1, `T1` from `STB3`") + tdSql.checkRows(2) + + tdSql.query("show create table `STB3`") + tdSql.checkData(0, 1, "CREATE TABLE `STB3` (`ts` TIMESTAMP,`c1` INT) TAGS (`t1` INT,`T1` INT)") + + tdSql.execute("alter table `STB3` drop tag `T1`") + tdSql.query("describe `STB3`") + tdSql.checkRows(3) + + # cornor case + tdSql.execute("create table `STB5`(ts timestamp, c1 int) tags(t1 int, `标签` int)") + tdSql.execute("insert into `测试` using `STB5` tags(1, 1) values(now, 1)") + tdSql.query("show create table `STB5`") + tdSql.checkData(0, 1, "CREATE TABLE `STB5` (`ts` TIMESTAMP,`c1` INT) TAGS (`t1` INT,`标签` INT)") + tdSql.query("select * from `测试`") + tdSql.checkRows(1) + + tdSql.query("select `标签` t from `测试`") + tdSql.checkRows(1) + + tdSql.execute("alter table `STB5` add tag `标签2` double") + tdSql.query("describe `STB5`") + tdSql.checkRows(5) + + ts = 1656040651000 + tdSql.error("create table `STB6`(ts timestamp, c1 int) tags(`` int)") + tdSql.error("create table `STB6`(ts timestamp, c1 int) tags(` ` int, ` ` binary(20))") + tdSql.execute("create table `STB6`(ts timestamp, c1 int) tags(` ` int)") + tdSql.execute("insert into tb6 using `STB6` tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (ts, ts + 1000, ts + 2000)) + tdSql.execute("insert into tb7 using `STB6` tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (ts, ts + 1000, ts + 2000)) + tdSql.query("select * from `STB6`") + tdSql.checkRows(6) + + tdSql.execute("delete from `STB6` where ` ` = 1 and ts = 1656040651000") + tdSql.checkAffectedRows(1) + 
tdSql.query("select * from `STB6`") + tdSql.checkRows(5) + tdSql.execute("delete from `STB6` where ` ` = 2") + tdSql.checkAffectedRows(3) + tdSql.query("select * from `STB6`") + tdSql.checkRows(2) + + tdSql.execute("alter table `STB6` add tag `1` int") + tdSql.execute("create table t1 using `STB6`(`1`) tags(1)") + tdSql.error("alter table t1 set tag 1=2222") + + tdSql.error("alter table `STB6` add tag `` nchar(20)") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/tbNameCaseSensitive.py b/tests/pytest/table/tbNameCaseSensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..dd3f668eb8f0f4f2482d752d27cfa1e51a43701a --- /dev/null +++ b/tests/pytest/table/tbNameCaseSensitive.py @@ -0,0 +1,141 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + + # table/stable + tdSql.execute("create database test") + tdSql.execute("create database `Test`") + tdSql.execute("use test") + tdSql.execute("create table tb(ts timestamp, c1 int)") + + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.query("show create table tb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "CREATE TABLE `tb` (`ts` TIMESTAMP,`c1` INT)") + + tdSql.error("create table Tb(ts timestamp, c1 int)") + tdSql.execute("create table `TB`(ts timestamp, c1 int)") + + tdSql.query("show tables") + tdSql.checkRows(2) + tdSql.query("show create table `TB`") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "CREATE TABLE `TB` (`ts` TIMESTAMP,`c1` INT)") + + tdSql.query("describe tb") + tdSql.checkRows(2) + + tdSql.query("describe `TB`") + tdSql.checkRows(2) + + tdSql.execute("insert into tb values(now, 1)") + tdSql.error("select * from `Test`.tb") + tdSql.query("select * from test.tb") + tdSql.checkRows(1) + + tdSql.execute("insert into `TB` values(now, 1)") + tdSql.error("select * from `Test`.`TB`") + tdSql.query("select * from test.`TB`") + tdSql.checkRows(1) + + tdSql.execute("create stable stb(ts timestamp, c1 int) tags(t1 int)") + tdSql.query("show stables") + tdSql.checkRows(1) + + tdSql.error("crate stable STb(ts timestamp, c1 int) tags(t1 int)") + tdSql.error("create stable `stb`(ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable `STB`(ts timestamp, c1 int) tags(t1 int)") + tdSql.query("show stables") + tdSql.checkRows(2) + + tdSql.query("describe stb") + tdSql.checkRows(3) + + tdSql.query("describe `STB`") + tdSql.checkRows(3) + + tdSql.execute("insert into t1 using stb tags(1) values(now, 1)") + tdSql.query("select * from stb") + tdSql.checkRows(1) + + tdSql.execute("insert into t2 using `STB` tags(1) values(now, 1)") + tdSql.query("select * from `STB`") + tdSql.checkRows(1) + + tdSql.execute("insert into `T2` using `STB` tags(1) values(now + 1s, 1)") + 
tdSql.query("select * from `STB`") + tdSql.checkRows(2) + + tdSql.query("select tbname from `STB`") + tdSql.checkRows(2) + + tdSql.execute("alter table stb add column c2 int") + tdSql.execute("alter table stb add tag t2 int") + tdSql.execute("alter table `STB` add column c2 int") + tdSql.execute("alter table `STB` add tag t2 int") + tdSql.execute("alter table `TB` add column c2 int") + + tdSql.query("show create table `STB`") + tdSql.checkData(0, 1, "CREATE TABLE `STB` (`ts` TIMESTAMP,`c1` INT,`c2` INT) TAGS (`t1` INT,`t2` INT)") + + # corner cases + tdSql.execute("create table `超级表`(ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create table `子表一` using `超级表` tags(1)") + tdSql.execute("insert into `子表二` using `超级表` tags(1) values(now, 1)") + + tdSql.query("select * from `超级表`") + tdSql.checkRows(1) + tdSql.query("select * from `子表二`") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(7) + + tdSql.execute("create table `普通表` (ts timestamp, c1 int)") + tdSql.execute("insert into `普通表` values(now, 2)") + tdSql.query("select * from `普通表`") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(8) + tdSql.query("show create table `普通表`") + tdSql.checkData(0, 1, "CREATE TABLE `普通表` (`ts` TIMESTAMP,`c1` INT)") + + tdSql.error("create table `` (ts timestamp, c1 int)") + tdSql.execute("create table ` ` (ts timestamp, c1 int)") + tdSql.error("create table ` ` (ts timestamp, c1 int)") + + ts = 1656040651000 + tdSql.execute("insert into ` ` values(%d, 1)" % ts) + tdSql.query("select * from ` `") + tdSql.checkRows(1) + tdSql.execute("delete from ` `") + tdSql.checkAffectedRows(1) + tdSql.query("select * from ` `") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/tools/insert-interlace.json b/tests/pytest/tools/insert-interlace.json index cd72958115aa38280c028c0f0e91443d62f692a4..85cb2dcfce7ff901673c7d388a814349a3659c2b 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json index 025751bcd3c2d0800d6a02f62adb76d15b8b0131..f5dad7a69db9ce34754796768798b8ffdce3477b 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json index 6fa020433a05f8f989638357c9874fe8843dfe34..c013f616b17f45b15e4bba6e9217f86a5bb40bc7 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset.json b/tests/pytest/tools/insert-tblimit-tboffset.json index b4d4016ef926d64f85df9a85bfb75352caf2442e..5f8770070ac0af719316c320dd34d91b23d56699 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset.json +++ 
b/tests/pytest/tools/insert-tblimit-tboffset.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index 8a7e39b17c13387e00167396d21a0c791601e390..cbf5e78dff326970e533b4835b92bceb0b191971 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json b/tests/pytest/tools/insert-tblimit1-tboffset.json index 6e150203b3103eabc546f772ed9aad73ae879207..bef719c5eee3a36faca3edc9f3fd606c4c665ebc 100644 --- a/tests/pytest/tools/insert-tblimit1-tboffset.json +++ b/tests/pytest/tools/insert-tblimit1-tboffset.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/insert.json b/tests/pytest/tools/insert.json index 996b91ed06f283fdcd968df9cafc4f58583cbb8d..eacbb590f579385410bff8d46d06f52d496e8fcf 100644 --- a/tests/pytest/tools/insert.json +++ b/tests/pytest/tools/insert.json @@ -17,8 +17,7 @@ "cache": 16, "blocks": 8, "precision": "ms", - "update": 0, - "maxtablesPerVnode": 1000 + "update": 0 }, "super_tables": [{ "name": "stb01", diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json index 8bd5ddbae8d5ce81269626165b2d275d05135ea5..c3ea89a0534a3a87a23801e053be96cde8f1b7df 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json index 5408a9841ab8a40e4ca7564724b7f6c7f941e0e0..b6428b482c3958a948d6c8615d26921b45a935d1 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json index 13eb80f3cf7f751398babed8e922f9e5b3a4242e..4a648092cc3f4efac5a0d027f7d36d7af2a7d520 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json index 38ac666fac5097d616c17bdfc7e900256827ddf4..afe156e18c4dad98eea5d81a0a8772e48a735945 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, 
"cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json index 9ef4a0af66e852a01d8ca7d677de4467ea316097..ac2fbb39b7271360bac17172cb8d2213a3b4a0d8 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json index a09dec21fa9cf3720b68a1fa2e843b49be0544ee..dede88c2dfe8b7dd1dca29d96b698b6e13202237 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json index e99c528c6d62e3b9ce59565e60d21fb562bb836d..cc696518bff385c5281ae29c3ebff5b9e2021a4f 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json index ad85f9607b72c5d4562266508bfdcf68837c33bd..1b726ef5da5a7f699b99c0e03238337bfac0c575 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index d6e3afdea31955992cc0c9cc8842bc6ae7c6e3f6..9f79c1ae239a74d2f0e6f35c3d1d883169356837 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py index c047e4b0aadcc27e0014420c2d350f106125109c..667b859c8f35b492d96e7e7633ab8c728f09335a 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -56,18 +56,18 @@ class TDTestCase: tdSql.execute("use regular_old") tdSql.query("show tables;") tdSql.checkRows(1) - tdSql.query("select * from d0;") + tdSql.query("select * from meters;") tdSql.checkCols(1024) - tdSql.query("describe d0;") + tdSql.query("describe meters;") tdSql.checkRows(1024) os.system("%s -N -d regular_new -t 1 -n 
10 -l 4095 -y" % binPath) tdSql.execute("use regular_new") tdSql.query("show tables;") tdSql.checkRows(1) - tdSql.query("select * from d0;") + tdSql.query("select * from meters;") tdSql.checkCols(4096) - tdSql.query("describe d0;") + tdSql.query("describe meters;") tdSql.checkRows(4096) # super table -d:database name -t:table num -n:rows num per table diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index d73719ebe41c5f25fc2cd585bc9974d9e83a946e..f379fe61bf3e799eb5315cf5f41bd5158de29b6b 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json index e10fd1116b948032d5aa67dc0844bbf493d650de..142098865062b0b6489a1aae18b6492f3e4b129b 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-allDataType.json b/tests/pytest/tools/taosdemoAllTest/insert-allDataType.json index a7ada9b84e2bb534eac63364039598d1ddb4c744..1e714c081321c6fc7005d10e49211a4cf10e44b9 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-allDataType.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-allDataType.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json b/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json index 49407a76d76edda3c45716134521265114702f11..3633bb64820bc49cedaf3c0964a0384b34e38a32 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-chinese.json b/tests/pytest/tools/taosdemoAllTest/insert-chinese.json index ab848b1317049f672775ec0cc6d1f6c3cd78760e..88ace597784901c8865ee539517a39757e722231 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-chinese.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-chinese.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json index d6420b100e5ad2bae887b3ae5fb5cc0f306d9762..2ae3d6c1ceb91f59ee1d7661b7c5f816d8465496 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json index 2c3b8c6f81962e02ff5aac37c58fb04b79159a7c..fc2cf160a4ee51ad020bd8aa0bd4a2dc6ee0b95c 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json 
@@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json index f8fe21a6c4015a27ee663bc7ac54a7889af62add..39e4b3bbc8900f4aa91aaf3472097f363568e2df 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json index c56f8f30402aa948828377b46e5cf8678a3b3472..920eed645608002435d335ece4527cfdcf06d4ae 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json index 93bb92764d3e4ba141a8b8c9b2df4fda69cb9eaa..a40c17d1f94f2b43de2cee7d2c83a8b0f29156f7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json index d51dee428fc8f1bb61af84d5f570f69cce344651..ae15b41e4fc464e6cd4932c63c3d1df440ff6fd5 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json index 05a6f7606a22f7c4712ed7c1a4452c43c87f5428..4386b7a7ee9fa21bfcfb115dccb2e72b509e3c80 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json index 02b56bbfe8a5e0900467e0dc0537919465a406a7..a87e257ff94d342b83a39b19c1bffd08be35ffe5 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json index 5978e5529f8d3a3b29cb04f1744a045b56e7e5ba..44707e87484d0cd898fc35cbb3b0d561072a59cf 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-offset.json 
b/tests/pytest/tools/taosdemoAllTest/insert-offset.json index 53edf41072a93b907da8af6648dab03691e039a8..351a2b38d59fa2569d60a2fc48c3c2cf81ce6c68 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-offset.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json index 91c033c67711e0713f65a08a48351288470d565e..de023d15a2b9654df5999940ed04a46e4eeabd27 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json b/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json index b14c3a8ec6d329e187c84b87412570e220eddb73..d63549b79b7c40a8c847d25f6de45a8618eb4da4 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample.json b/tests/pytest/tools/taosdemoAllTest/insert-sample.json index 87d442b7cbc981c8f3a86104c9d13856283f1815..d41433ff0c3dd7756d8c41aa556158b5167d9e77 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-sample.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-sample.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json index c794c73c843607a7ef6bb84b288ac890a317bfa9..059643a851faefa30c0345aac52e0779de2a7fa6 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json index be55d31d5595b210695584f6dbbc334bb7b7f8e6..7f16fa74ec383df86360ebe3fbb2f956f81e8d90 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json index 67abdc67eed813501e012c8b7dce5d0719d22eb6..ffcd49e32e1774b70d7430f3b75ef4347c4eea94 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json index 
84aa75eca7ac5eaabfeef715471e9b91ee66dfec..99f89eb7dc82b14e0f4fe3d8543326ad22a19ff9 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json index 58acd9bbd022bb55ef573f9a7e9434ed935b55bc..68fa2acf634140643014a028f8377372a78598f1 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json index 17153c2f2c00a2e296ebf59409be1287cb203c24..4cd6e3ceb57b5a0e9907b878af0d28f5b04fc062 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json index 59cbedca72709fe10203926881160629658ae3bc..04d32a3e79a6e0daed9d6c6a6e1dea77bad00784 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json index 52d6ae029de4a2c019545ac047526638237d701e..867152a6030712d8385449f5def8723a57f23f0d 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 60a10d2501bb2644784ea24afe2319679c441a34..0f4952074db6a8e31224b55a307aa04bdb23d187 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json index 1166ac36438babefbe0d0de70d5a5e3f088f055f..686a2cc4f108d4439ac26a7f7cdeeee291e7314b 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json +++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json index 8247c5f0158e5cce4d3891dc88048e4a29a3d888..def5043d4fbe56c8606126b8a997caa0ca193e3f 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json index 138ebbadf63d16816e723462693684cfd2e4c2c0..f1f4b7f3c66bc5072beac1aac18c9d69c3df72f4 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json index 682dcf2ce4393815590552e935578df26bb8f43c..cb90c1f89898878b668d052e3eee71aa0c8d01a9 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertRestful.json +++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json index e8468f5906a7ebdef62f6509a8968a0df7bdd775..8b8f959e05b8189e1ae8e0dc038522709f4c9e10 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json index 4dbe2940e2c7954e6b41a8f645d9e8d809d013d6..4480cf47d3476927eecc59098962caf33de27988 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json index 65973ccb485585de689f5e44a3bca28b675732b4..ae820815257f4fd3f33130c2829eb0ba9c4e47ae 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json +++ b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json index a1a28c9ee970c9db1f21ace18dd7b8f54f39e5ed..5c8dc689ae00b536719e33d27732377362cbc128 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block2.json b/tests/pytest/tools/taosdemoAllTest/manual_block2.json index 03f6e038fb4072f64569e65e91f86ccd8ce5f86e..c92c18b025e045018512baf2fd879e859324dd93 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block2.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block2.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, 
- "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json index 7b8abd6d4e25991d38ff16c737bf8169c7311318..0f7786e6822580d842a91c6f9b498c2ab401b07b 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json index aeee6322e5c0e6b58c0433be5f345e7c4f84f339..f8decfca417ea2a86ac1065334169841694df68c 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json index e30b7b0b1c6a136aa45c91da165ff8101eeb42e3..f166d461fec3d84f5c2fb4b295272529818ab35a 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json index d4ce2fee46d8848f574d75173818bff819c1d31f..ebbbc001f9da6c384f6cdf2eb4905ea6933e6f58 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json index ce12accf06c101956ec6a9d025e63bb1814acbd0..a18e1e0e1a36af7ea38eb159471c996c368c981d 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json index 9ffb2953d3c46df5a6cbd4e6042748185254e62a..4b246a93d731c47e78d01e280fe777cb5d54e397 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json index 896e484c258ed4f1418f48a74cd643defc9c6731..8857d5adae2b5ea808e1044fd28512b4562d597b 100644 --- a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json index eb196e4096d26f429f013a8936c910e5dc86c304..756316621d958693c16a5ced6e5882305cf88dcc 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json index 0febbdfa19d2ba8dd4db0b318d05c5af18fd1584..0073f52e2bd4dbad211fcfd30ab4e95e3a215401 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json index 5cd06c02759ddcba93eaa8ef4ef848a9b645cbda..8e96931e523b60a65ce59be79e038472fd4fe929 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json index 0885e01782b41079ccbfb7a30a8b4d3628ba9c20..5042549f09201a71ae9bc907ba2ac162acf4c382 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json index cbd4f6cb59c1ddd146b42a233c740d6bbaca45d3..0de5ddcc26840162b6e1264266ce4a3de0bd20ab 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json index 6f24801cb04f9f515e33898fb587b95029def325..57006fcc3c1a75d7f2064a10b4174f6ecd46a167 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json index 92e6ec0df7a70329312676298c3b5ffccc2a8767..dcca0f82aef7ed856a61a54ebb28d6ea3c1eccde 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json index c09493ec7b892baba37a7be4addb0ce526752f07..cdfc5cb26d7a89add24b12fb0731aeaab16d3690 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json index e04f2ff5e7cb24cb5384b7451712b3fe83bf18c3..caf9a9466b63c0336add8f52f8dd4b83dc87ad3a 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json index 4a4227adb8fdcd0cb025a10c5b6f417c921acd96..564f2405e3e008dae7c7dbdf14519860ea5acf25 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json index 1d29842e02c654987c50e6e73d4aec5eed48aa83..f0a84487d5e3a7fd1a52f655c622d23a8495bf0c 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json index 886503a950ca18b752bfa264218bb8564ce44ae0..ac5ba1dc5ff5b6fb3347cacf1ee276871733d226 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json index ca99d135c5f466c911f3063b88fbb3e58c4e4ed4..50af8517bc19a3de655280a6159644cb15df7df5 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json index d0109b50cf449b0e7e1b258ae29723a560b1d2f6..d79ae2b0054e591c6ea40c90ed54072fc36b47a6 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json index f8f3a8ee5cea1834c31ebb275a10977cd960f829..459d47b114e872eb702bbc3fe782562fdc5f4086 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json index 780fd60bb7e98f18a5c33798b6bb35a77e1d85db..35c808bd5880f192b409f909a65badb983df7088 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json index 66885ebab89f7221830e66d642ca17b99de0e397..eca27390c6b2c2a93c8e9c7a2222e8785913637e 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json index c9fa0f6fb0ddc777159b5d13f324c65b23cabd0d..6c780edd1537dcd56f56a4d3ee30d70cc4326835 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-timestamp.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-timestamp.json index 4e8ff40cfdb7650f9d82635ac5be42f67904158a..e7704b87fe677abacdf94c71724cf15177a01790 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-timestamp.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-timestamp.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json index 1d496b6b46bf3df3c4312bacafbfb77125491058..1d0490f539ec5ff80e76bb7966bbe5452d5521df 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json index c70db14b4c9b5fabe590eb8fec4a1f0e4dbc831a..723260c4228be73e21b01a37e50729dec24e4ce9 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, 
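
Note on this long run of JSON hunks: apart from the Python and .sim changes later in the patch, every hunk in this stretch is the same one-line deletion — the deprecated "walLevel" option is dropped from the "dbinfo" block of each taosdemo/taosBenchmark test config, while the neighbouring comp, cachelast, quorum, and fsync settings stay put. A minimal sketch of the same sweep done mechanically (a hypothetical helper, not part of the patch; it assumes the configs live under tests/pytest and that "walLevel" always sits on its own line, as it does in these files):

```python
import re
from pathlib import Path

# Matches a config line like:    "walLevel":1,
WAL_LINE = re.compile(r'^\s*"walLevel"\s*:\s*\d+\s*,?\s*$')

def strip_wal_level(root: str) -> int:
    """Drop the deprecated "walLevel" line from every taosdemo JSON config
    under root, mirroring the one-line deletions in the hunks above."""
    touched = 0
    for path in Path(root).rglob("*.json"):
        lines = path.read_text().splitlines(keepends=True)
        kept = [ln for ln in lines if not WAL_LINE.match(ln)]
        if len(kept) != len(lines):
            path.write_text("".join(kept))
            touched += 1
    return touched
```

A line-oriented filter is used here rather than a json.load/json.dump round-trip so that untouched lines keep their original formatting, matching the surgical one-line diffs above.
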
"comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json index 12034adc0788f84852019d776fc0987cbc9c4f16..ba3586635b4d000638c72bb6a55d58d2b2fb2d48 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json index 28f566833fc8958d364ee867c7628d573b4bf8ee..85ff34b99d8eec0be824b4799f3001c56053ea90 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json index 8f27feba6be7e3018461b0070420cc759cf8fc72..3e37ca197f14d93f83f120fd610e62e0f47e7b2c 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json index 2e4063cf272ba18732f0e456362cb1103ba6d5c4..38477734e231196e65c3e78413033691b1d73ff2 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json index c6fe0300f535a2b9d798b09853f0ad333e3bbcfd..63ff800812c39f78f9d26717f30ad05802ccc590 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json index 92e88141ca09971d0d202ee488471c14e07d4cd3..0804acbae0db9c22972ab4107d8d2ddd0f1ed130 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json index 18f1a39e0afcdae3d52e4bc4a4a97e15dbcfda37..73845c2dc5042926e7e60ae8ea501ee326bc84f2 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json index 01ec546012ad04f94cfb6224048fffd89d5cbbc8..b3a113ad38447ce0df6c6e685edc046ff5bea86f 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json index d950a260f6ed3ad4a9ed53bc859304a71e5a680a..33e61b7d2052ed136911face5356d4dc911eb975 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json index 0deed5ba5420a1dd9a1efddbb6e1e7a757dc10d0..aff3190e1a24a5961d3e887dbd680b74b87ad141 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json index 9d1d1ee71898d5e80a7310822da00de6c4636746..b9f11954571ab52c212b24cb6ee23f382412968a 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json index f732d2e0c5575740dc3d1eeade05e09de8860faf..e302d619c4a29557d0ceba1d86b3b4c6988696be 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json index 24f468d719546733b900ecbd283f2904e96d222f..a692e4ef6f3a8e0f9813e2ecc61b654fbbbc7850 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json 
b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json index adb8764b2f6f3f89f0c3e2024ef0098ffb45b2c4..eaca2e040f13345080325074d02c5b70d431fdae 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json index b21154f1c578dedfbb880ac6aa8c9a1d101574ef..8f7d0d2e80a07d26e3bd10078f9047d80b46d8b4 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json index 46a0832612ff0f3db489b1917ff3b2c53606b2de..134e4755b53bc06711b0f6138f74977ccc69efe0 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json index e7501804211c60767e073f98865a6ee9d719901f..bc948974b6e60d73520b30c9d03b0cb038e899b9 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json index 2712f885936c12c1cf7742376ea541fd12e55cd4..c09d5cfeb3cab11007be07bb74c89897ad2b11ec 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json index f8fe21a6c4015a27ee663bc7ac54a7889af62add..39e4b3bbc8900f4aa91aaf3472097f363568e2df 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json index 45eb612e6f2efcedfe9de8d5f6cb4aeb3a464353..4c5e90f1850cf7ee2f35b7e81179bfef797d3aef 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, 
"quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json index 4e6edb2199b4cadffcc4bbc7ac74d00cfb1f1a69..c8c96844749d16b938e0d39ae0da5637271118c7 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json index 622b2554ec37b223226fcab3ad3e01568937fc0f..f0ad1b4a5f796e222f9d32ccabdaeb757408947a 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json index 31985c85460cf39cc926afdc3c614fb84a45bd4b..15d2753c4b23fc83e40a9317eff95b3945adddfd 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json index 3ebc377ca79d5cf472c102f23736960d757636e1..d636c95a9461f6ccea7b21e792ed0034fd8d214d 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json index adc6fa74bee9441999b83196726c2a133da7c24d..263a592dfadcd811f69071c7e1f9efc3a1dc3520 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json index 715644f4f062d166e67f3038bacb903a26fbf93d..04165f16b1c70d9b3e6de547c37ffdb85388a6e6 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json index e3d6ce850aeae242a5ac857cc02a9123845debb7..cc4d180fb5ff3537c71164b6dd288e98503bda08 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, 
"fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json index b14c3a8ec6d329e187c84b87412570e220eddb73..d63549b79b7c40a8c847d25f6de45a8618eb4da4 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json index 563dc86d0a1481e6b117766facf2122c75bd20f2..ffe16eccd195d7bd9d12bc09a2959d57a037513d 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json index f59d2e4e22e165ddf1adf8b95212d521a75737d9..37714edc74e4469dcceb13934459d3d0df13c6a4 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json index 4903335d181a0f06a0f0714072301438883f0f6e..4625da3a6b6c3d7024aab08a23c8d692291a0efe 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json index a27feee68a7700633197791567647875e6febee4..8f5b62be9b013ed8de5220cbc16eb2f83193760e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json index 50e1a7173b0b708b454559c3a718e48900467c5a..a30c3f7c781375c170cab702f05b46a141320400 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json index ca0d17f93ba503f3b532aa2cb9245282c540c507..2966af8f238f668d5e2114197135ce831814eafe 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json index c5a3a5f76de18589f3271287a78510e39acfb27f..40780dd992ee5c93507a5f19d503c73f7c78062a 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json index c86e759db4377d05a2e4ec1b1b2bc4144f5689e4..cada61687e11ed251755c7e5f7c5f2d3d23dbbe3 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json index ee36b62f903a2d27b24b55eba9a10146d45080ee..87386853b37ebfb3a45361620cdf7d9de395df8e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json index 25086c856e72006ad579641b08858622b2209188..9e213b52a46cc183dbe7e2879ab777cacbe1fecf 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json index 4bd071ec15a56feb1ea2b119697f934620d6b8c2..5b4bfbae65dec700a9927c14705f1df47571eff3 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json index 628c86045fa4a33f5d2e93882ca3b56dbfc91292..efc01bb9e611a9463531c653a9912878832f8fa9 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json index 7abab6a0cf00d3161bb85114cb07eb39d7f7a747..e6224159773ed04da1e08a696e5d65db7ee2cca7 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json index 8f8539be2117f8706f894f92b2075848b0203216..51ac878c3794ae358e9ebd4dddc03abe1f855b26 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json index 168b3753a13e6bfa2e884f5b8be4a03bb1675b2a..57e823f74a8160e12e6e3331bf718077ba5f0b43 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json index 4fb7241012563143cf289f510a8b58f39841b9d0..18487defcce2224491cfe57ec7e8e3649f3849c3 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json index 99233bdd738d068664241efda40d96c5a6fc7090..2c6e6260ff820acb0df2b357d78261648f400e1c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdump-insert-dp1.json b/tests/pytest/tools/taosdump-insert-dp1.json index 6481197bd6649576650ebeb95350ea50a31c1c1a..a8efd17a9ba564f957e71d3f8a99b114cb11ff6f 100644 --- a/tests/pytest/tools/taosdump-insert-dp1.json +++ b/tests/pytest/tools/taosdump-insert-dp1.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdump-insert-dp2.json b/tests/pytest/tools/taosdump-insert-dp2.json index 384a905c737911214fab72a95c9e771c895f98fd..14b0c9265fee4e313592e182f89c755fad5d0def 100644 --- a/tests/pytest/tools/taosdump-insert-dp2.json +++ b/tests/pytest/tools/taosdump-insert-dp2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py index 21555567765fcae218b254472b1330a11d83e77f..95ed69b9177a079a59002d87d4e6bccbd9f6dc9a 100644 --- a/tests/pytest/tools/taosdumpTest.py +++ 
b/tests/pytest/tools/taosdumpTest.py @@ -60,6 +60,13 @@ class TDTestCase: else: print("directory exists") + for i in range(1, 9): + if not os.path.exists("./taosdumptest/tmp%d" % i): + os.makedirs("./taosdumptest/tmp%d" % i) + else: + os.system("rm -rf ./taosdumptest/tmp%d" % i) + os.makedirs("./taosdumptest/tmp%d" % i) + if not os.path.exists("./taosdumptest/tmp2"): os.makedirs("./taosdumptest/tmp2") tdSql.execute("drop database if exists db") diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index c0de2c957a59ff94ab22a8656f030719e60e5761..94c5adf30908174aaf1537d2ae314cf929fcab04 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -63,6 +63,7 @@ class TDTestCase: os.makedirs("./taosdumptest/tmp") else: print("directory exists") + os.system("rm -rf ./taosdumptest/tmp/*") tdSql.prepare() @@ -87,6 +88,7 @@ class TDTestCase: os.system("rm ./taosdumptest/tmp/*.sql") os.system("rm ./taosdumptest/tmp/*.avro*") + os.system("rm -rf ./taosdumptest/taosdump.*") os.system( "%s --databases db -o ./taosdumptest/tmp " % binPath) @@ -122,6 +124,7 @@ class TDTestCase: os.system("rm ./taosdumptest/tmp/*.sql") os.system("rm ./taosdumptest/tmp/*.avro*") + os.system("rm -rf ./taosdumptest/tmp/taosdump.*") os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath) tdSql.execute("drop database test") diff --git a/tests/pytest/tools/taosdumpTest3.py b/tests/pytest/tools/taosdumpTest3.py index 3994ad0323a3e1b5b968999178f1cae83c5e0753..2a9561f7c54d0fa5f5428d2ea79533e6b83f94e4 100644 --- a/tests/pytest/tools/taosdumpTest3.py +++ b/tests/pytest/tools/taosdumpTest3.py @@ -57,6 +57,10 @@ class TDTestCase: def run(self): if not os.path.exists("./taosdumptest"): os.makedirs("./taosdumptest") + else: + print("directory exists") + os.system("rm -rf ./taosdumptest/*") + for i in range(1, 9): if not os.path.exists("./taosdumptest/tmp%d" % i): os.makedirs("./taosdumptest/tmp%d" % i) @@ -80,15 +84,15 @@ class TDTestCase: tdSql.execute( "create table st0_0 using st0 tags(0) st0_1 using st0 tags (1) ") tdSql.execute( - "insert into st0_0 values(1614218412000,8537,'R')(1614218422000,8538,'E')") + "insert into st0_0 values(1661997612000,8537,'R')(1661997622000,8538,'E')") tdSql.execute( - "insert into st0_1 values(1614218413000,1537,'A')(1614218423000,1538,'D')") + "insert into st0_1 values(1661997613000,1537,'A')(1661997623000,1538,'D')") tdSql.execute( "create table if not exists gt0 (ts timestamp, c0 int, c1 float) ") tdSql.execute( "create table if not exists gt1 (ts timestamp, c0 int, c1 double) ") - tdSql.execute("insert into gt0 values(1614218412000,637,8.861)") - tdSql.execute("insert into gt1 values(1614218413000,638,8.862)") + tdSql.execute("insert into gt0 values(1661997602000,637,8.861)") + tdSql.execute("insert into gt1 values(1661997603000,638,8.862)") # create db1 , three stables:stb0,include ctables stb0_0 \ stb0_1,stb1 include ctables stb1_0 and stb1_1 # \stb3,include ctables stb3_0 and stb3_1 @@ -100,35 +104,35 @@ class TDTestCase: tdSql.execute( "create table st0_0 using st0 tags(0) st0_1 using st0 tags(1) ") tdSql.execute( - "insert into st0_0 values(1614218412000,8600,'R')(1614218422000,8600,'E')") + "insert into st0_0 values(1654218412000,8600,'R')(1654218422000,8600,'E')") tdSql.execute( - "insert into st0_1 values(1614218413000,8601,'A')(1614218423000,8601,'D')") + "insert into st0_1 values(1654218413000,8601,'A')(1654218423000,8601,'D')") tdSql.execute( "create stable st1(ts timestamp, c11 float, c12 nchar(10)) tags(t1 int)") 
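
The taosdumpTest*.py hunks above all address the same flakiness: files left over from a previous run in ./taosdumptest/tmp* (SQL and Avro dumps, taosdump.* artifacts) could make a dump/restore round-trip pass or fail spuriously, so each test now recreates tmp1..tmp8 empty and removes stray dump output before running. The added loop shells out to rm -rf; a sketch of the same reset in pure Python, assuming nothing else holds those directories open:

```python
import os
import shutil

def reset_dirs(base: str = "./taosdumptest", count: int = 8) -> None:
    """Equivalent of the loop added above: guarantee tmp1..tmp8 exist and
    are empty before the dump/restore round-trips run, without os.system."""
    for i in range(1, count + 1):
        path = os.path.join(base, "tmp%d" % i)
        shutil.rmtree(path, ignore_errors=True)  # no-op if the dir is absent
        os.makedirs(path)
```

shutil.rmtree(..., ignore_errors=True) is a no-op when the directory does not exist, which collapses the exists/else branches of the added loop into two unconditional calls.
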
tdSql.execute( "create table st1_0 using st1 tags(0) st1_1 using st1 tags(1) ") tdSql.execute( - "insert into st1_0 values(1614218412000,8610.1,'R')(1614218422000,8610.1,'E')") + "insert into st1_0 values(1654218412000,8610.1,'R')(1654218422000,8610.1,'E')") tdSql.execute( - "insert into st1_1 values(1614218413000,8611.2,'A')(1614218423000,8611.1,'D')") + "insert into st1_1 values(1654218413000,8611.2,'A')(1654218423000,8611.1,'D')") tdSql.execute( "create stable st2(ts timestamp, c21 float, c22 nchar(10)) tags(t1 int)") tdSql.execute( "create table st2_0 using st2 tags(0) st2_1 using st2 tags(1) ") tdSql.execute( - "insert into st2_0 values(1614218412000,8620.3,'R')(1614218422000,8620.3,'E')") + "insert into st2_0 values(1654218412000,8620.3,'R')(1654218422000,8620.3,'E')") tdSql.execute( - "insert into st2_1 values(1614218413000,8621.4,'A')(1614218423000,8621.4,'D')") + "insert into st2_1 values(1654218413000,8621.4,'A')(1654218423000,8621.4,'D')") tdSql.execute( "create table if not exists gt0 (ts timestamp, c00 int, c01 float) ") tdSql.execute( "create table if not exists gt1 (ts timestamp, c10 int, c11 double) ") tdSql.execute( "create table if not exists gt2 (ts timestamp, c20 int, c21 float) ") - tdSql.execute("insert into gt0 values(1614218412700,8637,78.86155)") + tdSql.execute("insert into gt0 values(1654218412700,8637,78.86155)") tdSql.execute( - "insert into gt1 values(1614218413800,8638,78.862020199)") - tdSql.execute("insert into gt2 values(1614218413900,8639,78.863)") + "insert into gt1 values(1654218413800,8638,78.862020199)") + tdSql.execute("insert into gt2 values(1654218413900,8639,78.863)") # create tdSql.execute("create database if not exists dp3 precision 'ns'") @@ -138,13 +142,15 @@ class TDTestCase: tdSql.execute( "create table st0_0 using st0 tags(0) st0_1 using st0 tags(1) ") tdSql.execute( - "insert into st0_0 values(1614218412000000001,8600,'R')(1614218422000000002,8600,'E')") + "insert into st0_0 values(1654218412000000001,8600,'R')(1654218422000000002,8600,'E')") tdSql.execute( - "insert into st0_1 values(1614218413000000001,8601,'A')(1614218423000000002,8601,'D')") + "insert into st0_1 values(1654218413000000001,8601,'A')(1654218423000000002,8601,'D')") +# sys.exit(0) # # taosdump stable and general table os.system("%s -o ./taosdumptest/tmp1 -D dp1,dp2 -T 8 " % binPath) os.system("%s -o ./taosdumptest/tmp2 dp1 st0 gt0 -T 8 " % binPath) + #sys.exit(0) os.system( "%s -o ./taosdumptest/tmp3 dp2 st0 st1_0 gt0 -T 8 " % binPath) diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py index 146beb90e5b999664eb3ab119ada96ee26768a2e..7d614543e124519636199d81b1555944272d2def 100644 --- a/tests/pytest/tools/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -61,7 +61,7 @@ class TDTestCase: def build_db(precision, start_time): tdSql.execute("drop database if exists timedb1") tdSql.execute( - "create database timedb1 days 10 keep 365 blocks 8 precision " + + "create database timedb1 days 10 keep 36500 blocks 8 precision " + "\"" + precision + "\"") diff --git a/tests/pytest/tsdb/insertDataDb1.json b/tests/pytest/tsdb/insertDataDb1.json index 555ae46be3aed85cb3bc7990465594e32be4ad47..353c704505db2ecd28487c065ebe44802544c0df 100644 --- a/tests/pytest/tsdb/insertDataDb1.json +++ b/tests/pytest/tsdb/insertDataDb1.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tsdb/insertDataDb1Replica2.json 
b/tests/pytest/tsdb/insertDataDb1Replica2.json index 20ea68cc06d1f3fd8ade8b0cfc95a976f339508e..973744c97fe2ea2e82212685ce71984f00ea49fb 100644 --- a/tests/pytest/tsdb/insertDataDb1Replica2.json +++ b/tests/pytest/tsdb/insertDataDb1Replica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json index 586fb60fcc608309927149d2a26f79220fcc67e1..78fedb44e4d66ad5754cdfa4b24c6c7a4c7dc23a 100644 --- a/tests/pytest/tsdb/insertDataDb2.json +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tsdb/insertDataDb2Newstab.json b/tests/pytest/tsdb/insertDataDb2Newstab.json index 0558c8c33d1af477ae3b0cafe9416534db44dfb0..24963aba2cab539a52f7ddc6f5f09749b2a3fefa 100644 --- a/tests/pytest/tsdb/insertDataDb2Newstab.json +++ b/tests/pytest/tsdb/insertDataDb2Newstab.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json index 5bc145994d778105e10ae2631494cddfe8377cf7..a6b6b975a22e5b4a99bf99f2377fc591c82ee009 100644 --- a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json +++ b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/tsdb/insertDataDb2Replica2.json b/tests/pytest/tsdb/insertDataDb2Replica2.json index 07bbeaa632ce174aa6f1388689f15cc1c1a77b64..bd97a0ee1941275fffef1508ace2ccaf66023f98 100644 --- a/tests/pytest/tsdb/insertDataDb2Replica2.json +++ b/tests/pytest/tsdb/insertDataDb2Replica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb1.json b/tests/pytest/wal/insertDataDb1.json index 1b7f757387afb8da99e7bfd7934a68ad90a6a8dd..1e268faac557101940e0615a06b38c5504599b9e 100644 --- a/tests/pytest/wal/insertDataDb1.json +++ b/tests/pytest/wal/insertDataDb1.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb1Replica2.json b/tests/pytest/wal/insertDataDb1Replica2.json index 20ea68cc06d1f3fd8ade8b0cfc95a976f339508e..973744c97fe2ea2e82212685ce71984f00ea49fb 100644 --- a/tests/pytest/wal/insertDataDb1Replica2.json +++ b/tests/pytest/wal/insertDataDb1Replica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb2.json b/tests/pytest/wal/insertDataDb2.json index 15df1350c873a4569187fe8a7cac2f6e2b474eeb..6743ee0c8260eb3861697f1aaadf6c27f05dbcab 100644 --- a/tests/pytest/wal/insertDataDb2.json +++ b/tests/pytest/wal/insertDataDb2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb2Newstab.json b/tests/pytest/wal/insertDataDb2Newstab.json index 0558c8c33d1af477ae3b0cafe9416534db44dfb0..24963aba2cab539a52f7ddc6f5f09749b2a3fefa 100644 --- a/tests/pytest/wal/insertDataDb2Newstab.json +++ b/tests/pytest/wal/insertDataDb2Newstab.json @@ -26,7 +26,6 @@ "minRows": 100, 
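
The timestamp edits in taosdumpTest3.py above (1614218412000-era literals, i.e. 2021-02-25, moved up to 1654218412000 and 1661997612000, i.e. June and September 2022) and the `keep 365` → `keep 36500` change in taosdumpTestNanoSupport.py appear to be two fixes for the same problem: TDengine rejects rows whose timestamps fall outside the database's keep window, so hard-coded 2021 timestamps would start failing once test runs were more than a year past them. Bumping keep to 36500 days (~100 years) makes the window effectively unbounded. A quick check of the arithmetic (a sketch only; the real cutoff also depends on the days/duration setting):

```python
from datetime import datetime, timezone

def in_keep_window(ts_ms: int, keep_days: int, now: datetime) -> bool:
    """True if a millisecond timestamp is still insertable for a database
    created with `keep keep_days`, evaluated at time `now`."""
    ts = datetime.fromtimestamp(ts_ms / 1000, timezone.utc)
    return (now - ts).days < keep_days

run_date = datetime(2022, 6, 10, tzinfo=timezone.utc)    # a mid-2022 CI run
print(in_keep_window(1614218412000, 365, run_date))      # False: 2021-02-25 is ~470d old
print(in_keep_window(1654218412000, 36500, run_date))    # True: 2022-06-03, huge window
```
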
"maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb2NewstabReplica2.json b/tests/pytest/wal/insertDataDb2NewstabReplica2.json index 5bc145994d778105e10ae2631494cddfe8377cf7..a6b6b975a22e5b4a99bf99f2377fc591c82ee009 100644 --- a/tests/pytest/wal/insertDataDb2NewstabReplica2.json +++ b/tests/pytest/wal/insertDataDb2NewstabReplica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/pytest/wal/insertDataDb2Replica2.json b/tests/pytest/wal/insertDataDb2Replica2.json index 07bbeaa632ce174aa6f1388689f15cc1c1a77b64..bd97a0ee1941275fffef1508ace2ccaf66023f98 100644 --- a/tests/pytest/wal/insertDataDb2Replica2.json +++ b/tests/pytest/wal/insertDataDb2Replica2.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/script/general/db/alter_tables_d2.sim b/tests/script/general/db/alter_tables_d2.sim index f74f98d571e49090e2c25d2371a0e0c268c9a3ee..41d72e35d3ba35ad9fd3741ae4c9abe8ea075d2b 100644 --- a/tests/script/general/db/alter_tables_d2.sim +++ b/tests/script/general/db/alter_tables_d2.sim @@ -109,7 +109,7 @@ sql show mnodes -x step1 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step2 endi @@ -186,7 +186,7 @@ sql show mnodes -x step3 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step3 endi @@ -323,7 +323,7 @@ sql show mnodes -x step9 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step9 endi @@ -419,7 +419,7 @@ sql show mnodes -x step10 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step10 endi @@ -482,7 +482,7 @@ sql show mnodes -x step1xx print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1xx endi diff --git a/tests/script/general/db/alter_tables_v1.sim b/tests/script/general/db/alter_tables_v1.sim index 20c4c7336312b7eb701fe04c6b4158f466e43146..8708d764e43e41325e3ed7d80d45c34d0a3d1031 100644 --- a/tests/script/general/db/alter_tables_v1.sim +++ b/tests/script/general/db/alter_tables_v1.sim @@ -62,7 +62,7 @@ sql show mnodes -x step2 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step2 endi @@ -115,7 +115,7 @@ sql show mnodes -x step5 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step5 endi @@ -185,7 +185,7 @@ sql show mnodes -x step7 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step7 endi @@ -240,7 +240,7 @@ sql show mnodes -x step9 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step9 endi @@ -314,7 +314,7 @@ sql show mnodes -x step10 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step10 endi @@ -369,7 +369,7 @@ sql show mnodes -x step12 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto 
step12 endi diff --git a/tests/script/general/db/alter_tables_v4.sim b/tests/script/general/db/alter_tables_v4.sim index 10bb4e108bf7fc6c879e230565dad29599f15549..02816097ed2235b1535a0629407cc9704862d84a 100644 --- a/tests/script/general/db/alter_tables_v4.sim +++ b/tests/script/general/db/alter_tables_v4.sim @@ -81,7 +81,7 @@ sql show mnodes -x step3 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step3 endi @@ -155,7 +155,7 @@ sql show mnodes -x step5 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step5 endi @@ -287,7 +287,7 @@ sql show mnodes -x step9 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step9 endi @@ -381,7 +381,7 @@ sql show mnodes -x step10 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step10 endi @@ -441,7 +441,7 @@ sql show mnodes -x step12 print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step12 endi diff --git a/tests/script/general/db/delete.sim b/tests/script/general/db/delete.sim index 4384044885e7c6a660079551b1190481ace3554e..80ec812c74135addc1d0c072b8e7ebd6be8e6f86 100644 --- a/tests/script/general/db/delete.sim +++ b/tests/script/general/db/delete.sim @@ -57,7 +57,7 @@ step3: sql show mnodes print dnode1 role $data2_1 -if $data2_1 != master then +if $data2_1 != leader then goto step3 endi diff --git a/tests/script/general/wal/sync.sim b/tests/script/general/wal/sync.sim index 3a8952391818bc02244f7455f694b6c1fe834288..bd9ed574a96b1afbaaf3f68a4ca41700f40434a2 100644 --- a/tests/script/general/wal/sync.sim +++ b/tests/script/general/wal/sync.sim @@ -54,13 +54,13 @@ print mnode2Role $mnode2Role $mnode3Role = $data2_3 print mnode3Role $mnode3Role -if $mnode1Role != master then +if $mnode1Role != leader then goto show1 endi -if $mnode2Role != slave then +if $mnode2Role != follower then goto show1 endi -if $mnode3Role != slave then +if $mnode3Role != follower then goto show1 endi diff --git a/tests/script/issue/TD-2677.sim b/tests/script/issue/TD-2677.sim index 8d2058a3851e456dd85f8d6a295ce45b7a6fd3bd..db61aca8111c03920fa780ebfd88592c89695623 100644 --- a/tests/script/issue/TD-2677.sim +++ b/tests/script/issue/TD-2677.sim @@ -54,13 +54,13 @@ print mnode2Role $mnode2Role $mnode3Role = $data2_3 print mnode3Role $mnode3Role -if $mnode1Role != master then +if $mnode1Role != leader then goto step1 endi -if $mnode2Role != slave then +if $mnode2Role != follower then goto step1 endi -if $mnode3Role != slave then +if $mnode3Role != follower then goto step1 endi diff --git a/tests/script/issue/TD-2680.sim b/tests/script/issue/TD-2680.sim index 631332160fe1ede876ef2b966dfb1963ac7351ca..4f1bd63dd825f266fc5b8f11d078f711a90e6388 100644 --- a/tests/script/issue/TD-2680.sim +++ b/tests/script/issue/TD-2680.sim @@ -67,7 +67,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/issue/TD-2713.sim b/tests/script/issue/TD-2713.sim index b66c55b9b95f67eb2478791e8ee45ffd503b12be..f2c0bc9eb52ea58f488ab81d1c509748437b3a50 100644 --- a/tests/script/issue/TD-2713.sim +++ b/tests/script/issue/TD-2713.sim @@ -60,13 +60,13 @@ sql show mnodes print mnode1 $data2_1 print mnode1 
$data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi -if $data2_2 != slave then +if $data2_2 != follower then goto step1 endi -if $data2_3 != slave then +if $data2_3 != follower then goto step1 endi diff --git a/tests/script/issue/TD-3300.sim b/tests/script/issue/TD-3300.sim index 0745ceb8490567b508397b3e99d4748dc41c8971..8ff3af5895a22c913bb888e865099cf840ceb17a 100644 --- a/tests/script/issue/TD-3300.sim +++ b/tests/script/issue/TD-3300.sim @@ -76,10 +76,10 @@ endi if $data06 != 2 then return -1 endi -if $data05 != master then +if $data05 != leader then return -1 endi -if $data07 != slave then +if $data07 != follower then return -1 endi @@ -125,10 +125,10 @@ endi if $data06 != 2 then goto step4 endi -if $data05 != master then +if $data05 != leader then goto step4 endi -if $data07 != slave then +if $data07 != follower then goto step4 endi @@ -189,7 +189,7 @@ endi if $data05 != offline then goto step5 endi -if $data07 != master then +if $data07 != leader then goto step5 endi @@ -251,10 +251,10 @@ endi if $data06 != 2 then goto step6 endi -if $data05 != slave then +if $data05 != follower then goto step6 endi -if $data07 != master then +if $data07 != leader then goto step6 endi @@ -337,7 +337,7 @@ endi if $data06 != 2 then goto step7 endi -if $data05 != master then +if $data05 != leader then goto step7 endi if $data07 != offline then @@ -422,10 +422,10 @@ endi if $data06 != 2 then goto step8 endi -if $data05 != master then +if $data05 != leader then goto step8 endi -if $data07 != slave then +if $data07 != follower then goto step8 endi @@ -521,7 +521,7 @@ endi if $data05 != offline then goto step7 endi -if $data07 != master then +if $data07 != leader then goto step7 endi diff --git a/tests/script/tmp/mnodes.sim b/tests/script/tmp/mnodes.sim index 8bca76c38b627ac6a6aa9eaae07a9126a9b19057..e9b03c84472f8c38808484a5adecbbeb9a05fe76 100644 --- a/tests/script/tmp/mnodes.sim +++ b/tests/script/tmp/mnodes.sim @@ -74,13 +74,13 @@ print mnode2Role $mnode2Role $mnode3Role = $data2_3 print mnode3Role $mnode3Role -if $mnode1Role != master then +if $mnode1Role != leader then goto step1 endi -if $mnode2Role != slave then +if $mnode2Role != follower then goto step1 endi -if $mnode3Role != slave then +if $mnode3Role != follower then goto step1 endi diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..77e010f5bd9269e868c4f963e7c920c41fade8a8 100644 --- a/tests/script/unique/account/paras.sim +++ b/tests/script/unique/account/paras.sim @@ -17,7 +17,7 @@ endi if $data02 != 3/128 then return -1 endi -if $data03 != 0/128 then +if $data03 != 0/32767 then return -1 endi if $data04 != 0/2147483647 then @@ -111,4 +111,4 @@ if $data16 != 0.000/10.000 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim b/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim index dbd0e2bd87c099dca54aa33d609696cb2dc89381..f20c92b9827ef88f5e69f15195a028987be82d5d 100644 --- a/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim +++ b/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim @@ -1,6 +1,6 @@ # Test case describe: dnode1 is only mnode, dnode2/dnode3 are only vnode # step 1: start dnode1 -# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode) +# step 2: start dnode2 and dnode3, and 
all added into cluster (Suppose dnode2 is leader-vnode) # step 2: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841) # step 3: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc # step 4: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2 @@ -145,7 +145,7 @@ if $dnode2Status != ready then goto wait_dnode3_offline endi -sleep $sleepTimer # waitting for move master vnode of dnode2 to dnode3 +sleep $sleepTimer # waitting for move leader vnode of dnode2 to dnode3 # check using select sql select count(*) from $stb print data00 $data00 diff --git a/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync_second.sim b/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync_second.sim index e15edb3f3d444b7407244b8cf3fa781ba36357d6..a7380a8de98d174e9e272d0c2e2782f95b4d0d44 100644 --- a/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync_second.sim +++ b/tests/script/unique/arbitrator/dn2_mn1_cache_file_sync_second.sim @@ -1,6 +1,6 @@ # Test case describe: dnode1 is only mnode, dnode2/dnode3 are only vnode # step 1: start dnode1 -# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode) +# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is leader-vnode) # step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841) # step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc # step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2 diff --git a/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim index 96fde9061a0648d69386cb7f5c23450dea6aad7e..3f5bcc5da989e6f5368411b18c0956d1868b6999 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim @@ -144,7 +144,7 @@ if $dnode3Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_offline endi -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_offline endi @@ -209,11 +209,11 @@ $dnode2Vtatus = $data7_2 print dnode2Vtatus: $dnode3Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_slave endi -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_slave endi @@ -325,11 +325,11 @@ $dnode2Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode2_vgroup_slave endi -if $dnode2Vtatus != slave then +if $dnode2Vtatus != follower then sleep 2000 goto wait_dnode2_vgroup_slave endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim index da76cc467b7813586c98761319a01eb1f2ed6bb6..f0d2b41ce8ffd470278931ea07ff4fe3d21f3546 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim @@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi @@ -211,11 +211,11 @@ $dnode3Vtatus = $data7_2 
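
From here to the end of the patch, the .sim hunks track TDengine's terminology change in SHOW MNODES / SHOW VGROUPS output: the role values master and slave are now reported as leader and follower, so every `!= master` / `!= slave` assertion in the scripts is updated. Note that the hunks deliberately leave goto labels such as wait_dnode4_vgroup_slave and wait_dnode3_vgroup_master untouched — only the compared values changed — so a blanket search-and-replace would overreach. A sketch of the narrower rewrite (a hypothetical helper, not part of the patch):

```python
import re
from pathlib import Path

ROLE_SWAP = {"master": "leader", "slave": "follower"}

def rename_roles(root: str) -> None:
    """Rewrite role assertions in .sim scripts under root, leaving goto
    labels and other identifiers containing master/slave alone."""
    for path in Path(root).rglob("*.sim"):
        text = path.read_text()
        # Only rewrite comparison sites like:  if $data2_1 != master then
        new = re.sub(r"(?<=!= )(master|slave)\b",
                     lambda m: ROLE_SWAP[m.group(0)], text)
        if new != text:
            path.write_text(new)
```

The lookbehind restricts the swap to comparison sites, mirroring what the hand-edited hunks do; comment wording such as "master-vnode" → "leader-vnode" would still need a separate manual pass, as in the hunks above.
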
print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_slave endi @@ -330,11 +330,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_master endi @@ -393,11 +393,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master_2 endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_master_2 endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim index 6d81effab63bb52801ab51e53ee1147326b3e851..53a1ce04fa3876ff56f931570c98b896896e2737 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_change.sim @@ -145,7 +145,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi @@ -218,11 +218,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_slave endi @@ -292,7 +292,7 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim index d22aca07cbd3d3c6a902076103c9759209c6a966..ba15bb42d599cef7de137db2af386de57f9e8005 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim @@ -151,7 +151,7 @@ if $dnode3Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_offline endi -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_offline endi @@ -237,11 +237,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode2Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_slave endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_slave endi @@ -320,7 +320,7 @@ if $dnode2Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_master endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim index 884a43bce12d1bc2c137bab17b0e780c521e327c..119cb418dbfc478fb648397744f6e334567affd9 100644 
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim @@ -203,7 +203,7 @@ if $dnode2Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_master endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master endi @@ -328,11 +328,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode2Vtatus = $data7_2 $dnode3Vtatus = $data5_2 -if $dnode2Vtatus != slave then +if $dnode2Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_master_1 endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master_1 endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim index 3c74de49166067624335937d0e0486924b9fdb4f..f393e4afbac4fdeb9eaba639be34554a38c9c882 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_createErrData_online.sim @@ -165,7 +165,7 @@ if $dnode2Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_master endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master endi @@ -290,11 +290,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode2Vtatus = $data7_2 $dnode3Vtatus = $data5_2 -if $dnode2Vtatus != slave then +if $dnode2Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_master_1 endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master_1 endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim index d0399222f14cc8b8d7ac12ebca8b91549c34942d..00ab574e0783e09906bd5c561c00a7c935720e82 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_delDir.sim @@ -148,7 +148,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi @@ -206,11 +206,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_slave endi @@ -306,11 +306,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_slave endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_slave endi @@ -422,11 +422,11 @@ $dnode3Vtatus = $data7_2 print dnode4Vtatus: $dnode4Vtatus print dnode3Vtatus: $dnode3Vtatus -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode2_vgroup_slave endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode2_vgroup_slave endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim index 
01534f9476a164d607620fcc93601c272b3e6042..370b19990ba9df97367a82144657dec35765951b 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim @@ -151,7 +151,7 @@ if $dnode3Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_offline endi -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_offline endi @@ -237,11 +237,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode2Vtatus = $data7_2 $dnode3Vtatus = $data5_2 -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_slave endi -if $dnode3Vtatus != slave then +if $dnode3Vtatus != follower then sleep 2000 goto wait_dnode3_vgroup_slave endi @@ -320,7 +320,7 @@ if $dnode2Vtatus != offline then sleep 2000 goto wait_dnode3_vgroup_master endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode3_vgroup_master endi diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim index b9ee508f78602cee7d6c9f17dbc3e250e7014f72..35a7bf896610f39af3096b2b24942b2437c23051 100644 --- a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim +++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim @@ -268,7 +268,7 @@ if $dnode3Vtatus != offline then sleep 2000 goto wait_dnode2_vgroup_master endi -if $dnode2Vtatus != master then +if $dnode2Vtatus != leader then sleep 2000 goto wait_dnode2_vgroup_master endi diff --git a/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim index d90853d2e41e8384acad2f4dcd5a95eee8298745..d12b5ff3ad19b67766a475953e49a89454993aad 100644 --- a/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim +++ b/tests/script/unique/arbitrator/dn3_mn2_killDnode.sim @@ -104,7 +104,7 @@ $mnode2Status = $data2_2 $mnode3Status = $data2_3 #$mnode4Status = $data2_4 -if $mnode1Status != master then +if $mnode1Status != leader then return -1 endi diff --git a/tests/script/unique/arbitrator/insert_duplicationTs.sim b/tests/script/unique/arbitrator/insert_duplicationTs.sim index 4af47ca336c188c3194b9fc64925073f8fb406c2..f10405eaa94a5b92f9fa6ae21c8cecb3f28a16b0 100644 --- a/tests/script/unique/arbitrator/insert_duplicationTs.sim +++ b/tests/script/unique/arbitrator/insert_duplicationTs.sim @@ -1,6 +1,6 @@ # Test case describe: dnode1 is only mnode, dnode2/dnode3 are only vnode # step 1: start dnode1 -# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode) +# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 is the leader vnode) # step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g.
1841) # step 4: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc # step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2 @@ -157,7 +157,7 @@ if $dnode3Status != ready then goto wait_dnode2_offline endi -sleep $sleepTimer # waitting for move master vnode of dnode2 to dnode3 +sleep $sleepTimer # waiting for the leader vnode of dnode2 to move to dnode3 # check using select sql select count(*) from $stb print data00 $data00 diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim index 0adb6b475938c1aa37f56c20c5d6327c9f89d574..f3da076fde52a3b4eb6dd552ef8f3293d16349cd 100644 --- a/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica2_alterTable_online.sim @@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim index a0877ad89c20697e3b9a46f4512766fbd11439d8..6a4c92959c0d9649a934270d60a76e274254cd4f 100644 --- a/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim +++ b/tests/script/unique/arbitrator/offline_replica2_alterTag_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim index 376484a0661d89fcd38f6be790437e10c6ef2761..d97638b6fc46ae6082f81b7e39682cbc961199e0 100644 --- a/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica2_createTable_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim index 9f21193400a8d7833afe8dbc5b4671c2f623778d..bb51700196424ef5e6b99ce5297bf047c4680d79 100644 --- a/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim +++ b/tests/script/unique/arbitrator/offline_replica2_dropDb_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim index cb3bbb3a73f5213c346fc675049827730a6e3a01..592ad1b136f1ff22b859cd5a3972df08a9d888a0 100644 --- a/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica2_dropTable_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto
wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim index 8a9995f89162883c6529a3cd8e63bc764884147b..b75a06874e6bfbf1120de324ac49f520678fb5d3 100644 --- a/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica3_alterTable_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim index 6eed563bbc7c79ac35a782fbd9f2ccaa79f277d6..5a06ab44059da667ec0d4db677b544ec0e628e09 100644 --- a/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim +++ b/tests/script/unique/arbitrator/offline_replica3_alterTag_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim index 2633d822c91d87ad6e5cc70370ac880fd397b889..31fe60aaac3d6db7dd72045a44ef1b252bd14aa2 100644 --- a/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica3_createTable_online.sim @@ -151,7 +151,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim index 3abfc401611044b5cc7a1fb9411e4fd461beca78..725ca1b6209a6227807043f5b570f7b62a6601ea 100644 --- a/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim +++ b/tests/script/unique/arbitrator/offline_replica3_dropDb_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim index f2acb8b90a35186395ab559ee428a182a2cba0d4..7dea6b24a9aa374c903724323967986b42a78603 100644 --- a/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim +++ b/tests/script/unique/arbitrator/offline_replica3_dropTable_online.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim b/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim index 9d0e967f4e1575195e126cd975540067038409c5..5c8e06fadda6a52b172b988d01162351c3e24b86 100644 --- a/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim +++ b/tests/script/unique/arbitrator/replica_changeWithArbitrator.sim @@ -224,7 +224,7 @@ if $data2_1 != offline then sleep 2000 goto 
wait_dnode2_master endi -if $data2_2 != master then +if $data2_2 != leader then sleep 2000 goto wait_dnode2_master endi diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim index a8c0e83cc1e5da61a473fa5b9753e90450e57bec..7ce878c63add94c7f49a78b5fd05f60f5bdb46ad 100644 --- a/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim +++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_add.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim index 951d26635b257d1c1d3ab81889b66bb909b8038b..83e31ff9ae66d6e4c951af3330043b33b011921d 100644 --- a/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim +++ b/tests/script/unique/arbitrator/sync_replica2_alterTable_drop.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica2_dropDb.sim b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim index e4e7f951881cf87c426c869774b2a3a548b29517..2272e63f0415472c32357b84310b043ad5c16b75 100644 --- a/tests/script/unique/arbitrator/sync_replica2_dropDb.sim +++ b/tests/script/unique/arbitrator/sync_replica2_dropDb.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica2_dropTable.sim b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim index 0049dc6fba9bd33cd9c2e4f5b56510769dd0c60e..4f7588a43b97029caa4a48fad6f3817323be4635 100644 --- a/tests/script/unique/arbitrator/sync_replica2_dropTable.sim +++ b/tests/script/unique/arbitrator/sync_replica2_dropTable.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim index 499089960100f0be445552112e2b2000164c1ca0..fa34a67b93fcc2277369133be48b26997edd0fe0 100644 --- a/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim +++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_add.sim @@ -148,7 +148,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim index 10bd4fc8bd3946871605f66493e15cdde28ec257..aefb849527da41645c3c7e815417bbc67fe74247 100644 --- a/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim +++ b/tests/script/unique/arbitrator/sync_replica3_alterTable_drop.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master 
then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica3_createTable.sim b/tests/script/unique/arbitrator/sync_replica3_createTable.sim index a0b391dd763f96fa6d340fad213943045b45ed69..0cea59f799b1bc7f28c004f2e609c86d4931fc1f 100644 --- a/tests/script/unique/arbitrator/sync_replica3_createTable.sim +++ b/tests/script/unique/arbitrator/sync_replica3_createTable.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim b/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim index 68c6ecbd6ecf7c06b5ae595c16e5b7d4b89435bd..2f29dfb472cbec0a758f308b5b06223ce1ee9f36 100644 --- a/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim +++ b/tests/script/unique/arbitrator/sync_replica3_dnodeChang_DropAddAlterTableDropDb.sim @@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi @@ -210,7 +210,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave endi @@ -243,7 +243,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master endi @@ -317,7 +317,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave_2 endi @@ -350,7 +350,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master_2 endi @@ -440,7 +440,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != slave then +if $dnode4Vtatus != follower then sleep 2000 goto wait_dnode4_vgroup_slave_3 endi @@ -473,7 +473,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $dnode4Vtatus = $data5_2 $dnode3Vtatus = $data7_2 -if $dnode4Vtatus != master then +if $dnode4Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_master_3 endi diff --git a/tests/script/unique/arbitrator/sync_replica3_dropDb.sim b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim index 83e53eaeeb16e0988b778ef0b2515dc12c397d38..4f61da9d9d60e2aecfea4118e7bba91b67b8ef7e 100644 --- a/tests/script/unique/arbitrator/sync_replica3_dropDb.sim +++ b/tests/script/unique/arbitrator/sync_replica3_dropDb.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/arbitrator/sync_replica3_dropTable.sim 
b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim index 7496541b76da3245f034924e95fd468b61135aef..a74364a1581c07efbe93fbe58a5f32a611bf02ac 100644 --- a/tests/script/unique/arbitrator/sync_replica3_dropTable.sim +++ b/tests/script/unique/arbitrator/sync_replica3_dropTable.sim @@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then sleep 2000 goto wait_dnode4_vgroup_offline endi -if $dnode3Vtatus != master then +if $dnode3Vtatus != leader then sleep 2000 goto wait_dnode4_vgroup_offline endi diff --git a/tests/script/unique/cluster/balance1.sim b/tests/script/unique/cluster/balance1.sim index c98687a81ceae03d20fc205a121fcfc813c74be7..b686fb8665f7196b0db8137cd334f733970d99a4 100644 --- a/tests/script/unique/cluster/balance1.sim +++ b/tests/script/unique/cluster/balance1.sim @@ -192,10 +192,10 @@ print dnode1 ==> $dnode1Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi -if $dnode3Role != slave then +if $dnode3Role != follower then return -1 endi @@ -236,7 +236,7 @@ print dnode1 ==> $dnode1Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi @@ -274,11 +274,11 @@ $dnode4Role = $data2_4 print dnode1 ==> $dnode1Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi -if $dnode4Role != slave then +if $dnode4Role != follower then return -1 endi diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim index 0b80acbe6c9fc11455df6023f66da7f057db2d09..789d3787b5fc9730eedd4538a5e8a798cfefdd73 100644 --- a/tests/script/unique/cluster/balance2.sim +++ b/tests/script/unique/cluster/balance2.sim @@ -82,13 +82,13 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi -if $data2_2 != slave then +if $data2_2 != follower then goto step1 endi -if $data2_3 != slave then +if $data2_3 != follower then goto step1 endi @@ -226,17 +226,17 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null then return -1 endi -if $dnode3Role != slave then +if $dnode3Role != follower then return -1 endi -if $dnode4Role != slave then +if $dnode4Role != follower then return -1 endi @@ -279,7 +279,7 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null then @@ -289,7 +289,7 @@ if $dnode3Role != null then return -1 endi -if $dnode4Role != slave then +if $dnode4Role != follower then return -1 endi diff --git a/tests/script/unique/cluster/balance3.sim b/tests/script/unique/cluster/balance3.sim index c2e9a845149cd9df78b9528115c5108c1e6531d3..0f583ed60015de08370ddc29214dc886519c4f0b 100644 --- a/tests/script/unique/cluster/balance3.sim +++ b/tests/script/unique/cluster/balance3.sim @@ -68,13 +68,13 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi -if $data2_2 != slave then +if $data2_2 != follower then goto step1 endi -if $data2_3 != slave then +if $data2_3 != follower then goto step1 endi @@ -240,7 +240,7 @@ print dnode3 ==> $data2_3 print dnode4 ==> $data2_4 print dnode5 ==> $data2_5 -if 
$data2_4 != slave then +if $data2_4 != follower then goto show4 endi @@ -290,7 +290,7 @@ print dnode5 ==> $data2_5 print dnode6 ==> $data2_6 print dnode7 ==> $data2_7 -if $data2_5 != slave then +if $data2_5 != follower then goto show5 endi @@ -381,7 +381,7 @@ print dnode5 ==> $data2_5 print dnode6 ==> $data2_6 print dnode7 ==> $data2_7 -if $data2_6 != slave then +if $data2_6 != follower then goto show7 endi @@ -451,10 +451,10 @@ print dnode7 ==> $data2_7 if $data2_1 != offline then goto show9 endi -if $data2_5 != master then +if $data2_5 != leader then goto show9 endi -if $data2_6 != slave then +if $data2_6 != follower then goto show9 endi @@ -480,13 +480,13 @@ print dnode7 ==> $data2_7 if $data2_1 != null then goto show10 endi -if $data2_5 != master then +if $data2_5 != leader then goto show10 endi -if $data2_6 != slave then +if $data2_6 != follower then goto show10 endi -if $data2_7 != slave then +if $data2_7 != follower then goto show10 endi diff --git a/tests/script/unique/cluster/cache.sim b/tests/script/unique/cluster/cache.sim index 1b3771353f8ca411db1fc8ea62335c5ecc16bf45..f4da8380eb28789666d99f39501b66103df4d394 100644 --- a/tests/script/unique/cluster/cache.sim +++ b/tests/script/unique/cluster/cache.sim @@ -57,9 +57,9 @@ endi #sql create table sys.st as select avg(taosd), avg(system) from sys.cpu interval(30s) sql show log.vgroups -if $data05 != master then +if $data05 != leader then return -1 endi -if $data15 != master then +if $data15 != leader then return -1 endi diff --git a/tests/script/unique/cluster/flowctrl.sim b/tests/script/unique/cluster/flowctrl.sim index 700fa0a3f195c84ed6e7903fe572c81b20b4fd21..8e04767ffd04099e7dc3ae4355b3740921d81203 100644 --- a/tests/script/unique/cluster/flowctrl.sim +++ b/tests/script/unique/cluster/flowctrl.sim @@ -55,13 +55,13 @@ print mnode2Role $mnode2Role $mnode3Role = $data2_3 print mnode3Role $mnode3Role -if $mnode1Role != master then +if $mnode1Role != leader then goto show1 endi -if $mnode2Role != slave then +if $mnode2Role != follower then goto show1 endi -if $mnode3Role != slave then +if $mnode3Role != follower then goto show1 endi diff --git a/tests/script/unique/cluster/vgroup100.sim b/tests/script/unique/cluster/vgroup100.sim index 656ed2ec44ebbb2d666ed2618e15d5ae34a5e525..cfe27657985135eebd13d9f1bcf563b536131177 100644 --- a/tests/script/unique/cluster/vgroup100.sim +++ b/tests/script/unique/cluster/vgroup100.sim @@ -46,13 +46,13 @@ print $dnode1Role print $dnode2Role print $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto show2 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto show2 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto show2 endi @@ -109,13 +109,13 @@ sql show mnodes -x show7 $dnode1Role = $data2_1 $dnode2Role = $data2_2 $dnode3Role = $data2_3 -if $dnode1Role != master then +if $dnode1Role != leader then goto show7 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto show7 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto show7 endi diff --git a/tests/script/unique/clusterSimCase/cluster_main.sim b/tests/script/unique/clusterSimCase/cluster_main.sim index 274ce85974bb2238bae60e6ffa9723760ae1c394..2ec6ce9b55bec90924ce4ac721bf44501ae787e8 100644 --- a/tests/script/unique/clusterSimCase/cluster_main.sim +++ b/tests/script/unique/clusterSimCase/cluster_main.sim @@ -10,17 +10,17 @@ #taos> show vgroups; # vgId | tables | status | onlineVnodes | dnode | vstatus | dnode | vstatus | 
#====================================================================================================== -# 2 | 1024 | ready | 2 | 3 | master | 2 | slave | -# 3 | 1024 | ready | 2 | 3 | master | 2 | slave | -# 4 | 1024 | ready | 2 | 3 | master | 2 | slave | -# 5 | 718 | ready | 2 | 3 | master | 2 | slave | +# 2 | 1024 | ready | 2 | 3 | leader | 2 | follower | +# 3 | 1024 | ready | 2 | 3 | leader | 2 | follower | +# 4 | 1024 | ready | 2 | 3 | leader | 2 | follower | +# 5 | 718 | ready | 2 | 3 | leader | 2 | follower | #Query OK, 4 row(s) in set (0.002749s) # #taos> show mnodes # -> ; # id | end_point | role | create_time | #===================================================================================== -# 1 | ubuntu-OptiPlex-7060:7100 | master | 2020-07-22 06:25:31.677 | +# 1 | ubuntu-OptiPlex-7060:7100 | leader | 2020-07-22 06:25:31.677 | #Query OK, 1 row(s) in set (0.002126s) @@ -136,7 +136,7 @@ if $vg2Dnode3Status != offline then sleep 2000 goto wait_vgroup_chang_0 endi -if $vg2Dnode2Status != master then +if $vg2Dnode2Status != leader then sleep 2000 goto wait_vgroup_chang_0 endi @@ -165,11 +165,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $vg2Dnode3Status = $data5_2 $vg2Dnode2Status = $data7_2 -if $vg2Dnode3Status != slave then +if $vg2Dnode3Status != follower then sleep 2000 goto wait_vgroup_chang_1 endi -if $vg2Dnode2Status != master then +if $vg2Dnode2Status != leader then sleep 2000 goto wait_vgroup_chang_1 endi @@ -197,7 +197,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $vg2Dnode3Status = $data5_2 $vg2Dnode2Status = $data7_2 -if $vg2Dnode3Status != master then +if $vg2Dnode3Status != leader then sleep 2000 goto wait_vgroup_chang_2 endi @@ -230,11 +230,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat $vg2Dnode3Status = $data5_2 $vg2Dnode2Status = $data7_2 -if $vg2Dnode2Status != slave then +if $vg2Dnode2Status != follower then sleep 2000 goto wait_vgroup_chang_3 endi -if $vg2Dnode3Status != master then +if $vg2Dnode3Status != leader then sleep 2000 goto wait_vgroup_chang_3 endi diff --git a/tests/script/unique/db/commit.sim b/tests/script/unique/db/commit.sim index 661dd4505f704fff961739075247d4264d617f72..dec78c8e43cee9fda4328f59d0336502790b19f5 100644 --- a/tests/script/unique/db/commit.sim +++ b/tests/script/unique/db/commit.sim @@ -13,7 +13,7 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 -print ========= start dnode1 as master +print ========= start dnode1 as leader system sh/exec.sh -n dnode1 -s start sql connect sleep 2000 diff --git a/tests/script/unique/db/delete.sim b/tests/script/unique/db/delete.sim index c876f23de35f67b0cd2a6f0e428a825d98de7c27..b0ad52e494143a1a0218855c68ab473efde95086 100644 --- a/tests/script/unique/db/delete.sim +++ b/tests/script/unique/db/delete.sim @@ -84,7 +84,7 @@ step3: sql show mnodes print dnode1 role $data2_1 -if $data2_1 != master then +if $data2_1 != leader then goto step3 endi diff --git a/tests/script/unique/db/replica_add12.sim b/tests/script/unique/db/replica_add12.sim index d46187e4456acbd39c7b1f7689388af156d04f85..6cca6ce4cd5c64344d3d0115c6a10bdb1ac8e4a6 100644 --- a/tests/script/unique/db/replica_add12.sim +++ b/tests/script/unique/db/replica_add12.sim @@ -60,7 +60,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto 
step1 endi if $data2_2 != null then diff --git a/tests/script/unique/db/replica_add13.sim b/tests/script/unique/db/replica_add13.sim index 13a5c9783228761c2b431f9bb1f571c29c05f080..6bc76615da0baa0072c60bab3db9a71a2e78bcff 100644 --- a/tests/script/unique/db/replica_add13.sim +++ b/tests/script/unique/db/replica_add13.sim @@ -66,7 +66,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/db/replica_add23.sim b/tests/script/unique/db/replica_add23.sim index ac0bd6d9d7b09065c832fb8ad0698b97a9f94853..ffc62abb8ea52cd8851f98efb9f063228875a4f9 100644 --- a/tests/script/unique/db/replica_add23.sim +++ b/tests/script/unique/db/replica_add23.sim @@ -67,7 +67,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/db/replica_part.sim b/tests/script/unique/db/replica_part.sim index 9880ec666c8543676c406c843d14235a09ae13ee..924d544c4299b67b0946ff34984a87f228212793 100644 --- a/tests/script/unique/db/replica_part.sim +++ b/tests/script/unique/db/replica_part.sim @@ -54,7 +54,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/db/replica_reduce21.sim b/tests/script/unique/db/replica_reduce21.sim index d3a76485f81616497a8c4859aafefda7ef371c01..0305f3ad09e1df41e27960f6d2fea7c6035636f1 100644 --- a/tests/script/unique/db/replica_reduce21.sim +++ b/tests/script/unique/db/replica_reduce21.sim @@ -47,7 +47,7 @@ endi sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/db/replica_reduce31.sim b/tests/script/unique/db/replica_reduce31.sim index 5350bcc78c327ae9eb35f24e6d01901cebfb7a07..4286ad94adbb1cc8e4b736d46b93d5006d4adc24 100644 --- a/tests/script/unique/db/replica_reduce31.sim +++ b/tests/script/unique/db/replica_reduce31.sim @@ -55,7 +55,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/db/replica_reduce32.sim b/tests/script/unique/db/replica_reduce32.sim index ead265d5d54bd415d12147e5b61b04fcc5d7544f..730661ec02f3b488c26e07cfe4e6d534f0bef214 100644 --- a/tests/script/unique/db/replica_reduce32.sim +++ b/tests/script/unique/db/replica_reduce32.sim @@ -54,7 +54,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi if $data2_2 != null then diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim index 7e647925d1d3d66d21f279ace852e3fc12496510..ce13a4a847ea6ad056b9d7bfeef8076ea5c5ef4e 100644 --- a/tests/script/unique/dnode/alternativeRole.sim +++ b/tests/script/unique/dnode/alternativeRole.sim @@ -66,13 +66,13 @@ sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi if $data2_2 != null then goto show2 endi -if $data2_3 != slave then +if $data2_3 != follower then goto show2 endi diff --git a/tests/script/unique/dnode/offline3.sim b/tests/script/unique/dnode/offline3.sim index 
93c75e3b13333d55aea7cb2417413a14a1c13e62..de5cc946453c1d96e4a6867b0ffa5ffb673ca7c6 100644 --- a/tests/script/unique/dnode/offline3.sim +++ b/tests/script/unique/dnode/offline3.sim @@ -59,7 +59,7 @@ endi sql show mnodes print mnode1 $data2_1 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..39138d6db7ed5b5e9f0a17303692aee409afe6bc 100644 --- a/tests/script/unique/http/admin.sim +++ b/tests/script/unique/http/admin.sim @@ -178,7 +178,7 @@ endi print =============== step8 - monitor dbs #system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls #print 24-> $system_content -#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then +#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","leader"]],"rows":1}]@ then # return -1 # endi diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim index e3623c7c629d671eedc7b6a416b9e77e6445c4ff..fbffc0a69b2e0e85dc70423833a5737e0b85e2a3 100644 --- a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim +++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim @@ -195,33 +195,33 @@ print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $dat print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 -if $data5_4 != master then +if $data5_4 != leader then print $data5_4 sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_3 != slave then +if $data5_3 != follower then print $data5_2 sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_2 != master then +if $data5_2 != leader then print $data5_3 sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_4 != slave then +if $data7_4 != follower then print $data7_4 sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_3 != master then +if $data7_3 != leader then print $data7_3 sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_2 != slave then +if $data7_2 != follower then print $data7_2 sleep 2000 goto wait_dnode1_vgroup_slave diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim index c88e26d7eb19a533be84f646321e103480b2d10a..b076c4c5010d1f53a994c11efb641eed89fbb830 100644 --- a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim +++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim @@ -197,28 +197,28 @@ print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $dat print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 
$data8_4 $data9_4 -if $data5_4 != master then +if $data5_4 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_3 != slave then +if $data5_3 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_2 != master then +if $data5_2 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_4 != slave then +if $data7_4 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_3 != master then +if $data7_3 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_2 != slave then +if $data7_2 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim index 69e83a2c0091394b2babf76a592ab80a95ae3e6a..9fe8e31db92fc024c3ab748a5b2aad481a38988e 100644 --- a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim +++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim @@ -169,28 +169,28 @@ $d1v2status = $data7_4 $d1v3status = $data7_2 $d1v4status = $data7_3 -if $d2v2status != master then +if $d2v2status != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $d2v3status != master then +if $d2v3status != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $d2v4status != master then +if $d2v4status != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $d1v2status != slave then +if $d1v2status != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $d1v3status != slave then +if $d1v3status != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $d1v4status != slave then +if $d1v4status != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim index ed3f9b8274c204727a08c163596316ed17808d6b..f665f551f71917c7b4328f122c2249db78594416 100644 --- a/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim +++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim @@ -195,28 +195,28 @@ print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $dat print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3 print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 -if $data5_4 != master then +if $data5_4 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_3 != slave then +if $data5_3 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data5_2 != master then +if $data5_2 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_4 != slave then +if $data7_4 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_3 != master then +if $data7_3 != leader then sleep 2000 goto wait_dnode1_vgroup_slave endi -if $data7_2 != slave then +if $data7_2 != follower then sleep 2000 goto wait_dnode1_vgroup_slave endi diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..710b3aa169b0cd4433b1d6e28f14bd2c140e5571 100644 --- a/tests/script/unique/mnode/mgmt20.sim +++ b/tests/script/unique/mnode/mgmt20.sim @@ -27,10 +27,10 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower 
then goto show2 endi @@ -61,10 +61,10 @@ show4: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show4 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show4 endi diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim index 8409383309dbde5500b9719cd64fd74ca5e384b2..44000de86085166747908ae54771043c4db5f3ed 100644 --- a/tests/script/unique/mnode/mgmt21.sim +++ b/tests/script/unique/mnode/mgmt21.sim @@ -15,7 +15,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,10 +33,10 @@ show2: sql show mnodes -x show2 print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim index 399805312ba905d55bceffe011cfe074c831684e..415a40c21ce13dc108980364cb2978017193bbe9 100644 --- a/tests/script/unique/mnode/mgmt22.sim +++ b/tests/script/unique/mnode/mgmt22.sim @@ -14,7 +14,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,26 +33,26 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi print ============== step3 sql_error drop dnode $hostname1 -x error1 -print should not drop master +print should not drop leader print ============== step4 system sh/exec.sh -n dnode1 -s stop -x SIGINT sleep 3000 sql_error show mnodes -print error of no master +print error of no leader print ============== step5 sql_error drop dnode $hostname1 -print error of no master +print error of no leader print ============== step6 system sh/exec.sh -n dnode1 -s start @@ -71,10 +71,10 @@ show6: sql show mnodes -x show6 print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show6 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show6 endi @@ -94,10 +94,10 @@ sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto show7 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show7 endi if $data3_3 != null then diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..1145c271883f62d67ac6e4f5968477eac820ac01 100644 --- a/tests/script/unique/mnode/mgmt23.sim +++ b/tests/script/unique/mnode/mgmt23.sim @@ -14,7 +14,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,10 +33,10 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi @@ -53,10 +53,10 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi -if $dnode2Role != slave then +if $dnode2Role != follower then 
return -1 endi if $dnode3Role != null then @@ -82,13 +82,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step4 endi if $dnode2Role != null then goto step4 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step4 endi @@ -117,13 +117,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step5 endi if $dnode2Role != null then goto step5 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step5 endi diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..c68a7236f9b6e517eaa2378338cf1c1cd64dd68c 100644 --- a/tests/script/unique/mnode/mgmt24.sim +++ b/tests/script/unique/mnode/mgmt24.sim @@ -14,7 +14,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,10 +33,10 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi @@ -67,10 +67,10 @@ sql show mnodes -x step5 print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto step5 endi -if $data2_2 != slave then +if $data2_2 != follower then goto step5 endi diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim index 9cca9c844806b138faf52186ffc3184d4876a1d6..f9564f4f1a28fbcfda0ef09d1d2bb07547faa62a 100644 --- a/tests/script/unique/mnode/mgmt25.sim +++ b/tests/script/unique/mnode/mgmt25.sim @@ -14,7 +14,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,10 +33,10 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi @@ -53,10 +53,10 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi -if $dnode2Role != slave then +if $dnode2Role != follower then return -1 endi if $dnode3Role != null then @@ -75,13 +75,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null then return -1 endi -if $dnode3Role != slave then +if $dnode3Role != follower then return -1 endi diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..34bd3defba85b30a03f9c81bb99685237faef9c7 100644 --- a/tests/script/unique/mnode/mgmt26.sim +++ b/tests/script/unique/mnode/mgmt26.sim @@ -14,7 +14,7 @@ sql connect sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi @@ -33,10 +33,10 @@ show2: sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 -if $data2_1 != master then +if $data2_1 != leader then goto show2 endi -if $data2_2 != slave then +if $data2_2 != follower then goto show2 endi @@ -53,10 +53,10 
@@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi -if $dnode2Role != slave then +if $dnode2Role != follower then return -1 endi if $dnode3Role != null then @@ -76,13 +76,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null then return -1 endi -if $dnode3Role != slave then +if $dnode3Role != follower then return -1 endi @@ -103,13 +103,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null then return -1 endi -if $dnode3Role != slave then +if $dnode3Role != follower then return -1 endi diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..3a6140539bfa260a577c5d6b2592fde894f5a1c7 100644 --- a/tests/script/unique/mnode/mgmt30.sim +++ b/tests/script/unique/mnode/mgmt30.sim @@ -19,7 +19,7 @@ sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data3_3 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi if $data3_2 != null then @@ -53,13 +53,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step2 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step2 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step2 endi diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim index ce7cdce35d8c0463564f46d26a0711d39340c8bf..ad05cd4f6be778f2f7a827d1f50089b5c5e67ddd 100644 --- a/tests/script/unique/mnode/mgmt33.sim +++ b/tests/script/unique/mnode/mgmt33.sim @@ -15,7 +15,7 @@ sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data3_3 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi if $data3_2 != null then @@ -45,10 +45,10 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step2 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step2 endi if $dnode3Role != null then @@ -75,13 +75,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step3 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step3 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step3 endi @@ -104,13 +104,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step4 endi if $dnode2Role != null then goto step4 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step4 endi @@ -138,13 +138,13 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step5 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step5 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step5 endi @@ -169,10 +169,10 @@ print dnode3 ==> $dnode3Role if $dnode1Role != offline 
then goto step6 endi -#if $dnode2Role != master then +#if $dnode2Role != leader then # return -1 #endi -#if $dnode3Role != slave then +#if $dnode3Role != follower then # return -1 #endi @@ -197,10 +197,10 @@ print dnode3 ==> $dnode3Role if $dnode1Role != null then goto step7 endi -#if $dnode2Role != master then +#if $dnode2Role != leader then # return -1 #endi -#if $dnode3Role != slave then +#if $dnode3Role != follower then # return -1 #endi diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim index d8a46b0955f59273279bbbc5c89c07c05db672d7..7f62b43fb8bcd8c4987ca99f78eec1d8d49a88ed 100644 --- a/tests/script/unique/mnode/mgmt34.sim +++ b/tests/script/unique/mnode/mgmt34.sim @@ -18,7 +18,7 @@ sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data3_3 -if $data2_1 != master then +if $data2_1 != leader then return -1 endi if $data3_2 != null then @@ -49,10 +49,10 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step2 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step2 endi if $dnode3Role != null then @@ -84,13 +84,13 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step3 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step3 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step3 endi if $dnode4Role != null then @@ -119,13 +119,13 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step4 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step4 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step4 endi if $dnode4Role != null then @@ -152,16 +152,16 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step5 endi if $dnode2Role != null then goto step5 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step5 endi -if $dnode4Role != slave then +if $dnode4Role != follower then goto step5 endi @@ -190,16 +190,16 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step6 endi if $dnode2Role != null then goto step6 endi -if $dnode3Role != slave then +if $dnode3Role != follower then goto step6 endi -if $dnode4Role != slave then +if $dnode4Role != follower then goto step6 endi @@ -249,13 +249,13 @@ print dnode4 ==> $dnode4Role if $dnode1Role != null then goto step8 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step8 endi -#if $dnode3Role != master then +#if $dnode3Role != leader then # return -1 #endi -#if $dnode4Role != slave then +#if $dnode4Role != follower then # return -1 #endi diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim index 5afb41905846bff3ce9894e928245a7d34078354..fee2e405a283736272892367aae93582cc98f3ba 100644 --- a/tests/script/unique/mnode/mgmtr2.sim +++ b/tests/script/unique/mnode/mgmtr2.sim @@ -20,7 +20,7 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then return -1 endi if $dnode2Role != null 
then @@ -72,10 +72,10 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != master then +if $dnode1Role != leader then goto step4 endi -if $dnode2Role != slave then +if $dnode2Role != follower then goto step4 endi if $dnode3Role != null then diff --git a/tests/script/unique/vnode/many.sim b/tests/script/unique/vnode/many.sim index a9298b1cf275c24ab6ebe7fea9387a51d6d044ba..24e2cd60c7458d3bab027a80632bd5cddc5b7eba 100644 --- a/tests/script/unique/vnode/many.sim +++ b/tests/script/unique/vnode/many.sim @@ -51,7 +51,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/vnode/replica2_repeat.sim b/tests/script/unique/vnode/replica2_repeat.sim index ac68d591648b2dd66f3fdda8c70b0af40c814459..9845ef2d199ddcfe71ca664033bbf16ea84c24ec 100644 --- a/tests/script/unique/vnode/replica2_repeat.sim +++ b/tests/script/unique/vnode/replica2_repeat.sim @@ -52,7 +52,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/script/unique/vnode/replica3_basic.sim b/tests/script/unique/vnode/replica3_basic.sim index 0ff42b523b8982c85bd84bb251715585a66137fc..edb70b7d4cbaf53d85cd54b9282bda64562cc52d 100644 --- a/tests/script/unique/vnode/replica3_basic.sim +++ b/tests/script/unique/vnode/replica3_basic.sim @@ -44,13 +44,13 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi -if $data2_2 != slave then +if $data2_2 != follower then goto step1 endi -if $data2_3 != slave then +if $data2_3 != follower then goto step1 endi diff --git a/tests/script/unique/vnode/replica3_repeat.sim b/tests/script/unique/vnode/replica3_repeat.sim index 636bc64f89948a8bd4ec0d3dc9dc2a1b10f50e00..cde0c512a5c2d719171635a29a1fba94d1611ab9 100644 --- a/tests/script/unique/vnode/replica3_repeat.sim +++ b/tests/script/unique/vnode/replica3_repeat.sim @@ -59,7 +59,7 @@ sql show mnodes print mnode1 $data2_1 print mnode1 $data2_2 print mnode1 $data2_3 -if $data2_1 != master then +if $data2_1 != leader then goto step1 endi diff --git a/tests/system-test/2-query/td_12191.json b/tests/system-test/2-query/td_12191.json index f5d26db40dc04867c0613a83302d5c3d193e0b7c..daf938a4612ce4e6b815c1453404dab0c2722439 100644 --- a/tests/system-test/2-query/td_12191.json +++ b/tests/system-test/2-query/td_12191.json @@ -24,7 +24,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertMSDB.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertMSDB.json index 8bd5ddbae8d5ce81269626165b2d275d05135ea5..c3ea89a0534a3a87a23801e053be96cde8f1b7df 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertMSDB.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertMSDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertNanoDB.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertNanoDB.json index 5408a9841ab8a40e4ca7564724b7f6c7f941e0e0..b6428b482c3958a948d6c8615d26921b45a935d1 
100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertNanoDB.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertNanoDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertUSDB.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertUSDB.json index 13eb80f3cf7f751398babed8e922f9e5b3a4242e..4a648092cc3f4efac5a0d027f7d36d7af2a7d520 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertUSDB.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoInsertUSDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabase.json index 38ac666fac5097d616c17bdfc7e900256827ddf4..afe156e18c4dad98eea5d81a0a8772e48a735945 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabase.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabase.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json index 467c54988bd05a93091e3831e52c9a3785c0f26d..73511dbdd6534d1c9ca1277fa6c902583adb9b76 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseNow.json index a09dec21fa9cf3720b68a1fa2e843b49be0544ee..dede88c2dfe8b7dd1dca29d96b698b6e13202237 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseNow.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabasecsv.json b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabasecsv.json index 52e772becacffb406d30d902ae852d065f902a9e..20998d3392a82bcf5ef47e092bd747d8f8934f75 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabasecsv.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_no.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_no.json index 
759a437b448c8c65bf252e859345dd9557cc51c5..b7411913034d3e147a8de42300139c3c59dccb67 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_no.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_no.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_yes.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_yes.json index aafc79215fc0b94d037da3a9b229a2f967b51613..fa1eb1f7ff486f2dc78a2e06160fdf5263c1ba6a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_yes.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/TD-10539/create_taosdemo_yes.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-3453/query-interrupt.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-3453/query-interrupt.json index c2e4920097cd1b3581c9893c9677c3cf1f14b7ed..fc9bb5816d66500df37354c86debe5437fedfdaf 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-3453/query-interrupt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/TD-3453/query-interrupt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-4985/query-limit-offset.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-4985/query-limit-offset.json index ad85f9607b72c5d4562266508bfdcf68837c33bd..1b726ef5da5a7f699b99c0e03238337bfac0c575 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-4985/query-limit-offset.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/TD-4985/query-limit-offset.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py deleted file mode 100644 index ec55acb848352def34e3090e66c4ef392b737ce0..0000000000000000000000000000000000000000 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py +++ /dev/null @@ -1,706 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import random -import string -import os -import time -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -from util.dnodes import tdDnodes - -class TDTestCase: - updatecfgDict={'maxSQLLength':1048576} - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql") - - now = time.time() - self.ts = int(round(now * 1000)) - self.num = 100 - - def get_random_string(self, length): - letters = string.ascii_lowercase - result_str = ''.join(random.choice(letters) for i in range(length)) - return result_str - - def run(self): - tdSql.prepare() - # test case for https://jira.taosdata.com:18080/browse/TD-5213 - - print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============") - startTime = time.time() - sql = "create table regular_table_1(ts timestamp, " - for i in range(4094): - sql += "col%d int, " % (i + 1) - sql += "col4095 binary(22))" - tdLog.info(len(sql)) - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into regular_table_1 values(%d, " - for j in range(4094): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_1") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from regular_table_1") - tdSql.checkRows(self.num) - tdSql.checkCols(4096) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - #insert in order - tdLog.info('test insert in order') - for i in range(self.num): - sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 1000)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_1") - tdSql.checkData(0, 0, 2*self.num) - tdSql.query("select * from regular_table_1") - tdSql.checkRows(2*self.num) - tdSql.checkCols(4096) - - #insert out of order - tdLog.info('test insert out of order') - for i in range(self.num): - sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 2000)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_1") - tdSql.checkData(0, 0, 3*self.num) - tdSql.query("select * from regular_table_1") - tdSql.checkRows(3*self.num) - tdSql.checkCols(4096) - - - print("==============step2,regular table error col or value==============") - tdLog.info('test regular table exceeds row num') - # column > 4096 - sql = "create table regular_table_2(ts timestamp, " - for i in range(4095): - sql += "col%d int, " % (i + 1) - sql += "col4096 binary(22))" - tdLog.info(len(sql)) - tdSql.error(sql) - - # column > 4096 - sql = "insert into regular_table_1 values(%d, " - for j in range(4095): - str = "'%s', " % random.randint(0,1000) 
- sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.error(sql) - - # insert column < 4096 - sql = "insert into regular_table_1 values(%d, " - for j in range(4092): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.error(sql) - - # alter column > 4096 - sql = "alter table regular_table_1 add column max int; " - tdSql.error(sql) - - print("==============step3,regular table , mix data type==============") - startTime = time.time() - sql = "create table regular_table_3(ts timestamp, " - for i in range(2000): - sql += "col%d int, " % (i + 1) - for i in range(2000,4094): - sql += "col%d bigint, " % (i + 1) - sql += "col4095 binary(22))" - tdLog.info(len(sql)) - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into regular_table_3 values(%d, " - for j in range(4094): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_3") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from regular_table_3") - tdSql.checkRows(self.num) - tdSql.checkCols(4096) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - sql = "create table regular_table_4(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(4), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(10), " % (i + 1) - for i in range(4090,4094): - sql += "timestamp_%d timestamp, " % (i + 1) - sql += "col4095 binary(22))" - tdLog.info(len(sql)) - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into regular_table_4 values(%d, " - for j in range(500): - str = "'%s', " % random.randint(-2147483647,2147483647) - sql += str - for j in range(500,1000): - str = "'%s', " % random.randint(-32767,32767 ) - sql += str - for j in range(1000,1500): - str = "'%s', " % random.randint(-127,127) - sql += str - for j in range(1500,2000): - str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) - sql += str - for j in range(2000,2500): - str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) - sql += str - for j in range(2500,3000): - str = "'%s', " % random.choice(['true','false']) - sql += str - for j in range(3000,3500): - str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) - sql += str - for j in range(3500,3800): - str = "'%s', " % self.get_random_string(4) - sql += str - for j in range(3800,4090): - str = "'%s', " % self.get_random_string(10) - sql += str - for j in range(4090,4094): - str = "%s, " % (self.ts + j) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_4") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from regular_table_4") - tdSql.checkRows(self.num) - tdSql.checkCols(4096) - tdLog.info("end ,now new one") - - #insert null value - tdLog.info('test insert null value') - for i in 
range(self.num): - sql = "insert into regular_table_4 values(%d, " - for j in range(2500): - str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) - sql += str - for j in range(2500,3000): - str = "'%s', " % random.choice(['true' ,'false']) - sql += str - for j in range(3000,3500): - str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) - sql += str - for j in range(3500,3800): - str = "'%s', " % self.get_random_string(4) - sql += str - for j in range(3800,4090): - str = "'%s', " % self.get_random_string(10) - sql += str - for j in range(4090,4094): - str = "%s, " % (self.ts + j) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 10000)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_4") - tdSql.checkData(0, 0, 2*self.num) - tdSql.query("select * from regular_table_4") - tdSql.checkRows(2*self.num) - tdSql.checkCols(4096) - - #insert in order - tdLog.info('test insert in order') - for i in range(self.num): - sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,100) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 1000)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_4") - tdSql.checkData(0, 0, 3*self.num) - tdSql.query("select * from regular_table_4") - tdSql.checkRows(3*self.num) - tdSql.checkCols(4096) - - #insert out of order - tdLog.info('test insert out of order') - for i in range(self.num): - sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,100) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 2000)) - time.sleep(1) - tdSql.query("select count(*) from regular_table_4") - tdSql.checkData(0, 0, 4*self.num) - tdSql.query("select * from regular_table_4") - tdSql.checkRows(4*self.num) - tdSql.checkCols(4096) - - #define TSDB_MAX_BYTES_PER_ROW 49151[old:1024 && 16384] - #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] - tdLog.info('test regular_table max bytes per row 49151') - sql = "create table regular_table_5(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(20), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(34), " % (i + 1) - for i in range(4090,4094): - sql += "timestamp_%d timestamp, " % (i + 1) - sql += "col4095 binary(69))" - tdSql.execute(sql) - tdSql.query("select * from regular_table_5") - tdSql.checkCols(4096) - # TD-5324 - sql = "alter table regular_table_5 modify column col4095 binary(70); " - tdSql.error(sql) - - # drop and add - sql = "alter table regular_table_5 drop column col4095; " - tdSql.execute(sql) - sql = 
"select * from regular_table_5; " - tdSql.query(sql) - tdSql.checkCols(4095) - sql = "alter table regular_table_5 add column col4095 binary(70); " - tdSql.error(sql) - sql = "alter table regular_table_5 add column col4095 binary(69); " - tdSql.execute(sql) - sql = "select * from regular_table_5; " - tdSql.query(sql) - tdSql.checkCols(4096) - - #out TSDB_MAX_BYTES_PER_ROW 49151 - tdLog.info('test regular_table max bytes per row out 49151') - sql = "create table regular_table_6(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(20), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(34), " % (i + 1) - for i in range(4090,4094): - sql += "timestamp_%d timestamp, " % (i + 1) - sql += "col4095 binary(70))" - tdLog.info(len(sql)) - tdSql.error(sql) - - - print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============") - startTime = time.time() - sql = "create stable stable_1(ts timestamp, " - for i in range(4090): - sql += "col%d int, " % (i + 1) - sql += "col4091 binary(22))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " - tdLog.info(len(sql)) - tdSql.execute(sql) - sql = '''create table table_0 using stable_1 - tags('table_0' , '1' , '2' , '3' );''' - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into table_0 values(%d, " - for j in range(4090): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from table_0") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from table_0") - tdSql.checkRows(self.num) - tdSql.checkCols(4092) - - sql = '''create table table_1 using stable_1 - tags('table_1' , '1' , '2' , '3' );''' - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into table_1 values(%d, " - for j in range(2080): - sql += "'%d', " % random.randint(0,1000) - for j in range(2080,4080): - sql += "'%s', " % 'NULL' - for j in range(4080,4090): - sql += "'%s', " % random.randint(0,10000) - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from table_1") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from table_1") - tdSql.checkRows(self.num) - tdSql.checkCols(4092) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - #insert in order - tdLog.info('test insert in order') - for i in range(self.num): - sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 1000)) - time.sleep(1) - tdSql.query("select count(*) from table_1") - tdSql.checkData(0, 0, 2*self.num) - tdSql.query("select * from table_1") - tdSql.checkRows(2*self.num) - tdSql.checkCols(4092) - - #insert out of order - tdLog.info('test insert out of order') - for i in range(self.num): - sql = "insert into table_1 
(ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,1000) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 2000)) - time.sleep(1) - tdSql.query("select count(*) from table_1") - tdSql.checkData(0, 0, 3*self.num) - tdSql.query("select * from table_1") - tdSql.checkRows(3*self.num) - tdSql.checkCols(4092) - - print("==============step5,stable table , mix data type==============") - sql = "create stable stable_3(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(4), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(10), " % (i + 1) - sql += "col4091 binary(22))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " - tdLog.info(len(sql)) - tdSql.execute(sql) - sql = '''create table table_30 using stable_3 - tags('table_30' , '1' , '2' , '3' );''' - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into table_30 values(%d, " - for j in range(500): - str = "'%s', " % random.randint(-2147483647,2147483647) - sql += str - for j in range(500,1000): - str = "'%s', " % random.randint(-32767,32767 ) - sql += str - for j in range(1000,1500): - str = "'%s', " % random.randint(-127,127) - sql += str - for j in range(1500,2000): - str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) - sql += str - for j in range(2000,2500): - str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) - sql += str - for j in range(2500,3000): - str = "'%s', " % random.choice(['true','false']) - sql += str - for j in range(3000,3500): - str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) - sql += str - for j in range(3500,3800): - str = "'%s', " % self.get_random_string(4) - sql += str - for j in range(3800,4090): - str = "'%s', " % self.get_random_string(10) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - time.sleep(1) - tdSql.query("select count(*) from table_30") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from table_30") - tdSql.checkRows(self.num) - tdSql.checkCols(4092) - - #insert null value - tdLog.info('test insert null value') - sql = '''create table table_31 using stable_3 - tags('table_31' , '1' , '2' , '3' );''' - tdSql.execute(sql) - - for i in range(self.num): - sql = "insert into table_31 values(%d, " - for j in range(2500): - str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) - sql += str - for j in range(2500,3000): - str = "'%s', " % random.choice(['true' ,'false']) - sql += str - for j in range(3000,3500): - str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) - sql += str - for j in range(3500,3800): - str = "'%s', " % self.get_random_string(4) - sql += str - for j in range(3800,4090): - str = "'%s', " % self.get_random_string(10) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i)) - 
time.sleep(1) - tdSql.query("select count(*) from table_31") - tdSql.checkData(0, 0, self.num) - tdSql.query("select * from table_31") - tdSql.checkRows(self.num) - tdSql.checkCols(4092) - - #insert in order - tdLog.info('test insert in order') - for i in range(self.num): - sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,100) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 1000)) - time.sleep(1) - tdSql.query("select count(*) from table_31") - tdSql.checkData(0, 0, 2*self.num) - tdSql.query("select * from table_31") - tdSql.checkRows(2*self.num) - tdSql.checkCols(4092) - - #insert out of order - tdLog.info('test insert out of order') - for i in range(self.num): - sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, " - for j in range(10): - str = "'%s', " % random.randint(0,100) - sql += str - sql += "'%s')" % self.get_random_string(22) - tdSql.execute(sql % (self.ts + i + 2000)) - time.sleep(1) - tdSql.query("select count(*) from table_31") - tdSql.checkData(0, 0, 3*self.num) - tdSql.query("select * from table_31") - tdSql.checkRows(3*self.num) - tdSql.checkCols(4092) - - #define TSDB_MAX_BYTES_PER_ROW 49151 TSDB_MAX_TAGS_LEN 16384 - #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] - tdLog.info('test super table max bytes per row 49151') - sql = "create table stable_4(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(20), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(34), " % (i + 1) - sql += "col4091 binary(101))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " - tdSql.execute(sql) - sql = '''create table table_40 using stable_4 - tags('table_40' , '1' , '2' , '3' );''' - tdSql.execute(sql) - tdSql.query("select * from table_40") - tdSql.checkCols(4092) - tdSql.query("describe table_40") - tdSql.checkRows(4096) - - tdLog.info('test super table drop and add column or tag') - sql = "alter stable stable_4 drop column col4091; " - tdSql.execute(sql) - sql = "select * from stable_4; " - tdSql.query(sql) - tdSql.checkCols(4095) - sql = "alter table stable_4 add column col4091 binary(102); " - tdSql.error(sql) - sql = "alter table stable_4 add column col4091 binary(101); " - tdSql.execute(sql) - sql = "select * from stable_4; " - tdSql.query(sql) - tdSql.checkCols(4096) - - sql = "alter stable stable_4 drop tag tag_1; " - tdSql.execute(sql) - sql = "select * from stable_4; " - tdSql.query(sql) - tdSql.checkCols(4095) - sql = "alter table stable_4 add tag tag_1 int; " - tdSql.execute(sql) - sql = "select * from stable_4; " - tdSql.query(sql) - tdSql.checkCols(4096) - sql = "alter table stable_4 add tag loc1 nchar(10); " - tdSql.error(sql) - - tdLog.info('test super table max bytes per row 49151') - sql 
= "create table stable_5(ts timestamp, " - for i in range(500): - sql += "int_%d int, " % (i + 1) - for i in range(500,1000): - sql += "smallint_%d smallint, " % (i + 1) - for i in range(1000,1500): - sql += "tinyint_%d tinyint, " % (i + 1) - for i in range(1500,2000): - sql += "double_%d double, " % (i + 1) - for i in range(2000,2500): - sql += "float_%d float, " % (i + 1) - for i in range(2500,3000): - sql += "bool_%d bool, " % (i + 1) - for i in range(3000,3500): - sql += "bigint_%d bigint, " % (i + 1) - for i in range(3500,3800): - sql += "nchar_%d nchar(20), " % (i + 1) - for i in range(3800,4090): - sql += "binary_%d binary(34), " % (i + 1) - sql += "col4091 binary(102))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " - tdSql.error(sql) - - print("==============step6, super table error col ==============") - tdLog.info('test exceeds row num') - # column + tag > 4096 - sql = "create stable stable_2(ts timestamp, " - for i in range(4091): - sql += "col%d int, " % (i + 1) - sql += "col4092 binary(22))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " - tdLog.info(len(sql)) - tdSql.error(sql) - - # column + tag > 4096 - sql = "create stable stable_2(ts timestamp, " - for i in range(4090): - sql += "col%d int, " % (i + 1) - sql += "col4091 binary(22))" - sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) " - tdLog.info(len(sql)) - tdSql.error(sql) - - # alter column + tag > 4096 - sql = "alter table stable_1 add column max int; " - tdSql.error(sql) - # TD-5322 - sql = "alter table stable_1 add tag max int; " - tdSql.error(sql) - # TD-5324 - sql = "alter table stable_4 modify column col4091 binary(102); " - tdSql.error(sql) - sql = "alter table stable_4 modify tag loc nchar(20); " - tdSql.query("select * from table_40") - tdSql.checkCols(4092) - tdSql.query("describe table_40") - tdSql.checkRows(4096) - - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv deleted file mode 100755 index 5b30be5b4c4d5c323141097af6207ffb8bb93449..0000000000000000000000000000000000000000 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv +++ /dev/null @@ -1,3 +0,0 @@ -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 
237 … 4091 [remainder of the deleted insertSigcolumnsNum4096.csv elided: per the hunk header (@@ -1,3 +0,0 @@) the file held three rows; row 1 enumerates the column indices 1 through 4091, and the data rows that follow begin 1,2,3 and pad the remaining columns with NULL]
,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL
,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL
,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL
,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL -1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NU
LL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NU
LL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NU
LL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NU
LL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NU
LL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,4,NULL,NULL,NULL,NULL,NULL,NULL,5,NULL,NULL,6,NULL,NULL,NULL,7,NULL,NULL,NULL,8,NULL,NULL,NULL,9,NULL,NULL,10 \ No newline at end of file diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json deleted file mode 100755 index d7225dfd129c76db181cfc93789ac0f7a535d0fa..0000000000000000000000000000000000000000 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json +++ /dev/null @@ -1,137 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - 
"interlace_rows": 10, - "num_of_records_per_req": 1, - "max_sql_len": 102400000, - "databases": [{ - "dbinfo": { - "name": "json_test", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 50, - "blocks": 8, - "precision": "ms", - "keep": 36500, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - "name": "stb_old", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb_old_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 2, - "childtable_limit": 0, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv", - "tags_file": "", - "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] - },{ - "name": "stb_new", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb_new_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 2, - "childtable_limit": 0, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./5-taos-tools/taosbenchmark/sample.csv", - "tags_file": "", - "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}] - },{ - "name": "stb_mix", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb_mix_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 2, - "childtable_limit": 0, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./5-taos-tools/taosbenchmark/sample.csv", - "tags_file": "", - "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], - "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] - },{ - "name": "stb_excel", - "child_table_exists":"no", - "childtable_count": 2, - "childtable_prefix": "stb_excel_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "sample", - "insert_mode": "taosc", - "insert_rows": 2, - "childtable_limit": 0, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - 
"timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv", - "tags_file": "", - "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], - "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] - }] - }] -} diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py deleted file mode 100755 index 56b51f5498aed0a540a86bf03625266ad3599b58..0000000000000000000000000000000000000000 --- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py +++ /dev/null @@ -1,176 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -import time -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - - #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force - #regular old && new - startTime = time.time() - os.system("%staosBenchmark -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath) - tdSql.execute("use regular_old") - tdSql.query("show tables;") - tdSql.checkRows(1) - tdSql.query("select * from d0;") - tdSql.checkCols(1024) - tdSql.query("describe d0;") - tdSql.checkRows(1024) - - os.system("%staosBenchmark -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath) - tdSql.execute("use regular_new") - tdSql.query("show tables;") - tdSql.checkRows(1) - tdSql.query("select * from d0;") - tdSql.checkCols(4096) - tdSql.query("describe d0;") - tdSql.checkRows(4096) - - #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force - os.system("%staosBenchmark -d super_old -t 1 -n 10 -l 1021 -y" % binPath) - tdSql.execute("use super_old") - tdSql.query("show tables;") - tdSql.checkRows(1) - tdSql.query("select * from meters;") - tdSql.checkCols(1024) - tdSql.query("select * from d0;") 
-        tdSql.checkCols(1022)
-        tdSql.query("describe meters;")
-        tdSql.checkRows(1024)
-        tdSql.query("describe d0;")
-        tdSql.checkRows(1024)
-
-        os.system("%staosBenchmark -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
-        tdSql.execute("use super_new")
-        tdSql.query("show tables;")
-        tdSql.checkRows(1)
-        tdSql.query("select * from meters;")
-        tdSql.checkCols(4096)
-        tdSql.query("select * from d0;")
-        tdSql.checkCols(4094)
-        tdSql.query("describe meters;")
-        tdSql.checkRows(4096)
-        tdSql.query("describe d0;")
-        tdSql.checkRows(4096)
-        tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
-        tdSql.query("select * from stb_new1_1")
-        tdSql.checkCols(4094)
-        tdSql.query("describe stb_new1_1;")
-        tdSql.checkRows(4096)
-
-        # insert: create one or mutiple tables per sql and insert multiple rows per sql
-        # test case for https://jira.taosdata.com:18080/browse/TD-5213
-        os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
-        tdSql.execute("use json_test")
-        tdSql.query("select count (tbname) from stb_old")
-        tdSql.checkData(0, 0, 1)
-
-        tdSql.query("select * from stb_old")
-        tdSql.checkRows(10)
-        tdSql.checkCols(1024)
-
-        tdSql.query("select count (tbname) from stb_new")
-        tdSql.checkData(0, 0, 1)
-
-        tdSql.query("select * from stb_new")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4096)
-        tdSql.query("describe stb_new;")
-        tdSql.checkRows(4096)
-        tdSql.query("select * from stb_new_0")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4091)
-        tdSql.query("describe stb_new_0;")
-        tdSql.checkRows(4096)
-        tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
-        tdSql.query("select * from stb_new1_1")
-        tdSql.checkCols(4091)
-        tdSql.query("describe stb_new1_1;")
-        tdSql.checkRows(4096)
-
-        tdSql.query("select count (tbname) from stb_mix")
-        tdSql.checkData(0, 0, 1)
-
-        tdSql.query("select * from stb_mix")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4096)
-        tdSql.query("describe stb_mix;")
-        tdSql.checkRows(4096)
-        tdSql.query("select * from stb_mix_0")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4092)
-        tdSql.query("describe stb_mix_0;")
-        tdSql.checkRows(4096)
-
-        tdSql.query("select count (tbname) from stb_excel")
-        tdSql.checkData(0, 0, 1)
-
-        tdSql.query("select * from stb_excel")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4096)
-        tdSql.query("describe stb_excel;")
-        tdSql.checkRows(4096)
-        tdSql.query("select * from stb_excel_0")
-        tdSql.checkRows(10)
-        tdSql.checkCols(4092)
-        tdSql.query("describe stb_excel_0;")
-        tdSql.checkRows(4096)
-        endTime = time.time()
-        print("total time %ds" % (endTime - startTime))
-
-
-        os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
-
-
-
-    def stop(self):
-        tdSql.close()
-        tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tnt1r.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tnt1r.json
index d73719ebe41c5f25fc2cd585bc9974d9e83a946e..f379fe61bf3e799eb5315cf5f41bd5158de29b6b 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tnt1r.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tnt1r.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tntmr.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tntmr.json
index e10fd1116b948032d5aa67dc0844bbf493d650de..142098865062b0b6489a1aae18b6492f3e4b129b 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tntmr.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-1s1tntmr.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-allDataType.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-allDataType.json
index a7ada9b84e2bb534eac63364039598d1ddb4c744..1e714c081321c6fc7005d10e49211a4cf10e44b9 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-allDataType.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-allDataType.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese-sml.json
index 49407a76d76edda3c45716134521265114702f11..3633bb64820bc49cedaf3c0964a0384b34e38a32 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese-sml.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese-sml.json
@@ -27,7 +27,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese.json
index ab848b1317049f672775ec0cc6d1f6c3cd78760e..88ace597784901c8865ee539517a39757e722231 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-chinese.json
@@ -27,7 +27,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-disorder.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-disorder.json
index d6420b100e5ad2bae887b3ae5fb5cc0f306d9762..2ae3d6c1ceb91f59ee1d7661b7c5f816d8465496 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-disorder.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-disorder.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-N00.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-N00.json
index 2c3b8c6f81962e02ff5aac37c58fb04b79159a7c..fc2cf160a4ee51ad020bd8aa0bd4a2dc6ee0b95c 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-N00.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-N00.json
@@ -25,7 +25,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-Y00.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-Y00.json
index f8fe21a6c4015a27ee663bc7ac54a7889af62add..39e4b3bbc8900f4aa91aaf3472097f363568e2df 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-Y00.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-drop-exist-auto-Y00.json
@@ -25,7 +25,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-illegal.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-illegal.json
index c56f8f30402aa948828377b46e5cf8678a3b3472..920eed645608002435d335ece4527cfdcf06d4ae 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-illegal.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-illegal.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-interlace-row.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-interlace-row.json
index 93bb92764d3e4ba141a8b8c9b2df4fda69cb9eaa..a40c17d1f94f2b43de2cee7d2c83a8b0f29156f7 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-interlace-row.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-interlace-row.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-interval-speed.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-interval-speed.json
index d51dee428fc8f1bb61af84d5f570f69cce344651..ae15b41e4fc464e6cd4932c63c3d1df440ff6fd5 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-interval-speed.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-interval-speed.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-newdb.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-newdb.json
index 05a6f7606a22f7c4712ed7c1a4452c43c87f5428..4386b7a7ee9fa21bfcfb115dccb2e72b509e3c80 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-newdb.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-newdb.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-newtable.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-newtable.json
index 02b56bbfe8a5e0900467e0dc0537919465a406a7..a87e257ff94d342b83a39b19c1bffd08be35ffe5 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-newtable.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-newtable.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-nodbnodrop.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-nodbnodrop.json
index 5978e5529f8d3a3b29cb04f1744a045b56e7e5ba..44707e87484d0cd898fc35cbb3b0d561072a59cf 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-nodbnodrop.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-nodbnodrop.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-offset.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-offset.json
index 53edf41072a93b907da8af6648dab03691e039a8..351a2b38d59fa2569d60a2fc48c3c2cf81ce6c68 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-offset.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-offset.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-renewdb.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-renewdb.json
index 91c033c67711e0713f65a08a48351288470d565e..de023d15a2b9654df5999940ed04a46e4eeabd27 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-renewdb.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-renewdb.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-sample-ts.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-sample-ts.json
index 344293b5558f3d0c95d417fbd60009f767467f9d..6d37233b975ab67cf8ca30f1cfc9268e3b449400 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-sample-ts.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-sample-ts.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-sample.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-sample.json
index c31099913b2a14543bca32db646c204498e3fe5c..d251dafe4ba07c6a28cf3979e52ee77b1d630ef5 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-sample.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-sample.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert-timestep.json b/tests/system-test/5-taos-tools/taosbenchmark/insert-timestep.json
index c794c73c843607a7ef6bb84b288ac890a317bfa9..059643a851faefa30c0345aac52e0779de2a7fa6 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insert-timestep.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insert-timestep.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151-error.json b/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151-error.json
index be55d31d5595b210695584f6dbbc334bb7b7f8e6..7f16fa74ec383df86360ebe3fbb2f956f81e8d90 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151-error.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151-error.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151.json b/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151.json
index 67abdc67eed813501e012c8b7dce5d0719d22eb6..ffcd49e32e1774b70d7430f3b75ef4347c4eea94 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertBinaryLenLarge16374AllcolLar49151.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertChildTab0.json b/tests/system-test/5-taos-tools/taosbenchmark/insertChildTab0.json
index 84aa75eca7ac5eaabfeef715471e9b91ee66dfec..99f89eb7dc82b14e0f4fe3d8543326ad22a19ff9 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertChildTab0.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertChildTab0.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertChildTabLess0.json b/tests/system-test/5-taos-tools/taosbenchmark/insertChildTabLess0.json
index 58acd9bbd022bb55ef573f9a7e9434ed935b55bc..68fa2acf634140643014a028f8377372a78598f1 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertChildTabLess0.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertChildTabLess0.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNum4096.json b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNum4096.json
index ecc62b7251d4b74d163c2d2cd116c0d1b3e80c5c..a96422fc52c2b3d2ffe19b2435f0770074f74ee5 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNum4096.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNum4096.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNumLarge4096.json b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNumLarge4096.json
index fe6f22527d46418d96c7dbdd61336254bf137200..18ea19682ced9b0757bd019f73a85d875471388f 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNumLarge4096.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsAndTagNumLarge4096.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsNum0.json b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsNum0.json
index 52d6ae029de4a2c019545ac047526638237d701e..867152a6030712d8385449f5def8723a57f23f0d 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsNum0.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertColumnsNum0.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertInterlaceRowsLarge1M.json b/tests/system-test/5-taos-tools/taosbenchmark/insertInterlaceRowsLarge1M.json
index 0c82f1d299f4dbf888c3a5033c283cc1beb2bbeb..aaa5812c49cc14802d84595c019aebb42b4fb62f 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertInterlaceRowsLarge1M.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertInterlaceRowsLarge1M.json
@@ -25,7 +25,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertMaxNumPerReq.json b/tests/system-test/5-taos-tools/taosbenchmark/insertMaxNumPerReq.json
index 1166ac36438babefbe0d0de70d5a5e3f088f055f..686a2cc4f108d4439ac26a7f7cdeeee291e7314b 100644
--- a/tests/system-test/5-taos-tools/taosbenchmark/insertMaxNumPerReq.json
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertMaxNumPerReq.json
@@ -26,7 +26,6 @@
     "minRows": 100,
     "maxRows": 4096,
     "comp":2,
-    "walLevel":1,
     "cachelast":0,
     "quorum":1,
     "fsync":3000,
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReq0.json b/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReq0.json
b/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReq0.json index 8247c5f0158e5cce4d3891dc88048e4a29a3d888..def5043d4fbe56c8606126b8a997caa0ca193e3f 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReq0.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReq0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReqless0.json b/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReqless0.json index 138ebbadf63d16816e723462693684cfd2e4c2c0..f1f4b7f3c66bc5072beac1aac18c9d69c3df72f4 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReqless0.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertNumOfrecordPerReqless0.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertRestful.json b/tests/system-test/5-taos-tools/taosbenchmark/insertRestful.json index 682dcf2ce4393815590552e935578df26bb8f43c..cb90c1f89898878b668d052e3eee71aa0c8d01a9 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insertRestful.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertRestful.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertSigcolumnsNum4096.json b/tests/system-test/5-taos-tools/taosbenchmark/insertSigcolumnsNum4096.json index e8468f5906a7ebdef62f6509a8968a0df7bdd775..8b8f959e05b8189e1ae8e0dc038522709f4c9e10 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insertSigcolumnsNum4096.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertSigcolumnsNum4096.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertTagsNumLarge128.json b/tests/system-test/5-taos-tools/taosbenchmark/insertTagsNumLarge128.json index 4dbe2940e2c7954e6b41a8f645d9e8d809d013d6..4480cf47d3476927eecc59098962caf33de27988 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insertTagsNumLarge128.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insertTagsNumLarge128.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insert_5M_rows.json b/tests/system-test/5-taos-tools/taosbenchmark/insert_5M_rows.json index 65973ccb485585de689f5e44a3bca28b675732b4..ae820815257f4fd3f33130c2829eb0ba9c4e47ae 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/insert_5M_rows.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/insert_5M_rows.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/manual_block1_comp.json b/tests/system-test/5-taos-tools/taosbenchmark/manual_block1_comp.json index a1a28c9ee970c9db1f21ace18dd7b8f54f39e5ed..5c8dc689ae00b536719e33d27732377362cbc128 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/manual_block1_comp.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/manual_block1_comp.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, 
"cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/manual_block2.json b/tests/system-test/5-taos-tools/taosbenchmark/manual_block2.json index 03f6e038fb4072f64569e65e91f86ccd8ce5f86e..c92c18b025e045018512baf2fd879e859324dd93 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/manual_block2.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/manual_block2.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_A.json b/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_A.json index 7b8abd6d4e25991d38ff16c737bf8169c7311318..0f7786e6822580d842a91c6f9b498c2ab401b07b 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_A.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_A.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_B.json b/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_B.json index aeee6322e5c0e6b58c0433be5f345e7c4f84f339..f8decfca417ea2a86ac1065334169841694df68c 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_B.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/manual_change_time_1_1_B.json @@ -25,7 +25,6 @@ "minRows": 1000, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit1.json b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit1.json index e30b7b0b1c6a136aa45c91da165ff8101eeb42e3..f166d461fec3d84f5c2fb4b295272529818ab35a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit1.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit1.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit5.json b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit5.json index d4ce2fee46d8848f574d75173818bff819c1d31f..ebbbc001f9da6c384f6cdf2eb4905ea6933e6f58 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit5.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit5.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit94.json b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit94.json index ce12accf06c101956ec6a9d025e63bb1814acbd0..a18e1e0e1a36af7ea38eb159471c996c368c981d 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit94.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-limit94.json @@ -27,7 +27,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-newdb.json b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-newdb.json index 9ffb2953d3c46df5a6cbd4e6042748185254e62a..4b246a93d731c47e78d01e280fe777cb5d54e397 100644 --- 
a/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-newdb.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/moredemo-offset-newdb.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/query-interrupt.json b/tests/system-test/5-taos-tools/taosbenchmark/query-interrupt.json index 896e484c258ed4f1418f48a74cd643defc9c6731..8857d5adae2b5ea808e1044fd28512b4562d597b 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/query-interrupt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/query-interrupt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/queryInsertdata.json b/tests/system-test/5-taos-tools/taosbenchmark/queryInsertdata.json index eb196e4096d26f429f013a8936c910e5dc86c304..756316621d958693c16a5ced6e5882305cf88dcc 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/queryInsertdata.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/queryInsertdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/queryInsertrestdata.json b/tests/system-test/5-taos-tools/taosbenchmark/queryInsertrestdata.json index 0febbdfa19d2ba8dd4db0b318d05c5af18fd1584..0073f52e2bd4dbad211fcfd30ab4e95e3a215401 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/queryInsertrestdata.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/queryInsertrestdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tnt1r-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tnt1r-sml.json index 5cd06c02759ddcba93eaa8ef4ef848a9b645cbda..8e96931e523b60a65ce59be79e038472fd4fe929 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tnt1r-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tnt1r-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tntmr-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tntmr-sml.json index 0885e01782b41079ccbfb7a30a8b4d3628ba9c20..5042549f09201a71ae9bc907ba2ac162acf4c382 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tntmr-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-1s1tntmr-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-allDataType-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-allDataType-sml.json index cbd4f6cb59c1ddd146b42a233c740d6bbaca45d3..0de5ddcc26840162b6e1264266ce4a3de0bd20ab 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-allDataType-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-allDataType-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-disorder-sml.json 
b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-disorder-sml.json index 6f24801cb04f9f515e33898fb587b95029def325..57006fcc3c1a75d7f2064a10b4174f6ecd46a167 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-disorder-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-disorder-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-N00-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-N00-sml.json index 92e6ec0df7a70329312676298c3b5ffccc2a8767..dcca0f82aef7ed856a61a54ebb28d6ea3c1eccde 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-N00-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-N00-sml.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-Y00-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-Y00-sml.json index c09493ec7b892baba37a7be4addb0ce526752f07..cdfc5cb26d7a89add24b12fb0731aeaab16d3690 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-Y00-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-drop-exist-auto-Y00-sml.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interlace-row-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interlace-row-sml.json index e04f2ff5e7cb24cb5384b7451712b3fe83bf18c3..caf9a9466b63c0336add8f52f8dd4b83dc87ad3a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interlace-row-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interlace-row-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interval-speed-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interval-speed-sml.json index 4a4227adb8fdcd0cb025a10c5b6f417c921acd96..564f2405e3e008dae7c7dbdf14519860ea5acf25 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interval-speed-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-interval-speed-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newdb-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newdb-sml.json index 1d29842e02c654987c50e6e73d4aec5eed48aa83..f0a84487d5e3a7fd1a52f655c622d23a8495bf0c 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newdb-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newdb-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newtable-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newtable-sml.json index 886503a950ca18b752bfa264218bb8564ce44ae0..ac5ba1dc5ff5b6fb3347cacf1ee276871733d226 100644 --- 
a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newtable-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-newtable-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-nodbnodrop-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-nodbnodrop-sml.json index ca99d135c5f466c911f3063b88fbb3e58c4e4ed4..50af8517bc19a3de655280a6159644cb15df7df5 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-nodbnodrop-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-nodbnodrop-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-offset-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-offset-sml.json index d0109b50cf449b0e7e1b258ae29723a560b1d2f6..d79ae2b0054e591c6ea40c90ed54072fc36b47a6 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-offset-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-offset-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-renewdb-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-renewdb-sml.json index f8f3a8ee5cea1834c31ebb275a10977cd960f829..459d47b114e872eb702bbc3fe782562fdc5f4086 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-renewdb-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-renewdb-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sample-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sample-sml.json index a778c4860bfb92c2342409d17f25ef6d46fec707..af214a232205f861a57a41a81d42464bf90c7639 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sample-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sample-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-json-alltype.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-json-alltype.json index 66885ebab89f7221830e66d642ca17b99de0e397..eca27390c6b2c2a93c8e9c7a2222e8785913637e 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-json-alltype.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-json-alltype.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-telnet-alltype.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-telnet-alltype.json index c9fa0f6fb0ddc777159b5d13f324c65b23cabd0d..6c780edd1537dcd56f56a4d3ee30d70cc4326835 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-telnet-alltype.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-telnet-alltype.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git 
a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-timestamp.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-timestamp.json index 4e8ff40cfdb7650f9d82635ac5be42f67904158a..e7704b87fe677abacdf94c71724cf15177a01790 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-timestamp.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-sml-timestamp.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-timestep-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-timestep-sml.json index 1d496b6b46bf3df3c4312bacafbfb77125491058..1d0490f539ec5ff80e76bb7966bbe5452d5521df 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-timestep-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insert-timestep-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json index c70db14b4c9b5fabe590eb8fec4a1f0e4dbc831a..723260c4228be73e21b01a37e50729dec24e4ce9 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-error-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json index 12034adc0788f84852019d776fc0987cbc9c4f16..ba3586635b4d000638c72bb6a55d58d2b2fb2d48 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTab0-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTab0-sml.json index 4b27b6b4d01d930578f639638db7a3f277a4cada..b96357983df2f6b27986e251ae9d8cc3fe35b28d 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTab0-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTab0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTabLess0-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTabLess0-sml.json index 8f27feba6be7e3018461b0070420cc759cf8fc72..3e37ca197f14d93f83f120fd610e62e0f47e7b2c 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTabLess0-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertChildTabLess0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNum4096-sml.json 
b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNum4096-sml.json index 2e4063cf272ba18732f0e456362cb1103ba6d5c4..38477734e231196e65c3e78413033691b1d73ff2 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNum4096-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNum4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNumLarge4096-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNumLarge4096-sml.json index 83c08923c4b51aa25b3a41d1b6168b48b1bb680f..215d8f052afd632afc6c2f83c0278f128231648e 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNumLarge4096-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsAndTagNumLarge4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsNum0-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsNum0-sml.json index 92e88141ca09971d0d202ee488471c14e07d4cd3..0804acbae0db9c22972ab4107d8d2ddd0f1ed130 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsNum0-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertColumnsNum0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertInterlaceRowsLarge1M-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertInterlaceRowsLarge1M-sml.json index 18f1a39e0afcdae3d52e4bc4a4a97e15dbcfda37..73845c2dc5042926e7e60ae8ea501ee326bc84f2 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertInterlaceRowsLarge1M-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertInterlaceRowsLarge1M-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml-telnet.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml-telnet.json index 01ec546012ad04f94cfb6224048fffd89d5cbbc8..b3a113ad38447ce0df6c6e685edc046ff5bea86f 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml-telnet.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml-telnet.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml.json index d950a260f6ed3ad4a9ed53bc859304a71e5a680a..33e61b7d2052ed136911face5356d4dc911eb975 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertMaxNumPerReq-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReq0-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReq0-sml.json index 
0deed5ba5420a1dd9a1efddbb6e1e7a757dc10d0..aff3190e1a24a5961d3e887dbd680b74b87ad141 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReq0-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReq0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReqless0-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReqless0-sml.json index 9d1d1ee71898d5e80a7310822da00de6c4636746..b9f11954571ab52c212b24cb6ee23f382412968a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReqless0-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertNumOfrecordPerReqless0-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertSigcolumnsNum4096-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertSigcolumnsNum4096-sml.json index e15edfdf445eeddeff111a625b6f80e16189b8dc..3db1de723f56e64e5c9160a268315f6ba409a64d 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertSigcolumnsNum4096-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertSigcolumnsNum4096-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertTagsNumLarge128-sml.json b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertTagsNumLarge128-sml.json index 885aafd8840fdf3e7572e633c971540e05252e53..28a7251216961a561eddca90358ae5e44db618b4 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/sml/insertTagsNumLarge128-sml.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/sml/insertTagsNumLarge128-sml.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json index f86eaedd142f5ea02a7bdeb45492c6bbfdec6a1a..98a2dd5ae62b010d3c7b4e8bf7707e29847aaf21 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json index be7421cd498216d778f6cdb71a71841a57b7aa54..2a056b46e5a5c0be6971d955396ac08ef5fb2b49 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-allDataType-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-allDataType-stmt.json index 46a0832612ff0f3db489b1917ff3b2c53606b2de..134e4755b53bc06711b0f6138f74977ccc69efe0 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-allDataType-stmt.json +++ 
b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-allDataType-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-disorder-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-disorder-stmt.json index e7501804211c60767e073f98865a6ee9d719901f..bc948974b6e60d73520b30c9d03b0cb038e899b9 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-disorder-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-disorder-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-N00-stmt.json index 2712f885936c12c1cf7742376ea541fd12e55cd4..c09d5cfeb3cab11007be07bb74c89897ad2b11ec 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-N00-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-N00-stmt.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-Y00-stmt.json index f8fe21a6c4015a27ee663bc7ac54a7889af62add..39e4b3bbc8900f4aa91aaf3472097f363568e2df 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-Y00-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-drop-exist-auto-Y00-stmt.json @@ -25,7 +25,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interlace-row-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interlace-row-stmt.json index 45eb612e6f2efcedfe9de8d5f6cb4aeb3a464353..4c5e90f1850cf7ee2f35b7e81179bfef797d3aef 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interlace-row-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interlace-row-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json index 14fafd60d5d69660e9c65bb3d498e4cbca4759da..93e41ea575f17a2310111f4e8deb32d45a9c8978 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json index 622b2554ec37b223226fcab3ad3e01568937fc0f..f0ad1b4a5f796e222f9d32ccabdaeb757408947a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, 
"cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json index 31985c85460cf39cc926afdc3c614fb84a45bd4b..15d2753c4b23fc83e40a9317eff95b3945adddfd 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json index 3ebc377ca79d5cf472c102f23736960d757636e1..d636c95a9461f6ccea7b21e792ed0034fd8d214d 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json index adc6fa74bee9441999b83196726c2a133da7c24d..263a592dfadcd811f69071c7e1f9efc3a1dc3520 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json index 715644f4f062d166e67f3038bacb903a26fbf93d..04165f16b1c70d9b3e6de547c37ffdb85388a6e6 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-stmt.json index e3d6ce850aeae242a5ac857cc02a9123845debb7..cc4d180fb5ff3537c71164b6dd288e98503bda08 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-ts-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-ts-stmt.json index 344293b5558f3d0c95d417fbd60009f767467f9d..6d37233b975ab67cf8ca30f1cfc9268e3b449400 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-ts-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-sample-ts-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-timestep-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-timestep-stmt.json index 
563dc86d0a1481e6b117766facf2122c75bd20f2..ffe16eccd195d7bd9d12bc09a2959d57a037513d 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-timestep-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insert-timestep-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json index f59d2e4e22e165ddf1adf8b95212d521a75737d9..37714edc74e4469dcceb13934459d3d0df13c6a4 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-error-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json index 4903335d181a0f06a0f0714072301438883f0f6e..4625da3a6b6c3d7024aab08a23c8d692291a0efe 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTab0-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTab0-stmt.json index a27feee68a7700633197791567647875e6febee4..8f5b62be9b013ed8de5220cbc16eb2f83193760e 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTab0-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTab0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTabLess0-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTabLess0-stmt.json index 50e1a7173b0b708b454559c3a718e48900467c5a..a30c3f7c781375c170cab702f05b46a141320400 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTabLess0-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertChildTabLess0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNum4096-stmt.json index ca0d17f93ba503f3b532aa2cb9245282c540c507..2966af8f238f668d5e2114197135ce831814eafe 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNum4096-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNum4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNumLarge4096-stmt.json 
b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNumLarge4096-stmt.json index c5a3a5f76de18589f3271287a78510e39acfb27f..40780dd992ee5c93507a5f19d503c73f7c78062a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNumLarge4096-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsAndTagNumLarge4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsNum0-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsNum0-stmt.json index c86e759db4377d05a2e4ec1b1b2bc4144f5689e4..cada61687e11ed251755c7e5f7c5f2d3d23dbbe3 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsNum0-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertColumnsNum0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertInterlaceRowsLarge1M-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertInterlaceRowsLarge1M-stmt.json index ee36b62f903a2d27b24b55eba9a10146d45080ee..87386853b37ebfb3a45361620cdf7d9de395df8e 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertInterlaceRowsLarge1M-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertInterlaceRowsLarge1M-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertMaxNumPerReq-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertMaxNumPerReq-stmt.json index 25086c856e72006ad579641b08858622b2209188..9e213b52a46cc183dbe7e2879ab777cacbe1fecf 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertMaxNumPerReq-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertMaxNumPerReq-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReq0-stmt.json index 4bd071ec15a56feb1ea2b119697f934620d6b8c2..5b4bfbae65dec700a9927c14705f1df47571eff3 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReq0-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReq0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReqless0-stmt.json index 628c86045fa4a33f5d2e93882ca3b56dbfc91292..efc01bb9e611a9463531c653a9912878832f8fa9 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReqless0-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertNumOfrecordPerReqless0-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertSigcolumnsNum4096-stmt.json 
b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertSigcolumnsNum4096-stmt.json index 7abab6a0cf00d3161bb85114cb07eb39d7f7a747..e6224159773ed04da1e08a696e5d65db7ee2cca7 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertSigcolumnsNum4096-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertSigcolumnsNum4096-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertTagsNumLarge128-stmt.json b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertTagsNumLarge128-stmt.json index 8f8539be2117f8706f894f92b2075848b0203216..51ac878c3794ae358e9ebd4dddc03abe1f855b26 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertTagsNumLarge128-stmt.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/stmt/insertTagsNumLarge128-stmt.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/subInsertdata.json b/tests/system-test/5-taos-tools/taosbenchmark/subInsertdata.json index 168b3753a13e6bfa2e884f5b8be4a03bb1675b2a..57e823f74a8160e12e6e3331bf718077ba5f0b43 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/subInsertdata.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/subInsertdata.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/subInsertdataMaxsql100.json b/tests/system-test/5-taos-tools/taosbenchmark/subInsertdataMaxsql100.json index 4fb7241012563143cf289f510a8b58f39841b9d0..18487defcce2224491cfe57ec7e8e3649f3849c3 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/subInsertdataMaxsql100.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/subInsertdataMaxsql100.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoInsertNanoDB.json b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoInsertNanoDB.json index 99233bdd738d068664241efda40d96c5a6fc7090..2c6e6260ff820acb0df2b357d78261648f400e1c 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoInsertNanoDB.json +++ b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoInsertNanoDB.json @@ -26,7 +26,6 @@ "minRows": 100, "maxRows": 4096, "comp":2, - "walLevel":1, "cachelast":0, "quorum":1, "fsync":3000, diff --git a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJson-childTable.py b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJson-childTable.py index 56d402baa16ddbe2f5bd277873edd5200674272d..1729f4201c4cc3490c85fd4183c8b543757c8e73 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJson-childTable.py +++ b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJson-childTable.py @@ -27,37 +27,43 @@ class TDTestCase: def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] else: - projPath = selfPath[:selfPath.find("tests")] + projPath = selfPath[: selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if "taosd" in files: 
rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + if "packaging" not in rootRealPath: + buildPath = root[: len(root) - len("/build/bin")] break return buildPath def run(self): buildPath = self.getBuildPath() - if (buildPath == ""): + if buildPath == "": tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = buildPath + "/build/bin/" testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert*_res.txt*") - os.system("rm -rf 5-taos-tools/taosbenchmark/%s.sql" % testcaseFilename ) + os.system("rm -rf 5-taos-tools/taosbenchmark/%s.sql" % testcaseFilename) # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-nodbnodrop.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-nodbnodrop.json -y" + % binPath + ) tdSql.error("show dbno.stables") - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-newdb.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-newdb.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 5) @@ -69,7 +75,10 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") tdSql.checkData(0, 0, 8) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-offset.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-offset.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -81,19 +90,25 @@ class TDTestCase: tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-newtable.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-newtable.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") - tdSql.checkData(0, 0, 340) + tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") - tdSql.checkData(0, 0, 400) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-renewdb.json -y" % binPath) + tdSql.checkData(0, 0, 160) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/insert-renewdb.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -105,15 +120,10 @@ class TDTestCase: tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) - + # rm useless files os.system("rm -rf ./insert*_res.txt*") - - - - - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonStmt.py b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonStmt.py index 
d470dee21491ad3ba1db6ba190e6e5366883e12b..885d580a9eb9784fc3bf5d442d59944c8fc0347a 100644 --- a/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonStmt.py +++ b/tests/system-test/5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonStmt.py @@ -23,33 +23,36 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] else: - projPath = selfPath[:selfPath.find("tests")] + projPath = selfPath[: selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if "taosd" in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + if "packaging" not in rootRealPath: + buildPath = root[: len(root) - len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() - if (buildPath == ""): + if buildPath == "": tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = buildPath + "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json -y " % binPath) + # insert: create one or multiple tables per sql and insert multiple rows per sql + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-1s1tnt1r-stmt.json -y " + % binPath + ) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) @@ -62,50 +65,61 @@ class TDTestCase: tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 4000) + tdSql.checkData(0, 0, 4000) - - # insert: create mutiple tables per sql and insert one rows per sql . - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json -y " % binPath) + # insert: create multiple tables per sql and insert one row per sql. + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-1s1tntmr-stmt.json -y " + % binPath + ) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 100) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 1000) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 4000) + tdSql.checkData(0, 0, 4000) - # insert: using parament "insert_interval to controls spped of insert. + # insert: using parameter "insert_interval" to control the speed of insert.
# but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-interval-speed-stmt.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 100) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 1000) tdSql.query("show stables") tdSql.checkData(1, 4, 20) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 4000) - + tdSql.checkData(0, 0, 4000) + # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json -y" % binPath) + # insert: using parameters "childtable_offset" and "childtable_limit" to control the table's offset point and limit + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-nodbnodrop-stmt.json -y" + % binPath + ) tdSql.error("show dbno.stables") - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json -y" % binPath) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-newdb-stmt.json -y" + % binPath + ) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 5) @@ -114,41 +128,50 @@ class TDTestCase: tdSql.query("select count (tbname) from stb2") tdSql.checkData(0, 0, 7) tdSql.query("select count (tbname) from stb3") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") - tdSql.checkData(0, 0, 8) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json -y" % binPath) - tdSql.execute("use db") + tdSql.checkData(0, 0, 8) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-offset-stmt.json -y" + % binPath + ) + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 240) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 220) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json -y" % binPath) - tdSql.execute("use db") + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-newtable-stmt.json -y" + % binPath + ) + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") - tdSql.checkData(0, 0, 340) + tdSql.checkData(0, 0, 180) tdSql.query("select
count(*) from stb4") - tdSql.checkData(0, 0, 400) - os.system("%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json -y" % binPath) - tdSql.execute("use db") + tdSql.checkData(0, 0, 160) + os.system( + "%staosBenchmark -f 5-taos-tools/taosbenchmark/stmt/insert-renewdb-stmt.json -y" + % binPath + ) + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 120) + tdSql.checkData(0, 0, 120) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 140) + tdSql.checkData(0, 0, 140) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from stb4") @@ -156,10 +179,8 @@ class TDTestCase: testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf 5-taos-tools/taosbenchmark/%s.sql" % testcaseFilename ) - - - + os.system("rm -rf 5-taos-tools/taosbenchmark/%s.sql" % testcaseFilename) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index e880f1e44690c117e7099cecf9e7f452003f441d..d1288213c5baa823d4ebd78e2b564de520a44aa6 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -143,13 +143,13 @@ char *simGetVariable(SScript *script, char *varName, int32_t varLen) { return var->varValue; } -int32_t simExecuteExpression(SScript *script, char *exp) { +int32_t simExecuteExpression(SScript *script, char *expr) { char * op1, *op2, *var1, *var2, *var3, *rest; int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; char t0[1024], t1[1024], t2[1024], t3[2048]; int32_t result; - rest = paGetToken(exp, &var1, &var1Len); + rest = paGetToken(expr, &var1, &var1Len); rest = paGetToken(rest, &op1, &op1Len); rest = paGetToken(rest, &var2, &var2Len); rest = paGetToken(rest, &op2, &op2Len); diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index 1acdcd2ac6eb0ecb66e2977dee7577393ed242ef..7de263000645cc3b5078ab96415620973149788a 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -251,11 +251,11 @@ SScript *simParseScript(char *fileName) { return script; } -int32_t simCheckExpression(char *exp) { +int32_t simCheckExpression(char *expr) { char * op1, *op2, *op, *rest; int32_t op1Len, op2Len, opLen; - rest = paGetToken(exp, &op1, &op1Len); + rest = paGetToken(expr, &op1, &op1Len); if (op1Len == 0) { sprintf(parseErr, "expression is required"); return -1; @@ -295,10 +295,10 @@ int32_t simCheckExpression(char *exp) { rest = paGetToken(rest, &op, &opLen); - if (opLen == 0) return (int32_t)(rest - exp); + if (opLen == 0) return (int32_t)(rest - expr); /* if it is key word "then" */ - if (strncmp(op, "then", 4) == 0) return (int32_t)(op - exp); + if (strncmp(op, "then", 4) == 0) return (int32_t)(op - expr); rest = paGetToken(rest, &op2, &op2Len); if (op2Len == 0) { @@ -312,7 +312,7 @@ int32_t simCheckExpression(char *exp) { } if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || op[0] == '.') { - return (int32_t)(rest - exp); + return (int32_t)(rest - expr); } return -1; diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index 0879e371ef62fee81786728e2b980442567fbaa1..7569d3fc7ddf0708d093b92bb9896277d7134416 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -43,9 +43,9 @@ char *simParseArbitratorName(char *varName) { char *simParseHostName(char *varName) { static char 
hostName[140]; - int32_t index = atoi(varName + 8); + int32_t idx = atoi(varName + 8); int32_t port = 7100; - switch (index) { + switch (idx) { case 1: port = 7100; break;