diff --git a/.gitmodules b/.gitmodules index 7edcdff5d3dd805ec6b222915688940c7bd7dcb9..c890f52ad1c49439a6bee4a5e25bd333b053654e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,6 @@ [submodule "src/connector/go"] path = src/connector/go url = https://github.com/taosdata/driver-go.git -[submodule "src/connector/grafanaplugin"] - path = src/connector/grafanaplugin - url = https://github.com/taosdata/grafanaplugin.git [submodule "src/connector/hivemq-tdengine-extension"] path = src/connector/hivemq-tdengine-extension url = https://github.com/taosdata/hivemq-tdengine-extension.git @@ -16,9 +13,9 @@ [submodule "deps/TSZ"] path = deps/TSZ url = https://github.com/taosdata/TSZ.git -[submodule "deps/avro"] - path = deps/avro - url = https://github.com/apache/avro +[submodule "src/kit/taos-tools"] + path = src/kit/taos-tools + url = https://github.com/taosdata/taos-tools [submodule "src/plugins/taosadapter"] path = src/plugins/taosadapter url = https://github.com/taosdata/taosadapter diff --git a/Jenkinsfile b/Jenkinsfile index 1767479054a50dc1a23ab4c63515699810a0c596..6cb8625cce86143a18595fb96ebaae5c83e6f568 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -38,7 +38,8 @@ def pre_test(){ sudo rmtaos || echo "taosd has not installed" ''' sh ''' - killall -9 taosd ||echo "no taosd running" + kill -9 $(pidof taosd) ||echo "no taosd running" + kill -9 $(pidof taosadapter) ||echo "no taosadapter running" killall -9 gdb || echo "no gdb running" killall -9 python3.8 || echo "no python program running" cd ${WKC} @@ -57,7 +58,7 @@ def pre_test(){ cd ${WKC} git checkout 2.0 ''' - } + } else{ sh ''' cd ${WKC} @@ -67,6 +68,8 @@ def pre_test(){ } sh''' cd ${WKC} + git remote prune origin + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -87,28 +90,28 @@ def pre_test(){ cd ${WK} git 
checkout 2.0 ''' - } + } else{ sh ''' cd ${WK} git checkout develop ''' - } + } } sh ''' cd ${WK} - git pull >/dev/null - + git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx mkdir debug cd debug - cmake .. > /dev/null + cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python/ + pip3 install ${WKC}/src/connector/python/ ''' return 1 } @@ -131,7 +134,7 @@ def pre_test_noinstall(){ cd ${WKC} git checkout 2.0 ''' - } + } else{ sh ''' cd ${WKC} @@ -141,6 +144,8 @@ def pre_test_noinstall(){ } sh''' cd ${WKC} + git remote prune origin + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -161,24 +166,24 @@ def pre_test_noinstall(){ cd ${WK} git checkout 2.0 ''' - } + } else{ sh ''' cd ${WK} git checkout develop ''' - } + } } sh ''' cd ${WK} - git pull >/dev/null - + git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx mkdir debug cd debug - cmake .. > /dev/null + cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=false > /dev/null make ''' return 1 @@ -202,7 +207,7 @@ def pre_test_mac(){ cd ${WKC} git checkout 2.0 ''' - } + } else{ sh ''' cd ${WKC} @@ -212,6 +217,8 @@ def pre_test_mac(){ } sh''' cd ${WKC} + git remote prune origin + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -232,24 +239,26 @@ def pre_test_mac(){ cd ${WK} git checkout 2.0 ''' - } + } else{ sh ''' cd ${WK} git checkout develop ''' - } + } } sh ''' cd ${WK} - git pull >/dev/null - + git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx mkdir debug cd debug - cmake .. 
> /dev/null + cmake .. -DBUILD_TOOLS=false > /dev/null + go env -w GOPROXY=https://goproxy.cn,direct + go env -w GO111MODULE=on cmake --build . ''' return 1 @@ -262,7 +271,7 @@ def pre_test_win(){ cd C:\\workspace\\TDinternal rd /s /Q C:\\workspace\\TDinternal\\debug cd C:\\workspace\\TDinternal\\community - git reset --hard HEAD~10 + git reset --hard HEAD~10 ''' script { if (env.CHANGE_TARGET == 'master') { @@ -276,7 +285,7 @@ def pre_test_win(){ cd C:\\workspace\\TDinternal\\community git checkout 2.0 ''' - } + } else{ bat ''' cd C:\\workspace\\TDinternal\\community @@ -286,7 +295,8 @@ def pre_test_win(){ } bat''' cd C:\\workspace\\TDinternal\\community - git pull + git remote prune origin + git pull git fetch origin +refs/pull/%CHANGE_ID%/merge git checkout -qf FETCH_HEAD git clean -dfx @@ -306,36 +316,36 @@ def pre_test_win(){ cd C:\\workspace\\TDinternal git checkout 2.0 ''' - } + } else{ bat ''' cd C:\\workspace\\TDinternal git checkout develop ''' - } + } } bat ''' cd C:\\workspace\\TDinternal - git pull + git pull date git clean -dfx mkdir debug cd debug call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" amd64 - cmake ../ -G "NMake Makefiles" - set CL=/MP4 nmake nmake || exit 8 + cmake ../ -G "NMake Makefiles" + set CL=/MP nmake nmake || exit 8 nmake install || exit 8 xcopy /e/y/i/f C:\\workspace\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 || exit 8 cd C:\\workspace\\TDinternal\\community\\src\\connector\\python python -m pip install . 
- + ''' return 1 } pipeline { agent none - options { skipDefaultCheckout() } + options { skipDefaultCheckout() } environment{ WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' @@ -343,7 +353,7 @@ pipeline { stages { stage('pre_build'){ agent{label 'master'} - options { skipDefaultCheckout() } + options { skipDefaultCheckout() } when { changeRequest() } @@ -368,32 +378,32 @@ pipeline { // sh ''' // git checkout 2.0 // ''' - // } + // } // else{ // sh ''' // git checkout develop // ''' - // } + // } // } // sh''' // git fetch origin +refs/pull/${CHANGE_ID}/merge // git checkout -qf FETCH_HEAD - // ''' + // ''' - // script{ - // skipbuild='2' + // script{ + // skipbuild='2' // skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) // println skipbuild // } // sh''' // rm -rf ${WORKSPACE}.tes // ''' - // } + // } } } stage('Parallel test stage') { //only build pr - options { skipDefaultCheckout() } + options { skipDefaultCheckout() } when { allOf{ changeRequest() @@ -412,13 +422,11 @@ pipeline { ./test-all.sh p1 date''' } - } } stage('python_2_s5') { agent{label " slave5 || slave15 "} steps { - pre_test() timeout(time: 55, unit: 'MINUTES'){ sh ''' @@ -431,8 +439,8 @@ pipeline { } stage('python_3_s6') { agent{label " slave6 || slave16 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ + steps { + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -444,8 +452,8 @@ pipeline { } stage('test_b1_s2') { agent{label " slave2 || slave12 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ + steps { + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' rm -rf /var/lib/taos/* @@ -453,23 +461,54 @@ pipeline { nohup taosd >/dev/null & sleep 10 ''' + sh ''' - cd ${WKC}/tests/examples/nodejs - npm install td2.0-connector > /dev/null 2>&1 - node nodejsChecker.js host=localhost - node test1970.js - cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport - npm 
install td2.0-connector > /dev/null 2>&1 - node nanosecondTest.js + cd ${WKC}/src/connector/python + export PYTHONPATH=$PWD/ + export LD_LIBRARY_PATH=${WKC}/debug/build/lib + pip3 install pytest + pytest tests/ + python3 examples/bind-multi.py + python3 examples/bind-row.py + python3 examples/demo.py + python3 examples/insert-lines.py + python3 examples/pep-249.py + python3 examples/query-async.py + python3 examples/query-objectively.py + python3 examples/subscribe-sync.py + python3 examples/subscribe-async.py + ''' + + sh ''' + cd ${WKC}/src/connector/nodejs + npm install + npm run test + cd ${WKC}/tests/examples/nodejs + npm install td2.0-connector > /dev/null 2>&1 + node nodejsChecker.js host=localhost + node test1970.js + cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport + npm install td2.0-connector > /dev/null 2>&1 + node nanosecondTest.js ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' + cd ${WKC}/src/connector/C# + dotnet test + dotnet run --project src/test/Cases/Cases.csproj + + cd ${WKC}/tests/examples/C# + dotnet run --project C#checker/C#checker.csproj + dotnet run --project TDengineTest/TDengineTest.csproj + dotnet run --project schemaless/schemaless.csproj + cd ${WKC}/tests/examples/C#/taosdemo - mcs -out:taosdemo *.cs > /dev/null 2>&1 - echo '' |./taosdemo -c /etc/taos + dotnet build -c Release + tree | true + ./bin/Release/net5.0/taosdemo -c /etc/taos -y ''' - } + } sh ''' cd ${WKC}/tests/gotest bash batchtest.sh @@ -483,7 +522,7 @@ pipeline { } stage('test_crash_gen_s3') { agent{label " slave3 || slave13 "} - + steps { pre_test() timeout(time: 60, unit: 'MINUTES'){ @@ -513,7 +552,7 @@ pipeline { ./test-all.sh b2fq date ''' - } + } } } stage('test_valgrind_s4') { @@ -527,8 +566,8 @@ pipeline { ./valgrind-test.sh 2>&1 > mem-error-out.log ./handle_val_log.sh ''' - } - timeout(time: 55, unit: 'MINUTES'){ + } + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -544,8 +583,8 @@ pipeline { } stage('test_b4_s7') 
{ agent{label " slave7 || slave17 "} - steps { - timeout(time: 105, unit: 'MINUTES'){ + steps { + timeout(time: 105, unit: 'MINUTES'){ pre_test() sh ''' date @@ -558,14 +597,13 @@ pipeline { // ./test-all.sh full jdbc // cd ${WKC}/tests // ./test-all.sh full unit - } } } stage('test_b5_s8') { agent{label " slave8 || slave18 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ + steps { + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -577,10 +615,14 @@ pipeline { } stage('test_b6_s9') { agent{label " slave9 || slave19 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ + steps { + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' + cd ${WKC}/tests + ./test-all.sh develop-test + ''' + sh ''' date cd ${WKC}/tests ./test-all.sh b6fq @@ -590,75 +632,79 @@ pipeline { } stage('test_b7_s10') { agent{label " slave10 || slave20 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ + steps { + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' + cd ${WKC}/tests + ./test-all.sh system-test + ''' + sh ''' date cd ${WKC}/tests ./test-all.sh b7fq - date''' + date''' } } - } + } stage('arm64centos7') { agent{label " arm64centos7 "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('arm64centos8') { agent{label " arm64centos8 "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('arm32bionic') { agent{label " arm32bionic "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('arm64bionic') { agent{label " arm64bionic "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('arm64focal') { agent{label " arm64focal "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('centos7') { agent{label " centos7 "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('ubuntu:trusty') { agent{label " trusty "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('ubuntu:xenial') { agent{label " xenial "} - steps 
{ - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('ubuntu:bionic') { agent{label " bionic "} - steps { - pre_test_noinstall() + steps { + pre_test_noinstall() } } stage('Mac_build') { agent{label " catalina "} - steps { - pre_test_mac() + steps { + pre_test_mac() } } @@ -666,7 +712,7 @@ pipeline { agent{label " wintest "} steps { pre_test() - script{ + script{ while(win_stop == 0){ sleep(1) } @@ -676,7 +722,6 @@ pipeline { stage('test'){ agent{label "win"} steps{ - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { pre_test_win() timeout(time: 20, unit: 'MINUTES'){ @@ -685,18 +730,16 @@ pipeline { .\\test-all.bat wintest ''' } - } + } script{ win_stop=1 } } } - - } } } - post { + post { success { emailext ( subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS", @@ -723,7 +766,6 @@ pipeline {
  • 提交信息:${env.CHANGE_TITLE}
  • 构建地址:${BUILD_URL}
  • 构建日志:${BUILD_URL}console
  • - @@ -761,7 +803,6 @@ pipeline {
  • 提交信息:${env.CHANGE_TITLE}
  • 构建地址:${BUILD_URL}
  • 构建日志:${BUILD_URL}console
  • - @@ -773,5 +814,5 @@ pipeline { from: "support@taosdata.com" ) } - } + } } diff --git a/README-CN.md b/README-CN.md index f851a906b88a0676abdc39150a2a93ae7fbe7f56..894bdb1c3eeb77196076c75cd2292d57d58cab79 100644 --- a/README-CN.md +++ b/README-CN.md @@ -254,23 +254,25 @@ Query OK, 2 row(s) in set (0.001700s) TDengine 提供了丰富的应用程序开发接口,其中包括C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用: -- Java +- [Java](https://www.taosdata.com/cn/documentation/connector/java) -- C/C++ +- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp) -- Python +- [Python](https://www.taosdata.com/cn/documentation/connector#python) -- Go +- [Go](https://www.taosdata.com/cn/documentation/connector#go) -- RESTful API +- [RESTful API](https://www.taosdata.com/cn/documentation/connector#restful) -- Node.js +- [Node.js](https://www.taosdata.com/cn/documentation/connector#nodejs) + +- [Rust](https://www.taosdata.com/cn/documentation/connector/rust) ## 第三方连接器 TDengine 社区生态中也有一些非常友好的第三方连接器,可以通过以下链接访问它们的源码。 -- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust) +- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples) - [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos) - [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua) diff --git a/README.md b/README.md index edca04afd486687ea8653e955ae50da457f77ab9..31973af6c7baa6fbbe78dcedda52c40a942752b9 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ At the moment, TDengine only supports building and running on Linux systems. You To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in the project directory. 
-## Install tools +## Install build dependencies ### Ubuntu 16.04 and above & Debian: ```bash @@ -58,7 +58,16 @@ To install Apache Maven: sudo apt-get install -y maven ``` -### Centos 7: +#### Install build dependencies for taos-tools +We provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. From TDengine 2.4.0.0, taosBenchmark and taosdump were not released together with TDengine. +By default, TDengine compiling does not include taos-tools. You can use 'cmake .. -DBUILD_TOOLS=true' to make them be compiled with TDengine. + +To build the [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. +```bash +sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config +``` + +### CentOS 7: ```bash sudo yum install epel-release sudo yum update @@ -76,7 +85,7 @@ To install Apache Maven: sudo yum install -y maven ``` -### Centos 8 & Fedora: +### CentOS 8 & Fedora: ```bash sudo dnf install -y gcc gcc-c++ make cmake epel-release git ``` @@ -91,6 +100,13 @@ To install Apache Maven: sudo dnf install -y maven ``` +#### Install build dependencies for taos-tools +To build the [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed. +```bash +sudo yum install zlib-devel xz-devel snappy-devel jansson-devel pkgconfig libatomic +``` +Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it lead a cmake prompt libsnappy not found. But snappy will works well. + ### Setup golang environment TDengine includes few components developed by Go language. Please refer to golang.org official documentation for golang environment setup. 
@@ -108,7 +124,7 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -The connectors for go & grafana have been moved to separated repositories, +The connectors for go & grafana and some tools have been moved to separated repositories, so you should run this command in the TDengine directory to install them: ```bash git submodule update --init --recursive @@ -129,15 +145,15 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` -Note TDengine 2.3.x.0 and later use a component named 'taosadapter' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The taosadapter is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull taosadapter source code. Please install go language 1.14 or above for compiling taosadapter. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem. +Note TDengine 2.3.x.0 and later use a component named 'taosAdapter' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The taosAdapter is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull taosAdapter source code. Please install go language version 1.14 or above for compiling taosAdapter. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem. ``` go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct ``` -Or you can use the following command to choose to embed old httpd too. +The embedded http daemon still be built from TDengine source code by default. Or you can use the following command to choose to build taosAdapter. ``` -cmake .. -DBUILD_HTTP=true +cmake .. 
-DBUILD_HTTP=false ``` You can use Jemalloc as memory allocator instead of glibc: @@ -234,7 +250,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list [Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list sudo apt-get update -apt-get policy tdengine +apt-cache policy tdengine sudo apt-get install tdengine ``` @@ -269,18 +285,19 @@ drop database db; TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. -- [Java](https://www.taosdata.com/en/documentation/connector/#Java-Connector) -- [C/C++](https://www.taosdata.com/en/documentation/connector/#C/C++-Connector) -- [Python](https://www.taosdata.com/en/documentation/connector/#Python-Connector) -- [Go](https://www.taosdata.com/en/documentation/connector/#Go-Connector) -- [RESTful API](https://www.taosdata.com/en/documentation/connector/#RESTful-Connector) -- [Node.js](https://www.taosdata.com/en/documentation/connector/#Node.js-Connector) +- [Java](https://www.taosdata.com/en/documentation/connector/java) +- [C/C++](https://www.taosdata.com/en/documentation/connector#c-cpp) +- [Python](https://www.taosdata.com/en/documentation/connector#python) +- [Go](https://www.taosdata.com/en/documentation/connector#go) +- [RESTful API](https://www.taosdata.com/en/documentation/connector#restful) +- [Node.js](https://www.taosdata.com/en/documentation/connector#nodejs) +- [Rust](https://www.taosdata.com/en/documentation/connector/rust) ### Third Party Connectors The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them. 
-- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust) +- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples) - [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos) - [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua) diff --git a/cmake/define.inc b/cmake/define.inc index 92044b8c2dd3710c5a1808abcecd7d2358230e7a..6877ee7257cab244c6cc2872e44fa899a798c856 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -53,6 +53,14 @@ IF (TD_PRO) ADD_DEFINITIONS(-D_TD_PRO_) ENDIF () +IF (TD_KH) + ADD_DEFINITIONS(-D_TD_KH_) +ENDIF () + +IF (TD_JH) + ADD_DEFINITIONS(-D_TD_JH_) +ENDIF () + IF (TD_MEM_CHECK) ADD_DEFINITIONS(-DTAOS_MEM_CHECK) ENDIF () @@ -128,38 +136,68 @@ IF (TD_ALPINE) MESSAGE(STATUS "aplhine is defined") ENDIF () + IF ("${BUILD_HTTP}" STREQUAL "") IF (TD_LINUX) IF (TD_ARM_32) - SET(BUILD_HTTP "true") + SET(TD_BUILD_HTTP TRUE) ELSE () - SET(BUILD_HTTP "false") + SET(TD_BUILD_HTTP TRUE) ENDIF () ELSEIF (TD_DARWIN) - SET(BUILD_HTTP "false") + SET(TD_BUILD_HTTP TRUE) ELSE () - SET(BUILD_HTTP "true") + SET(TD_BUILD_HTTP TRUE) ENDIF () -ENDIF () - -IF (${BUILD_HTTP} MATCHES "true") - SET(TD_BUILD_HTTP TRUE) ELSEIF (${BUILD_HTTP} MATCHES "false") SET(TD_BUILD_HTTP FALSE) +ELSEIF (${BUILD_HTTP} MATCHES "true") + SET(TD_BUILD_HTTP TRUE) +ELSE () + SET(TD_BUILD_HTTP TRUE) ENDIF () IF (TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF () -IF ("${AVRO_SUPPORT}" MATCHES "true") - SET(TD_AVRO_SUPPORT TRUE) -ELSEIF ("${AVRO_SUPPORT}" MATCHES "false") - SET(TD_AVRO_SUPPORT FALSE) +IF ("${BUILD_TOOLS}" STREQUAL "") + IF (TD_LINUX) + IF (TD_ARM_32) + SET(BUILD_TOOLS "false") + ELSEIF (TD_ARM_64) + SET(BUILD_TOOLS "false") + ELSE () + SET(BUILD_TOOLS "false") + ENDIF () + ELSEIF (TD_DARWIN) + SET(BUILD_TOOLS "false") + ELSE () + SET(BUILD_TOOLS "false") + ENDIF () +ENDIF () + +IF ("${BUILD_TOOLS}" MATCHES "false") + MESSAGE("${Yellow} 
Will _not_ build taos_tools! ${ColourReset}") + SET(TD_TAOS_TOOLS FALSE) +ELSE () + MESSAGE("") + MESSAGE("${Green} Will build taos_tools! ${ColourReset}") + MESSAGE("") + SET(TD_TAOS_TOOLS TRUE) +ENDIF () + +IF (${BUILD_LUA} MATCHES "false") + SET(TD_BUILD_LUA FALSE) ENDIF () -IF (TD_AVRO_SUPPORT) - ADD_DEFINITIONS(-DAVRO_SUPPORT) +IF (TD_BUILD_LUA) + MESSAGE("Enable lua") + ADD_DEFINITIONS(-DLUA_EMBEDDED) + SET(LINK_LUA "lua") +ELSE () + MESSAGE("Disable lua") + SET(LINK_LUA "") ENDIF () IF (TD_LINUX) diff --git a/cmake/input.inc b/cmake/input.inc index 0812711a5824ce0b328374fcdd04fc5f229ad01c..14ac795e7edaef2569ea4a01b4da5e3251e6d7ff 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -52,6 +52,12 @@ ELSEIF (${DBNAME} MATCHES "tq") ELSEIF (${DBNAME} MATCHES "pro") SET(TD_PRO TRUE) MESSAGE(STATUS "pro is true") +ELSEIF (${DBNAME} MATCHES "kh") + SET(TD_KH TRUE) + MESSAGE(STATUS "kh is true") +ELSEIF (${DBNAME} MATCHES "jh") + SET(TD_JH TRUE) + MESSAGE(STATUS "jh is true") ENDIF () IF (${DLLTYPE} MATCHES "go") @@ -90,9 +96,11 @@ IF (${BUILD_JDBC} MATCHES "false") SET(TD_BUILD_JDBC FALSE) ENDIF () -SET(TD_BUILD_HTTP FALSE) +SET(TD_BUILD_HTTP TRUE) -SET(TD_AVRO_SUPPORT FALSE) +SET(TD_TAOS_TOOLS TRUE) + +SET(TD_BUILD_LUA TRUE) SET(TD_MEMORY_SANITIZER FALSE) IF (${MEMORY_SANITIZER} MATCHES "true") diff --git a/cmake/install.inc b/cmake/install.inc index c90aa3f9511e416106309e603853028e7096f082..111efdae2dc3d186db16114ef238ebaddc5e5924 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -5,8 +5,14 @@ IF (TD_LINUX) ELSEIF (TD_WINDOWS) IF (TD_POWER) SET(CMAKE_INSTALL_PREFIX C:/PowerDB) + ELSEIF (TD_TQ) + SET(CMAKE_INSTALL_PREFIX C:/TQueue) ELSEIF (TD_PRO) SET(CMAKE_INSTALL_PREFIX C:/ProDB) + ELSEIF (TD_KH) + SET(CMAKE_INSTALL_PREFIX C:/KingHistorian) + ELSEIF (TD_JH) + SET(CMAKE_INSTALL_PREFIX C:/jh_iot) ELSE () SET(CMAKE_INSTALL_PREFIX C:/TDengine) ENDIF () @@ -25,11 +31,16 @@ ELSEIF (TD_WINDOWS) IF (TD_POWER) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe 
DESTINATION .) + ELSEIF (TD_TQ) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/tq.exe DESTINATION .) ELSEIF (TD_PRO) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/prodbc.exe DESTINATION .) + ELSEIF (TD_KH) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/khclient.exe DESTINATION .) + ELSEIF (TD_JH) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .) ELSE () INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .) ENDIF () #INSTALL(TARGETS taos RUNTIME DESTINATION driver) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 38f36c4ed6678675cecfa9c0da1a3d065b58da86..a8b4fd288ea83676c98fa9db5acc464b42f51992 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -15,7 +15,10 @@ ADD_SUBDIRECTORY(cJson) ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(MsvcLibX) ADD_SUBDIRECTORY(rmonotonic) -ADD_SUBDIRECTORY(lua) + +IF (TD_BUILD_LUA) + ADD_SUBDIRECTORY(lua) +ENDIF () IF (TD_LINUX AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) @@ -25,30 +28,6 @@ IF (TD_DARWIN AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) ENDIF () -IF (TD_AVRO_SUPPORT) - MESSAGE("") - MESSAGE("${Green} ENABLE avro format support ${ColourReset}") - MESSAGE("") - include(ExternalProject) - ExternalProject_Add( - apache-avro - PREFIX "avro" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c - BUILD_IN_SOURCE 1 - PATCH_COMMAND - COMMAND git clean -f -d - COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt - COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt - COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt - COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt - CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build - ) -ELSE () - MESSAGE("") - MESSAGE("${Yellow} NO avro format support ${ColourReset}") - MESSAGE("") 
-ENDIF () - IF (TD_LINUX_64 AND JEMALLOC_ENABLED) MESSAGE("") MESSAGE("${Green} ENABLE jemalloc ${ColourReset}") diff --git a/deps/avro b/deps/avro deleted file mode 160000 index a1fce29d9675b4dd95dfee9db32cc505d0b2227c..0000000000000000000000000000000000000000 --- a/deps/avro +++ /dev/null @@ -1 +0,0 @@ -Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 79388a2edb9404a0f7b31b9182eb5ce2cb0d52be..232d2633f1013c7fc21b862023cd4f6552a0524c 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -357,7 +357,7 @@ KILL STREAM ; TDengine启动后,会自动创建一个监测数据库SYS,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在SYS库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 -这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 +这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项monitor将其关闭或打开。 ## 文件目录结构 diff --git a/documentation/webdocs/markdowndocs/administrator.md b/documentation/webdocs/markdowndocs/administrator.md index 58b9865be7c044d321e8b43bb6908ce8f1c967e9..05fd408a33f587654c78fc6a62da858b2e15896f 100644 --- a/documentation/webdocs/markdowndocs/administrator.md +++ b/documentation/webdocs/markdowndocs/administrator.md @@ -36,7 +36,7 @@ This section lists only the most important configuration parameters. 
Please chec - maxUsers: maximum number of users allowed - maxDbs: maximum number of databases allowed - maxTables: maximum number of tables allowed -- enableMonitor: turn on/off system monitoring, 0: off, 1: on +- monitor: turn on/off system monitoring, 0: off, 1: on - logDir: log directory, default is /var/log/taos - numOfLogLines: maximum number of lines in the log file - debugFlag: log level, 131: only error and warnings, 135: all diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index a4fba357bbc47bd84ea7c2a6931a64bf274e5d9b..d94a58eebb129e84137a8c55b1ca07be37ec15af 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -83,6 +83,10 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它 * [Rust Connector](/connector/rust): Rust语言下通过libtaos客户端或RESTful接口,连接TDengine服务器。 +## [TDengine 组件与工具](/tools/adapter) + +* [taosAdapter用户手册](/tools/adapter) + ## [与其他工具的连接](/connections) * [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据 @@ -110,7 +114,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [用户管理](/administrator#user):添加、删除TDengine用户,修改用户密码 * [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入 * [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出 -* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等 +* [系统连接、任务查询管理](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等 +* [系统监控](/administrator#monitoring):系统监控,使用TDinsight进行集群监控等 * [性能优化](/administrator#optimize):对长期运行的系统进行维护优化,保障性能表现 * [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录 * [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表 diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md index 3f91dbb35130a2ff78e5ef23219b79433af33ce3..c01c2efb514c22883bbc9a8bd07a974ba37d3019 100644 --- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md +++ 
b/documentation20/cn/02.getting-started/02.taosdemo/docs.md @@ -4,7 +4,7 @@ 自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosdemo 用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosdemo 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosdemo 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。 -运行 taosdemo 很简单,通过下载 TDengine 安装包(https://www.taosdata.com/cn/all-downloads/)或者自行下载 TDengine 代码(https://github.com/taosdata/TDengine)编译都可以在安装目录或者编译结果目录中找到并运行。 +运行 taosdemo 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。 接下来本文为大家讲解 taosdemo 的使用介绍及注意事项。 @@ -364,7 +364,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 } ``` 以下为 JSON 文件中和查询相关的特有参数含义: - +``` "query_times": 每种查询类型的查询次数 "query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“resetful”:使用restfule接口。可选项。缺省是“taosc”。 "specified_table_query": { 指定表的查询 @@ -379,7 +379,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 "threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。 "sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 - +``` 以下为一个典型订阅 JSON 示例文件内容: ``` @@ -422,13 +422,13 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 } ``` 以下为订阅功能相关的特有参数含义: - +``` "interval": 执行订阅的间隔,单位是秒。可选项,缺省是0。 "restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限) "keepProgress": 保留订阅信息进度。yes表示保留订阅信息,no表示不保留。该值为yes,restart为no时,才能继续之前的订阅。 "resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 - +``` 结语 -- TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。 @@ -439,7 +439,8 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维 附录 - 完整 taosdemo 参数介绍 -- -taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用json格式的配置文件。 
+taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 + 一、命令行参数 -f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 @@ -505,11 +506,12 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 --help: 打印命令参数列表。 -二、json格式的配置文件中所有参数说明 +二、JSON 格式的配置文件中所有参数说明 taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 -1、插入功能测试的json配置文件 +1、插入功能测试的 JSON 配置文件 +``` { "filetype": "insert", "cfgdir": "/etc/taos", @@ -571,6 +573,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta }] }] } +``` "filetype": 本taosdemo实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 @@ -596,11 +599,11 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "interlace_rows": 设置轮询插入每个单表数据的条目数,如果interlace_rows*childtable_count*supertable_num小于num_of_records_per_req时,则请求插入的数目以interlace_rows*childtable_count*supertable_num为准。可选项,缺省是0。 -"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taood限制的1M长度(1048576)。可选项,缺省是INT64_MAX 32766(受服务端限制)。0代表不插入数据,建议配置大于0。 +"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taosd限制的1M长度(1048576)。0代表不插入数据,建议配置大于0。 "databases": [{ -"dbinfo": { ​ "name": 数据库名称。必选项。 +"dbinfo": {"name": 数据库名称。必选项。 "drop": 如果数据库已经存在,”yes“:删除后重建;”no“:不删除,直接使用。可选项,缺省是”no“。drop = yes 会使其他子表创建相关条目无效。 @@ -695,8 +698,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "count":该类型的连续列个数,可选项,缺省是1。 }] -2、查询功能测试的json配置文件 +2、查询功能测试的 JSON 配置文件 +``` { "filetype": "query", "cfgdir": "/etc/taos", @@ -734,7 +738,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ] } } -​ +``` "filetype": 本taosdemo实例进行哪种功能测试。"query"表示数据查询功能。必选项。 @@ -784,8 +788,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。 -3、订阅功能测试的json配置文件 +3、订阅功能测试的 JSON 配置文件 +``` { "filetype":"subscribe", "cfgdir": "/etc/taos", @@ -822,7 +827,8 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "result": "./subscribe_res1.txt" }] } - } +} +``` "filetype": 本taosdemo实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** @@ -878,4 +884,4 
@@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "sql": " select count(*) from xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 -​ "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 +"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index adbba4603b94c689cab2e0aaaedf0e232ae3d1f4..f38522b5c257fdb3f72e833e72f14f4c9acdefb0 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -30,7 +30,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list [ beta 版安装包仓库为可选安装项 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list sudo apt-get update -apt-get policy tdengine +apt-cache policy tdengine sudo apt-get install tdengine ``` @@ -178,7 +178,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); ## taosdemo 详细功能列表 taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 -taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试?](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。 +taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。 ## 客户端和报警模块 @@ -190,7 +190,7 @@ taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性 ### TDengine 服务器支持的平台列表 -| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | +| | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | 
--------------------- | --------------------- | | X64 | ● | ● | | ○ | ● | ● | ● | | 龙芯 MIPS64 | | | ● | | | | | diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index a82aecd97c832f9b7f276ec27832097e46845dfc..d32a23e9a187e662cf00e2fbe4864472a859b3e0 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -22,19 +22,19 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, **Tips:** -- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M)。 +- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过1M 。 - TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。 - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 ## 无模式(Schemaless)写入 **前言** -
    在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 +
    在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入,接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
    目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
    无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 **无模式写入行协议** -
    TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 Json 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 +
    TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 JSON 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 对于InfluxDB、OpenTSDB的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。 @@ -74,21 +74,19 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ### 无模式写入的主要处理逻辑 无模式写入按照如下原则来处理行数据: -1. 当 tag_set 中有 ID 字段时,该字段的值将作为子表的表名。 -2. 没有 ID 字段时,将使用如下规则来生成子表名: -首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串 +
    1. 将使用如下规则来生成子表名:首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串 ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` 需要注意的是,这里的tag_key1, tag_key2并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。 -
    3. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 -
    4. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 -
    5. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 -
    6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 -
    7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 -
    8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 -
    9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 +
    2. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 +
    3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 确定的子表名来创建子表。 +
    4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 +
    5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 +
    6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 +
    7. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 +
    8. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 **备注:**
    无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 @@ -99,8 +97,8 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 | **序号** | **值** | **说明** | | ---- | ------------------- | ------------ | | 1 | SML_LINE_PROTOCOL | InfluxDB行协议(Line Protocol) | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB文本行协议 | -| 3 | SML_JSON_PROTOCOL | Json协议格式 | +| 2 | SML_TELNET_PROTOCOL | OpenTSDB 文本行协议 | +| 3 | SML_JSON_PROTOCOL | JSON 协议格式 |
    在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示:
    @@ -116,6 +114,17 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。 +**数据模式映射规则** +
    本节将说明行协议的数据如何映射成为具有模式的数据。每个行协议中数据 measurement 映射为 超级表名称。tag_set 中的 标签名称为 数据模式中的标签名,field_set 中的名称为列名称。以如下数据为例,说明映射规则: + +```json +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 +``` +该行数据映射生成一个超级表: st, 其包含了 3 个类型为 nchar 的标签,分别是:t1, t2, t3。五个数据列,分别是ts(timestamp),c1 (bigint),c3(binary),c2 (bool), c4 (bigint)。映射成为如下 SQL 语句: +```json +create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) +``` + **数据模式变更处理**
    本节将说明不同行数据写入情况下,对于数据模式的影响。 @@ -145,7 +154,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
    如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR 错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 taos_errstr 获取具体的错误原因。 **后续升级计划** -
    当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 Taos Adapter 采用 REST 的方式直接写入无模式数据。 +
    当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 taosAdapter 采用 REST 的方式直接写入无模式数据。 ## Prometheus 直接写入 @@ -241,10 +250,10 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf 直接写入(通过 taosadapter) +## Telegraf 直接写入(通过 taosAdapter) 安装 Telegraf 请参考[官方文档](https://portal.influxdata.com/downloads/)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 配置方法,在 /etc/telegraf/telegraf.conf 增加如下文字,其中 database name 请填写希望在 TDengine 保存 Telegraf 数据的数据库名,TDengine server/cluster host、username和 password 填写 TDengine 实际值: ``` @@ -264,14 +273,14 @@ sudo systemctl start telegraf ``` 即可在 TDengine 中查询 metrics 数据库中 Telegraf 写入的数据。 -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## collectd 直接写入(通过 taosadapter) +## collectd 直接写入(通过 taosAdapter) 安装 collectd,请参考[官方文档](https://collectd.org/download.shtml)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。 -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` LoadPlugin network @@ -282,15 +291,15 @@ LoadPlugin network ``` sudo systemctl start collectd ``` -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## StatsD 直接写入(通过 taosadapter) +## StatsD 直接写入(通过 taosAdapter) 安装 StatsD 请参考[官方文档](https://github.com/statsd/statsd)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` backends 
部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } @@ -305,123 +314,14 @@ port: 8125 } ``` -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 ## 使用 Bailongma 2.0 接入 Telegraf 数据写入 -*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosadapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 - -[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 - -### 从源代码编译 blm_telegraf - -用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - -- Linux操作系统的服务器 -- 安装好Golang,1.10版本以上 -- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器) - -Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下: - -```bash -cd blm_telegraf -go build -``` - -一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 - -### 安装 Telegraf - -目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。 - -### 配置 Telegraf - -修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 - -在output plugins部分,增加[[outputs.http]]配置项: - -- url:Bailongma API服务提供的URL,参考下面的启动示例章节 -- data_format:"json" -- json_timestamp_units:"1ms" - -在agent部分: - -- hostname: 区分不同采集设备的机器名称,需确保其唯一性。 -- metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。 - -关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 - -### 启动 blm_telegraf 程序 - -blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 - -```bash ---host -TDengine服务端的IP地址,缺省值为空。 - ---batch-size 
-blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。 - ---dbname -设置在TDengine中创建的数据库名称,blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。 - ---dbuser -设置访问TDengine的用户名,缺省值是'root'。 - ---dbpassword -设置访问TDengine的密码,缺省值是'taosdata'。 - ---port -blm_telegraf对telegraf提供服务的端口号。 -``` - -### 启动示例 +**注意:** +TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosAdapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 及之前版本将逐步不再维护。 -通过以下命令启动一个blm_telegraf的API服务: -```bash -./blm_telegraf -host 127.0.0.1 -port 8089 -``` - -假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项: - -```yaml -url = "http://10.1.2.3:8089/telegraf" -``` - -### 查询 telegraf 写入数据 - -telegraf产生的数据格式如下: -```json -{ - "fields": { - "usage_guest": 0, - "usage_guest_nice": 0, - "usage_idle": 89.7897897897898, - "usage_iowait": 0, - "usage_irq": 0, - "usage_nice": 0, - "usage_softirq": 0, - "usage_steal": 0, - "usage_system": 5.405405405405405, - "usage_user": 4.804804804804805 - }, - - "name": "cpu", - "tags": { - "cpu": "cpu2", - "host": "bogon" - }, - "timestamp": 1576464360 -} -``` - -其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。 - -```mysql -use telegraf; -select * from cpu; -``` ## EMQ Broker 直接写入 diff --git a/documentation20/cn/08.connector/02.rust/docs.md b/documentation20/cn/08.connector/02.rust/docs.md index 01d4087e3acf2eed2dbea207d6d48ff360b5aece..b1afdf363cc2559552d2562444510aaa8d49a2d5 100644 --- a/documentation20/cn/08.connector/02.rust/docs.md +++ b/documentation20/cn/08.connector/02.rust/docs.md @@ -12,7 +12,7 @@ 默认情况下,[libtaos-rs] 使用 C 接口连接数据库,所以您需要: -- [TDengine] [客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) +- 
[TDengine客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) - `clang`: `bindgen` 使用 `libclangAST` 来生成对应的Rust绑定。 ## 特性列表 @@ -62,7 +62,7 @@ libtaos = { version = "*", features = ["r2d2"] } libtaos = { version = "*", features = ["rest"] } ``` -本项目中提供一个 [示例程序]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) 如下: +本项目中提供一个 [示例程序](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) 如下: ```rust // ... diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index a1689151aabd82b93821a11cb6de107090db0fae..7806de6093b422b40938b701d85c1512b32945ec 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -145,7 +145,7 @@ taos> | **CPU类型** | x64(64bit) | | | ARM64 | ARM32 | | ------------ | ------------ | -------- | -------- | -------- | ---------- | | **OS类型** | Linux | Win64 | Win32 | Linux | Linux | -| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** | +| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** | C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 *taos.h*,里面列出了提供的API的函数原型。安装后,taos.h位于: @@ -208,6 +208,8 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。 + **提示:** 同一进程可以根据不同的host/port 连接多个taosd 集群 + - `char *taos_get_server_info(TAOS *taos)` 获取服务端版本信息。 @@ -1037,43 +1039,62 @@ HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC ## CSharp Connector -C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 +* C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 + +* C#连接器现在也支持从[Nuget下载引用](https://www.nuget.org/packages/TDengine.Connector/) +* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 ### 安装准备 * 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 -* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 -* 
在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 +* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 +* 安装[.NET SDK](https://dotnet.microsoft.com/download) ### 示例程序 -示例程序源码位于install_directory/examples/C#,有: +示例程序源码位于 +* {client_install_directory}/examples/C# +* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523) -TDengineTest.cs C#示例源程序 +**注意:** TDengineTest.cs C#示例源程序,包含了数据库连接参数,以及如何执行数据插入、查询等操作。 ### 安装验证 -运行install_directory/examples/C#/C#Checker/C#Checker.exe - +需要先安装 .Net SDK ```cmd -cd {install_directory}/examples/C#/C#Checker -csc /optimize *.cs -C#Checker.exe -h +cd {client_install_directory}/examples/C#/C#Checker +//运行测试 +dotnet run -- -h . // 此步骤会先build,然后再运行。 ``` ### C#连接器的使用 在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示: -1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。 -2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作。 +需要 .NET SDK +* 创建一个c# project. +``` cmd +mkdir test +cd test +dotnet new console +``` +* 通过Nuget引用TDengineDriver包 +``` cmd +dotnet add package TDengine.Connector +``` +* 在项目中需要用到TDengineConnector的地方引用TDengineDriver namespace。 +```c# +using TDengineDriver; +``` +* 用户可以参考[TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest)来定义数据库连接参数,以及如何执行数据插入、查询等操作。 -此接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。 **注意:** -1. TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。 -2. 
此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。 +* TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。 +* 此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。 +* 此连接器需要用到taos.dll文件,所以在未安装客户端时需要在执行应用程序前,拷贝Windows{client_install_directory}/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。 + ### 第三方驱动 @@ -1252,7 +1273,7 @@ node nodejsChecker.js host=localhost ### Node.js连接器的使用 -以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)。 +以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](https://github.com/taosdata/TDengine/tree/develop/src/connector/nodejs)。 #### 建立连接 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 4ef1c60112018cb29289314e199feda75ca7c2ba..7b7b2262d470f5226eef780a9971894a65663579 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -114,7 +114,7 @@ taosd -C 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是可以工作的,一般无需设置。**注意:配置文件参数修改后,需要重启*taosd*服务,或客户端应用才能生效。** -| **#** | **配置参数名称** | **内部** | **S\|C** | **单位** | **含义** | **取值范围** | **缺省值** | **备注** | +| **#** | **配置参数名称** | **内部** | **SC** | **单位** | **含义** | **取值范围** | **缺省值** | **补充说明** | | ----- | ----------------------- | -------- | -------- | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | 1 | firstEP | | **SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | | | 2 | secondEP | YES | **SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | | @@ -166,7 +166,7 @@ taosd -C | 48 | mqttPort | YES | **S** | | mqtt client name | | | 1883 | | 49 | mqttTopic | YES | **S** | | | | | /test | | 50 | compressMsgSize | | **S** | 
bytes | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。 | `0 `表示对所有的消息均进行压缩 >0: 超过该值的消息才进行压缩 -1: 不压缩 | -1 | | -| 51 | maxSQLLength | | **C** | bytes | 单条SQL语句允许的最长限制 | 65480-1048576 | 65380 | | +| 51 | maxSQLLength | | **C** | bytes | 单条SQL语句允许的最长限制 | 65480-1048576 | 1048576 | | | 52 | maxNumOfOrderedRes | | **SC** | | 支持超级表时间排序允许的最多记录数限制 | | 10万 | | | 53 | timezone | | **SC** | | 时区 | | 从系统中动态获取当前的时区设置 | | | 54 | locale | | **SC** | | 系统区位信息及编码格式 | | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 | | @@ -223,7 +223,6 @@ taosd -C | 105 | compressColData | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对服务器端查询结果进行列压缩的阈值。 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 | -1 | 2.3.0.0 版本新增。 | | 106 | tsdbMetaCompactRatio | | **C** | | tsdb meta文件中冗余数据超过多少阈值,开启meta文件的压缩功能 | 0:不开启,[1-100]:冗余数据比例 | 0 | | | 107 | rpcForceTcp | | **SC**| | 强制使用TCP传输 | 0: 不开启 1: 开启 | 0 | 在网络比较差的环境中,建议开启。2.0版本新增。| -| 107 | rpcForceTcp | | **SC** | | 强制使用TCP传输。 | 0: 不开启 1: 开启 | 0 | 在网络比较差的环境中,建议开启。2.0 版本新增。 | **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)) @@ -553,11 +552,55 @@ KILL STREAM ; 强制关闭流式计算,其中的中stream-id是SHOW STREAMS中显示的connection-id:stream-no字串,如103:2,拷贝粘贴即可。 -## 系统监控 +## 系统监控 TDengine启动后,会自动创建一个监测数据库log,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在log库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 -这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 +这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项monitor将其关闭或打开。 + +### TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案 + +从 2.3.3.0 开始,监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) 了解如何使用 TDinsight 方案对 TDengine 进行监控。 + +我们提供了一个自动化脚本 `TDinsight.sh` 对TDinsight进行部署。 + +下载 `TDinsight.sh`: + +```bash +wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh +chmod +x 
TDinsight.sh +``` + +准备: + +1. TDengine Server 信息: + * TDengine RESTful 服务:对本地而言,可以是 http://localhost:6041 ,使用参数 `-a`。 + * TDengine 用户名和密码,使用 `-u` `-p` 参数设置。 + +2. Grafana 告警通知 + * 使用已经存在的Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。 + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E + ``` + + * 使用 TDengine 数据源插件内置的阿里云短信告警通知,使用 `-s` 启用之,并设置如下参数: + 1. 阿里云短信服务Key ID,参数 `-I` + 2. 阿里云短信服务Key Secret,参数 `K` + 3. 阿里云短信服务签名,参数 `-S` + 4. 短信通知模板号,参数 `-C` + 5. 短信通知模板输入参数,JSON格式,参数 `-T`,如 `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` + 6. 逗号分隔的通知手机列表,参数 `-B` + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ + -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ + -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' + ``` + +运行程序并重启 Grafana 服务,打开面板:。 + +更多使用场景和限制请参考[TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) 文档。 ## 性能优化 @@ -687,7 +730,7 @@ rmtaos - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” - 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) - 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) -- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte +- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte - 数据库副本数:不能超过 3 - 用户名:不能超过 23 个 byte - 用户密码:不能超过 15 个 byte diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 18c39d665483997f1680f0253baddd8ceabcf1d9..31e1ed4cc1ba7372a276391b2711a56f63b63ecc 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -834,7 +834,7 @@ UNION ALL SELECT ... [UNION ALL SELECT ...] 
``` -TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。 +TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。在同一个sql语句中,UNION ALL 最多支持100个。 ### SQL 示例 @@ -1589,7 +1589,7 @@ SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters - 表名最大长度为 192,每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。 -- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。 +- SQL 语句最大长度 1048576 个字符,也可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。 - SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 @@ -1603,3 +1603,18 @@ TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进 IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 +## 表(列)名合法性说明 +TDengine 中的表(列)名命名规则如下: +只能由字母、数字、下划线构成,数字不能在首位,长度不能超过192字节,不区分大小写。 + +转移后表(列)名规则: +为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。 +转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 + +例如: +\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 + +需要注意的是转义字符中的内容必须是可打印字符。 + +支持版本 +支持转义符的功能从 2.3.0.1 版本开始。 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index eb5f20e708bb4bb592a1ab2d535fcf261457b989..9132e8dca63c47e4b22ad87ef9fd4d4a1997077a 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -185,23 +185,23 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 | TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 | | TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 | | TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 | -| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | -| TCP | 
6045 | 支持 collectd 数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 | | TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | | | UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 | | UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | ## 20. go 语言编写组件编译失败怎样解决? -新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosadapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosadapter 仓库代码后再编译。 +新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosAdapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 +使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 -目前编译方式默认自动编译 taosadapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: +目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: ```sh go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct ``` -如果希望继续使用之前的内置 httpd,可以关闭 taosadapter 编译,使用 +如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. 
-DBUILD_HTTP=true` 使用原来内置的 httpd。 diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md index 04765602dab18fbacf7d92d44ca324db660c0ac4..485e7038f0e8aa122b20ba6608a629de66d7dc8c 100644 --- a/documentation20/cn/14.devops/01.telegraf/docs.md +++ b/documentation20/cn/14.devops/01.telegraf/docs.md @@ -25,8 +25,8 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### Grafana 请参考[官方文档](https://grafana.com/grafana/download)。 -### 安装 TDengine -从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.3.0.0 或以上版本安装。 +### TDengine +从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.3.0.0 或以上版本安装。 ## 数据链路设置 diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md index 5860e70ceafafadc21c5772c96515e0925897e3a..0073cf78340a1100ec97cb70685410ced0cf5d4e 100644 --- a/documentation20/cn/14.devops/02.collectd/docs.md +++ b/documentation20/cn/14.devops/02.collectd/docs.md @@ -40,7 +40,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ``` ### 配置 collectd -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` LoadPlugin network @@ -51,7 +51,7 @@ sudo systemctl start collectd ``` ### 配置 StatsD -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` backends 部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md index b2a68e1b15acef2d574600e59d1b18d890938ac6..4acfecd0cfe903cd993e8c548e9c6b9032dde48a 100644 --- a/documentation20/cn/14.devops/03.immigrate/docs.md +++ b/documentation20/cn/14.devops/03.immigrate/docs.md @@ -8,7 +8,7 @@ - 数据写入和查询的性能远超 OpenTSDB; - 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5; -- 
安装部署非常简单,单一安装包完成安装部署,除了 taosadapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定; +- 安装部署非常简单,单一安装包完成安装部署,不依赖其他的第三方软件,整个安装部署过程秒级搞定; - 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。 - 支持多达 128 个标签,标签总长度可达到 16 KB; - 除 HTTP 之外,还提供 Java、Python、C、Rust、Go 等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。 @@ -40,9 +40,13 @@ - **调整数据收集器配置** -在 TDengine 2.3 版本中,后台服务 taosd 启动后一个 HTTP 的服务 taosadapter 也会自动启用*。*利用 taosadapter 能够兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/Json 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到TDengine。 +在TDengine 2.3版本中,我们发布了taosAdapter ,taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 -如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosadapter 部署的节点 IP 地址和端口。假设 taosadapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下: +用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 + +通过taosAdapter,用户可以将 collectd 和 StatsD 收集的数据直接推送到TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter还支持Telegraf、Icinga、TCollector 、node_exporter的数据接入,使用详情参考[taosAdapter](https://www.taosdata.com/cn/documentation/tools/adapter)。 + +如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下: ```html LoadPlugin write_tsdb @@ -57,28 +61,13 @@ LoadPlugin write_tsdb ``` -即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosadapter, taosadapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 +即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 - **调整看板(Dashborad)系统** -在数据能够正常写入TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。Grafana 暂时还不能够直接连接 TDengine,在 TDengine 的安装目录下 connector/grafanaplugin 有为 Grafana 提供的连接插件。使用该插件的方式很简单: - -首先将grafanaplugin目录下的dist目录整体拷贝到Grafana的插件目录(默认地址为 `/var/lib/grafana/plugins/`),然后重启 Grafana 即可在 **Add Data Source** 菜单下看见 TDengine 数据源。 - -```shell 
-sudo cp -r . /var/lib/grafana/plugins/tdengine -sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine -echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini - -# start grafana service -sudo service grafana-server restart -# or with systemd -sudo systemctl start grafana-server -``` - +在数据能够正常写入TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用TDengine提供的Grafana插件请参考[与其他工具的连接](https://www.taosdata.com/cn/documentation/connections#grafana)。 - -此外,TDengine 还提供了默认的两套Dashboard 模板,供用户快速查看保存到TDengine库里的信息。你只需要将 Grafana 目录下的模板导入到Grafana中即可激活使用。 +TDengine 提供了默认的两套Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到Grafana中即可激活使用。 ![](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg) @@ -106,7 +95,7 @@ sudo systemctl start grafana-server TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 -截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、Json 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 +截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: @@ -129,8 +118,8 @@ TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的 | 序号 | 测量(metric) | 值名称 | 类型 | tag1 | tag2 | tag3 | tag4 | tag5 | | ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ | -| 1 | memory | value | double | host | memory_type | memory_type_instance | source | | -| 2 | swap | value | double | host | swap_type | 
swap_type_instance | source | | +| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a | +| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | @@ -181,7 +170,7 @@ select count(*) from memory 完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可用确认数据写入是完整有效的。 -TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。你可以用检查附件2获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。 +TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。可以用检查附录1获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。 TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 @@ -191,7 +180,21 @@ TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他 为了方便历史数据的迁移工作,我们为数据同步工具DataX提供了插件,能够将数据自动写入到TDengine中,需要注意的是DataX的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用DataX将数据写入TDengine请参见其使用帮助手册 [github.com/taosdata/datax](http://github.com/taosdata/datax)。 +DataX 具体的使用方式及如何使用DataX将数据写入TDengine请参见[基于DataX的TDeninge数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 + +在对DataX进行迁移实践后,我们发现通过启动多个进程,同时迁移多个metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 + +| datax实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | +| ---- | -------------- | +| 1 | 约13.9万 | +| 2 | 约21.8万 | +| 3 | 约24.9万 | +| 5 | 约29.5万 | +| 10 | 约33万 | + + +
    (注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16核64G硬件设备,channel和batchSize 分别为8和1000,每条记录包含10个tag) + ### 2、手动迁移数据 @@ -353,7 +356,7 @@ Select sum(val) from table_name 完整示例: ```json -//OpenTSDB查询Json +//OpenTSDB查询JSON query = { “start”:1510560000, “end”: 1515000009, diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index 0cb6612700a9211ee30fc51fed8a3b3fa77f3342..28905c340a99223e480603ca3ba6151772bada47 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -79,6 +79,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment - [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust +## [Components and Tools](/tools/adapter) + +* [taosAdapter](/tools/adapter) + ## [Connections with Other Tools](/connections) - [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization @@ -106,9 +110,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [User Management](/administrator#user): add/delete TDengine users, modify user password - [Import Data](/administrator#import): import data into TDengine from either script or CSV file - [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool -- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events +- [System Connection and Task Query Management](/administrator#status): show the system connections, queries, streaming calculation and others +- [System Monitor](/administrator#monitoring): monitor TDengine cluster with log database and TDinsight. 
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located -- [Parameter Limitss and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords +- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords ## Performance: TDengine vs Others @@ -127,4 +132,4 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## FAQ -- [FAQ: Common questions and answers](/faq) \ No newline at end of file +- [FAQ: Common questions and answers](/faq) diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md index 318c4d09581672b317f487477e655bdb5bd49d13..c872d2971ef3cce250592df0534af5369c4682dd 100644 --- a/documentation20/en/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md @@ -1,4 +1,4 @@ -Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation mehtod, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily maniplate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo customized parameters. 
+Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo customized parameters. Running taosdemo is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory. @@ -221,7 +221,7 @@ To reach TDengine performance limits, data insertion can be executed by using mu ``` -t, --tables=NUMBER The number of tables. Default is 10000. -n, --records=NUMBER The number of records per table. Default is 10000. --M, --random The value of records generated are totally random. The default is to simulate power equipment senario. +-M, --random The value of records generated are totally random. The default is to simulate power equipment scenario. ``` As mentioned earlier, taosdemo creates 10,000 tables by default, and each table writes 10,000 records. taosdemo can set the number of tables and the number of records in each table by -t and -n. 
The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter. ``` @@ -374,7 +374,7 @@ The following is the content of a typical query JSON example file. } ``` The following parameters are specific to the query in the JSON file. - +``` "query_times": the number of queries per query type "query_mode": query data interface, "tosc": call TDengine's c interface; "resetful": use restfule interface. Options are available. Default is "taosc". "specified_table_query": { query for the specified table @@ -389,7 +389,7 @@ The following parameters are specific to the query in the JSON file. "threads": the number of threads to execute sqls concurrently, optional, default is 1. Each thread is responsible for a part of sub-tables and executes all sqls. "sql": "select count(*) from xxxx". Query statement for all sub-tables in the super table, where the table name must be written as "xxxx" and the instance will be replaced with the sub-table name automatically. "result": the name of the file to which the query result is written. Optional, the default is null, which means the query results are not written to a file. - +``` The following is a typical subscription JSON example file content. ``` @@ -432,13 +432,13 @@ The following is a typical subscription JSON example file content. } ``` The following are the meanings of the parameters specific to the subscription function. - +``` "interval": interval for executing subscriptions, in seconds. Optional, default is 0. "restart": subscription restart." yes": restart the subscription if it already exists, "no": continue the previous subscription. 
(Please note that the executing user needs to have read/write access to the dataDir directory) "keepProgress": keep the progress of the subscription information. yes means keep the subscription information, no means don't keep it. The value is yes and restart is no to continue the previous subscriptions. "resubAfterConsume": Used in conjunction with keepProgress to call unsubscribe after the subscription has been consumed the appropriate number of times and to subscribe again. "result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each sql statement cannot be renamed, and the file name will be appended with the thread number when generating the result file. - +``` Conclusion -- TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And withSQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate the operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software. diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index 3236b8aff8777836af492d2066a530c32a9ab75e..0700c6f9b89d8820fdada2dd37bd2c7ece9f441c 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -193,7 +193,7 @@ A complete TDengine system runs on one or more physical nodes. 
Logically, it inc **Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. -### A Typical Data Writinfg Process +### A Typical Data Writing Process To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. @@ -244,7 +244,7 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno ### Data Partitioning -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage meida to reduce the storage cost. +In addition to vnode sharding, TDengine partitions the time-series data by time range. 
Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage media to reduce the storage cost. In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md index 2e104b980a91c9ee72d93e41fbf0d4276694d1ef..aa8ea7dde45959347bbc8f51da012fa864e5bf46 100644 --- a/documentation20/en/05.insert/docs.md +++ b/documentation20/en/05.insert/docs.md @@ -1,6 +1,6 @@ # Efficient Data Writing -TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in one single record or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion. +TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, collectd, StatsD, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in one single record or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion. 
## Data Writing via SQL @@ -107,11 +107,10 @@ Launch an API service for blm_prometheus with the following command: Assuming that the IP address of the server where blm_prometheus located is "10.1.2. 3", the URL shall be added to the configuration file of Prometheus as: +```yaml remote_write: - -\- url: "http://10.1.2.3:8088/receive" - - + - url: "http://10.1.2.3:8088/receive" +``` ### Query written data of prometheus @@ -142,141 +141,84 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` +## Data Writing via Telegraf and taosAdapter +Please refer to [Official document](https://portal.influxdata.com/downloads/) for Telegraf installation. +TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receive data insertion from Telegraf. -## Data Writing via Telegraf - -[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for IT operation data collection. TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs to be simply configured in Telegraf without any code, and can directly write the data collected by Telegraf into TDengine, then automatically create databases and related table entries in TDengine according to rules. Blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html), which is an example of using bailongma to write Prometheus and Telegraf data into TDengine. - -### Compile blm_telegraf From Source Code - -Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using Golang language compiler. Before you start compiling, you need to complete following prepares: - -- A server running Linux OS -- Golang version 1.10 and higher installed -- An appropriated TDengine version. 
Because the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side; for example, if the server version is TDengine 2.0. 0, ensure install the same version on the linux server where bailongma is located (can be on the same server as TDengine, or on a different server) - -Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows: - -```bash -cd blm_telegraf - -go build +Configuration: +Please add following words in /etc/telegraf/telegraf.conf. Fill 'database name' with the database name you want to store in the TDengine for Telegraf data. Please fill the values in TDengine server/cluster host, username and password fields. +``` +[[outputs.http]] + url = "http://:6041/influxdb/v1/write?db=" + method = "POST" + timeout = "5s" + username = "" + password = "" + data_format = "influx" + influx_max_line_bytes = 250 ``` -If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory. - -### Install Telegraf - -At the moment, TDengine supports Telegraf version 1.7. 4 and above. Users can download the installation package on Telegraf's website according to your current operating system. The download address is as follows: https://portal.influxdata.com/downloads - -### Configure Telegraf - -Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf. - -In the output plugins section, add the [[outputs.http]] configuration: - -- url: The URL provided by bailongma API service, please refer to the example section below -- data_format: "json" -- json_timestamp_units: "1ms" - -In agent section: - -- hostname: The machine name that distinguishes different collection devices, and it is necessary to ensure its uniqueness -- metric_batch_size: 100, which is the max number of records per batch wriiten by Telegraf allowed. 
Increasing the number can reduce the request sending frequency of Telegraf. - -For information on how to use Telegraf to collect data and more about using Telegraf, please refer to the official [document](https://docs.influxdata.com/telegraf/v1.11/) of Telegraf. - -### Launch blm_telegraf - -blm_telegraf has following options, which can be set to tune configurations of blm_telegraf when launching. - -```sh ---host - -The ip address of TDengine server, default is null - ---batch-size - -blm_prometheus assembles the received telegraf data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time. - ---dbname - -Set a name for the database created in TDengine, blm_telegraf will automatically create a database named dbname in TDengine, and the default value is prometheus. - ---dbuser - -Set the user name to access TDengine, the default value is 'root ' +Then restart telegraf: +``` +sudo systemctl start telegraf +``` +Now you can query the metrics data of Telegraf from TDengine. ---dbpassword +Please find taosAdapter configuration and usage from `taosadapter --help` output. -Set the password to access TDengine, the default value is'taosdata ' +## Data Writing via collectd and taosAdapter +Please refer to [official document](https://collectd.org/download.shtml) for collectd installation. ---port +TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from collectd. -The port number blm_telegraf used to serve Telegraf. +Configuration: +Please add following words in /etc/collectd/collectd.conf. Please fill the values 'host' and 'port' with what TDengine and taosAdapter are using. ``` +LoadPlugin network + + Server "" "" + +``` +Then restart collectd +``` +sudo systemctl start collectd +``` +Please find taosAdapter configuration and usage from `taosadapter --help` output. 
+## Data Writing via StatsD and taosAdapter +Please refer to [official document](https://github.com/statsd/statsd) for StatsD installation. +TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from StatsD. -

-### Example - -Launch an API service for blm_telegraf with the following command - -```bash -./blm_telegraf -host 127.0.0.1 -port 8089 +Please add following words in the config.js file. Please fill the values 'host' and 'port' with what TDengine and taosAdapter are using. ``` - -Assuming that the IP address of the server where blm_telegraf located is "10.1.2. 3", the URL shall be added to the configuration file of telegraf as: - -```yaml -url = "http://10.1.2.3:8089/telegraf" +add "./backends/repeater" to backends section. +add { host:'', port: } to repeater section. ``` -### Query written data of telegraf - -The format of generated data by telegraf is as follows: - -```json +Example file: +``` { - "fields": { - "usage_guest": 0, - "usage_guest_nice": 0, - "usage_idle": 89.7897897897898, - "usage_iowait": 0, - "usage_irq": 0, - "usage_nice": 0, - "usage_softirq": 0, - "usage_steal": 0, - "usage_system": 5.405405405405405, - "usage_user": 4.804804804804805 - }, - - "name": "cpu", - "tags": { - "cpu": "cpu2", - "host": "bogon" - }, - "timestamp": 1576464360 +port: 8125 +, backends: ["./backends/repeater"] +, repeater: [{ host: '127.0.0.1', port: 6044}] } ``` -Where the name field is the name of the time-series data collected by telegraf, and the tag field is the tag of the time-series data. blm_telegraf automatically creates a STable in TDengine with the name of the time series data, and converts the tag field into the tag value of TDengine, with Timestamp as the timestamp and fields values as the value of the time-series data. Therefore, in the client of TDEngine, you can check whether this data was successfully written through the following instruction. 
+Please find taosAdapter configuration and usage from `taosadapter --help` output. -```mysql -use telegraf; -select * from cpu; -``` +## Insert data via Bailongma 2.0 and Telegraf + +**Notice:** +TDengine 2.3.0.0+ provides taosAdapter to support Telegraf data writing. Bailongma v2 will be abandoned and no more maintained. -MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine. ## Data Writing via EMQ Broker [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software, with no need of coding, only to use "rules" in EMQ Dashboard for simple configuration, and MQTT data can be directly written into TDengine. EMQ X supports storing data to the TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data store. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details. - - ## Data Writing via HiveMQ Broker -[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. \ No newline at end of file +[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. 
HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. diff --git a/documentation20/en/06.queries/docs.md b/documentation20/en/06.queries/docs.md index d906443153bb7e83cee69da4588554893ce154a3..5ba73f297fbe346b54469f24a3a43adb617b31a6 100644 --- a/documentation20/en/06.queries/docs.md +++ b/documentation20/en/06.queries/docs.md @@ -11,7 +11,7 @@ TDengine uses SQL as the query language. Applications can send SQL statements th - Time stamp aligned join query (implicit join) operations - Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc -For example, in TAOS shell, the records with vlotage > 215 are queried from table d1001, sorted in descending order by timestamps, and only two records are outputted. +For example, in TAOS shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamps, and only two records are outputted. 
```mysql taos> select * from d1001 where voltage > 215 order by ts desc limit 2; diff --git a/documentation20/en/07.advanced-features/docs.md b/documentation20/en/07.advanced-features/docs.md index 38c70862b637daf5840606535971e412d938b9e8..0bf10183c6babf82744e073ab0cd892602a381d9 100644 --- a/documentation20/en/07.advanced-features/docs.md +++ b/documentation20/en/07.advanced-features/docs.md @@ -110,10 +110,10 @@ First, use `taos_subscribe` to create a subscription: ```c TAOS_SUB* tsub = NULL; if (async) { -  // create an asynchronized subscription, the callback function will be called every 1s +  // create an asynchronous subscription, the callback function will be called every 1s   tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); } else { -  // create an synchronized subscription, need to call 'taos_consume' manually +  // create an synchronous subscription, need to call 'taos_consume' manually   tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); } ``` @@ -201,7 +201,7 @@ taos_unsubscribe(tsub, keep); Its second parameter is used to decide whether to keep the progress information of subscription on the client. If this parameter is **false** (zero), the subscription can only be restarted no matter what the `restart` parameter is when `taos_subscribe` is called next time. In addition, progress information is saved in the directory {DataDir}/subscribe/. Each subscription has a file with the same name as its `topic`. Deleting a file will also lead to a new start when the corresponding subscription is created next time. -After introducing the code, let's take a look at the actual running effect. For exmaple: +After introducing the code, let's take a look at the actual running effect. 
For example: - Sample code has been downloaded locally - TDengine has been installed on the same machine diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index 75cc380c141383cce0bc3c9790c91fa97563e3ca..448921349fc0c051effc8a1d7e1a69496cff1199 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -54,33 +54,33 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES( ## JDBC driver version and supported TDengine and JDK versions -| taos-jdbcdriver | TDengine | JDK | -| -------------------- | ----------------- | -------- | -| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x | -| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x | -| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | -| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | -| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | -| 1.0.3 | 1.6.1.x and above | 1.8.x | -| 1.0.2 | 1.6.1.x and above | 1.8.x | -| 1.0.1 | 1.6.1.x and above | 1.8.x | +| taos-jdbcdriver | TDengine | JDK | +| --------------- | ------------------ | ----- | +| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x | +| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | +| 1.0.3 | 1.6.1.x and above | 1.8.x | +| 1.0.2 | 1.6.1.x and above | 1.8.x | +| 1.0.1 | 1.6.1.x and above | 1.8.x | ## DataType in TDengine and Java connector The TDengine supports the following data types and Java data types: | TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) | -| ----------------- | ------------------ | ------------------ | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | 
java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | +| ----------------- | ---------------------------------- | ----------------------------------- | +| TIMESTAMP | java.lang.Long | java.sql.Timestamp | +| INT | java.lang.Integer | java.lang.Integer | +| BIGINT | java.lang.Long | java.lang.Long | +| FLOAT | java.lang.Float | java.lang.Float | +| DOUBLE | java.lang.Double | java.lang.Double | +| SMALLINT | java.lang.Short | java.lang.Short | +| TINYINT | java.lang.Byte | java.lang.Byte | +| BOOL | java.lang.Boolean | java.lang.Boolean | +| BINARY | java.lang.String | byte array | +| NCHAR | java.lang.String | java.lang.String | ## Install Java connector @@ -448,7 +448,7 @@ public static void main(String[] args) throws SQLException { config.setMinimumIdle(10); //minimum number of idle connection config.setMaximumPoolSize(10); //maximum number of connection in the pool config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool - config.setMaxLifetime(0); // maximum life time for each connection + config.setMaxLifetime(0); // maximum lifetime for each connection config.setIdleTimeout(0); // max idle time for recycle idle connection config.setConnectionTestQuery("select server_status()"); //validation query HikariDataSource ds = new HikariDataSource(config); //create datasource @@ -456,7 +456,7 @@ public static void main(String[] args) throws SQLException { Statement statement = connection.createStatement(); // get statement //query or insert // ... - connection.close(); // put back to conneciton pool + connection.close(); // put back to connection pool } ``` @@ -480,7 +480,7 @@ public static void main(String[] args) throws Exception { Statement statement = connection.createStatement(); // get statement //query or insert // ... 
- connection.close(); // put back to conneciton pool + connection.close(); // put back to connection pool } ``` diff --git a/documentation20/en/08.connector/02.rust/docs.md b/documentation20/en/08.connector/02.rust/docs.md index 235a7d074955024989f60e6e689aead276f184f2..583d8fd2ceb7346ee8143d75898d4e15e8bc7301 100644 --- a/documentation20/en/08.connector/02.rust/docs.md +++ b/documentation20/en/08.connector/02.rust/docs.md @@ -12,10 +12,10 @@ Thanks [@songtianyi](https://github.com/songtianyi) for [libtdengine](https://gi if you use the default features, it'll depend on: -- [TDengine] Client library and headers. +- [TDengine Client](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) library and headers. - clang because bindgen will requires the clang AST library. -## Fetures +## Features In-design features: @@ -66,7 +66,7 @@ For REST client: libtaos = { version = "*", features = ["rest"] } ``` -There's a [demo app]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) in examples directory, looks like this: +There's a [demo app](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) in examples directory, looks like this: ```rust // ... 
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index 806bebd77738bd4251607237e3f88c589baa4741..b3b4dedabbbc55f554541710c4e0d8abd8e5c892 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -154,7 +154,7 @@ Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and yo | **CPU Type** | **x64****(****64bit****)** | | | **ARM64** | **ARM32** | | -------------------- | ---------------------------- | ------- | ------- | --------- | ------------------ | | **OS Type** | Linux | Win64 | Win32 | Linux | Linux | -| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** | +| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **Yes** | The C/C++ API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in/usr/local/taos/include): @@ -200,6 +200,8 @@ Create a database connection and initialize the connection context. The paramete * port: Port number A null return value indicates a failure. The application needs to save the returned parameters for subsequent API calls. +Note: The same process can connect to multiple taosd processes based on ip/port + - `char *taos_get_server_info(TAOS *taos)` @@ -841,37 +843,60 @@ Only some configuration parameters related to RESTful interface are listed below ## CSharp Connector -The C # connector supports: Linux 64/Windows x64/Windows x86. + +* The C # connector supports: Linux 64/Windows x64/Windows x86. +* C# connector can be download and include as normal table form [Nuget.org](https://www.nuget.org/packages/TDengine.Connector/). +* On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver. 
### Installation preparation -- For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver). -- . NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory. -- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver. +* For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver). +* .NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory. +* Install [.NET SDK](https://dotnet.microsoft.com/download) -### Installation verification +### Example Source Code +you can find sample code under follow directions: +* {client_install_directory}/examples/C# +* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523) -Run install_directory/examples/C#/C#Checker/C#Checker.exe +**Tips:** TDengineTest.cs One of C# connector's sample code that include basic examples like connection,sql executions and so on. + +### Installation verification +Run {client_install_directory}/examples/C#/C#Checker/C#Checker.cs +Need install .Net SDK first ```cmd -cd {install_directory}/examples/C#/C#Checker -csc /optimize *.cs -C#Checker.exe -h +cd {client_install_directory}/examples/C#/C#Checker +//run c#checker.cs +dotnet run -- -h //dotnet run will build project first by default. ``` ### How to use C# connector - On Windows system, .NET applications can use the .NET interface of TDengine to perform all database operations. The steps to use it are as follows: -1. Add the. 
NET interface file TDengineDrivercs.cs to the .NET project where the application is located. -2. Users can refer to TDengineTest.cs to define database connection parameters and how to perform data insert, query and other operations; +need to install .NET SDK first +* create a c# project. +``` cmd +mkdir test +cd test +dotnet new console +``` +* add TDengineDriver as an package through Nuget -This. NET interface requires the taos.dll file, so before executing the application, copy the taos.dll file in the Windows client install_directory/driver directory to the folder where the. NET project finally generated the .exe executable file. After running the exe file, you can access the TDengine database and do operations such as insert and query. +``` cmd +dotnet add package TDengine.Connector +``` +* include the TDnengineDriver in you application's namespace +```C# +using TDengineDriver; +``` +* user can reference from[TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest) and learn how to define database connection,query,insert and other basic data manipulations. **Note:** -1. TDengine V2.0. 3.0 supports both 32-bit and 64-bit Windows systems, so when. NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project". -2. This. NET interface has been verified in Visual Studio 2015/2017, and other VS versions have yet to be verified. +* TDengine V2.0. 3.0 supports both 32-bit and 64-bit Windows systems, so when. NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project". +* This. NET interface has been verified in Visual Studio 2015/2017, and other VS versions have yet to be verified. +* Since this. NET connector interface requires the taos.dll file, so before executing the application, copy the taos.dll file in the Windows {client_install_directory}/driver directory to the folder where the. 
NET project finally generated the .exe executable file. After running the exe file, you can access the TDengine database and do operations such as insert and query(This step can be skip if the client has been installed on you machine). ### Third-party Driver @@ -1022,7 +1047,7 @@ Steps: ### How to use Node.js -The following are some basic uses of node.js connector. Please refer to [TDengine Node.js connector](http://docs.taosdata.com/node) for details. +The following are some basic uses of node.js connector. Please refer to [TDengine Node.js connector](https://github.com/taosdata/TDengine/tree/develop/src/connector/nodejs)for details. ### Create connection diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index a2c2486b8e96cab95fad0f90470726d508dd63f7..11dd3e482d5e68bb642a94c533f23d390edf61f3 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -400,11 +400,56 @@ KILL STREAM ; Force to turn off the stream computing, in which stream-id is the connection-id: stream-no string displayed in SHOW STREAMS, such as 103: 2, copy and paste it. -## System Monitoring +## System Monitoring After TDengine is started, it will automatically create a monitoring database log and write the server's CPU, memory, hard disk space, bandwidth, number of requests, disk read-write speed, slow query and other information into the database regularly. TDengine also records important system operations (such as logging in, creating, deleting databases, etc.) logs and various error alarm information and stores them in the log database. The system administrator can view the database directly from CLI or view the monitoring information through GUI on WEB. -The collection of these monitoring metrics is turned on by default, but you can modify option enableMonitor in the configuration file to turn it off or on. 
+The collection of these monitoring metrics is turned on by default, but you can modify option monitor in the configuration file to turn it off or on. + +### TDinsight - Monitor TDengine with Grafana + Data Source + +Starting from v2.3.3.0, TDengine's log database provides more metrics for resources and status monitoring. Here we introduce a zero-dependency monitoring solution - we call it TDinsight - with Grafana. You can find the documentation from [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md). + +We provide an automation shell script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.sh) as a shortcut to help set up TDinsight on the Grafana server. + +First, download `TDinsight.sh` from GitHub: + +```bash +wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh +chmod +x TDinsight.sh +``` + +Some CLI options are needed to use the script: + +1. TDengine server information: + + - TDengine RESTful endpoint, like `http://localhost:6041`, will be used with option `-a`. + - TDengine user `-u` (`root` by default), and password with `-p` (`taosdata` by default). + +2. Grafana alerting notifications. There are two ways to set this up: + 1. To use an existing Grafana notification channel with `uid`, option `-E`. The `uid` could be retrieved with `curl -u admin:admin localhost:3000/api/alert-notifications | jq '.[]| .uid + "," + .name' -r`, then use it like this: + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E + ``` + + 2. Use TDengine data source plugin's built-in [Aliyun SMS](https://www.aliyun.com/product/sms) alerting support with `-s` flag, and input these options: + 1. Access key id with option `-I` + 2. Access key secret with option `-K` + 3. Access key sign name with option `-S` + 4. Message template code with option `-C` + 5. Message template params in JSON format with option `-T`, eg.
`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`. + 6. `,`-separated phone numbers list with option `-B` + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ + -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ + -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' + ``` + +Follow the usage of the script and then restart grafana-server service, here we go . + +Refer to [TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) README for more scenario and limitations of the script, and the metrics descriptions for all of the TDinsight. ## File Directory Structure diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 7f9754e80fcf97962177d2690c233cae23f8d491..8533f92a3b59e27df61c16a2bc86961775bf84da 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -228,7 +228,7 @@ Note: In 2.0.15.0 and later versions, STABLE reserved words are supported. That ```mysql CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); ``` - Similiar to a standard table creation SQL, but you need to specify name and type of TAGS field. + Similar to a standard table creation SQL, but you need to specify name and type of TAGS field. Note: @@ -673,7 +673,7 @@ Query OK, 1 row(s) in set (0.001091s) SELECT * FROM tb1 WHERE ts >= NOW - 1h; ``` -- Look up table tb1 from 2018-06-01 08:00:00. 000 to 2018-06-02 08:00:00. 000, and col3 string is a record ending in'nny ', and the result is in descending order of timestamp: +- Look up table tb1 from 2018-06-01 08:00:00. 000 to 2018-06-02 08:00:00. 
000, and col3 string is a record ending in 'nny ', and the result is in descending order of timestamp: ```mysql SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC; @@ -782,7 +782,7 @@ TDengine supports aggregations over data, they are listed below: Function: return the sum of a statistics/STable. - Return Data Type: long integer INMT64 and Double. + Return Data Type: INT64 and Double. Applicable Fields: All types except timestamp, binary, nchar, bool. @@ -1196,7 +1196,7 @@ SELECT function_list FROM stb_name - FILL statement specifies a filling mode when data missed in a certain interval. Applicable filling modes include the following: - 1. Do not fill: NONE (default filingl mode). + 1. Do not fill: NONE (default filing mode). 2. VALUE filling: Fixed value filling, where the filled value needs to be specified. For example: fill (VALUE, 1.23). 3. NULL filling: Fill the data with NULL. For example: fill (NULL). 4. PREV filling: Filling data with the previous non-NULL value. For example: fill (PREV). diff --git a/documentation20/en/14.devops/01.telegraf/docs.md b/documentation20/en/14.devops/01.telegraf/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..a8b5db08ccc1131611c12fb53970115a89368376 --- /dev/null +++ b/documentation20/en/14.devops/01.telegraf/docs.md @@ -0,0 +1,75 @@ +# Rapidly build an IT DevOps system with TDengine + Telegraf + Grafana + +## Background +TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations. + +There are a lot of time-series data in the IT DevOps scenario, for example: +- Metrics of system resource: CPU, memory, IO and network status, etc. 
+- Metrics for software system: service status, number of connections, number of requests, number of the timeout, number of errors, response time, service type, and other metrics related to the specific business. + +A mainstream IT DevOps system generally includes a data-collection module, a data persistent module, and a visualization module. Telegraf and Grafana are some of the most popular data-collection and visualization modules. But data persistent module can be varied. OpenTSDB and InfluxDB are some prominent from others. In recent times, TDengine, as emerged time-series data platform provides more advantages including high performance, high reliability, easier management, easier maintenance. + +Here we introduce a way to build an IT DevOps system with TDengine, Telegraf, and Grafana. Even no need one line program code but just modify a few lines of configuration files. + +![IT-DevOps-Solutions-Telegraf.png](../../images/IT-DevOps-Solutions-Telegraf.png) + + +## Installation steps + +### Install Telegraf,Grafana and TDengine +Please refer to each component's official document for Telegraf, Grafana, and TDengine installation. + +### Telegraf +Please refer to the [official document](https://portal.influxdata.com/downloads/). + +### Grafana +Please refer to the [official document](https://grafana.com/grafana/download). + +### TDengine +Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official website](http://taosdata.com/en/all-downloads/). + + +## Setup data chain +### Download TDengine plugin to Grafana plugin's directory + +```bash +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip +2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini +5. 
sudo systemctl restart grafana-server.service +``` + +### Modify /etc/telegraf/telegraf.conf +Please add a few lines in /etc/telegraf/telegraf.conf as below. Please specify the database name in which you want to save Telegraf's data in TDengine. Please specify the correct value for the hostname of the TDengine server/cluster, username, and password: +``` +[[outputs.http]] + url = "http://:6041/influxdb/v1/write?db=" + method = "POST" + timeout = "5s" + username = "" + password = "" + data_format = "influx" + influx_max_line_bytes = 250 +``` + +Then restart telegraf: +``` +sudo systemctl start telegraf +``` + + +### Import dashboard + +Use your Web browser to access IP:3000 to log in to the Grafana management interface. The default username and password are admin/admin. + +Click the 'gear' icon from the left bar to select 'Plugins'. You could find the icon of the TDengine data source plugin. + +Click the 'plus' icon from the left bar to select 'Import'. You can download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json and then import it into Grafana. After that, you should see the interface like: + +![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png) + + +## Summary + +We demonstrated how to build a full-function IT DevOps system with TDengine, Telegraf, and Grafana. TDengine supports schemaless protocol data insertion capability from 2.3.0.0. Based on TDengine's powerful ecosystem software integration capability, the user can build a highly efficient and easy-to-maintain IT DevOps system in a few minutes. Please find more detailed documentation about TDengine high-performance data insertion/query functions and more use cases from TAOS Data's official website.
diff --git a/documentation20/en/14.devops/02.collectd/docs.md b/documentation20/en/14.devops/02.collectd/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..15a83d7f0c78f9e36122d4c7a0c125daddfa1c6a --- /dev/null +++ b/documentation20/en/14.devops/02.collectd/docs.md @@ -0,0 +1,84 @@ +# Rapidly build a IT DevOps system with TDengine + collectd/StatsD + Grafana + +## Background +TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations. + +There are a lot of time-series data in the IT DevOps scenario, for example: +- Metrics of system resource: CPU, memory, IO and network status, etc. +- Metrics for software system: service status, number of connections, number of requests, number of the timeout, number of errors, response time, service type, and other metrics related to the specific business. + +A mainstream IT DevOps system generally includes a data-collection module, a data persistent module, and a visualization module. Telegraf and Grafana are some of the most popular data-collection and visualization modules. But data persistent module can be varied. OpenTSDB and InfluxDB are some prominent from others. In recent times, TDengine, as emerged time-series data platform provides more advantages including high performance, high reliability, easier management, easier maintenance. + +Here we introduce a way to build an IT DevOps system with TDengine, collectd/statsD, and Grafana. Even no need one line program code but just modify a few lines of configuration files. 
+ +![IT-DevOps-Solutions-Collectd-StatsD.png](../../images/IT-DevOps-Solutions-Collectd-StatsD.png) + +## Installation steps +Please refer to each component's official document for collectd, StatsD, Grafana, and TDengine installation. + +### collectd +Please refer to the [official document](https://collectd.org/documentation.shtml). + +### StatsD +Please refer to the [official document](https://github.com/statsd/statsd). + +### Grafana +Please refer to the [official document](https://grafana.com/grafana/download). + +### TDengine +Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official website](http://taosdata.com/cn/all-downloads/). + +## Setup data chain +### Download TDengine plugin to Grafana plugin's directory + +```bash +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip +2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini +5. sudo systemctl restart grafana-server.service +``` + +### To configure collectd +Please add a few lines in /etc/collectd/collectd.conf as below. Please specify the correct value for hostname and the port number: +``` +LoadPlugin network + + Server "" "" + + +sudo systemctl start collectd +``` + +### To configure StatsD +Please add a few lines in the config.js file then restart StatsD. Please use the correct hostname and port number of TDengine and taosAdapter: +``` +fill backends section with "./backends/repeater" +fill repeater section with { host:'', port: } +``` + +### Import dashboard + +Use your Web browser to access IP:3000 to log in to the Grafana management interface. The default username and password are admin/admin。 + +Click the gear icon from the left bar to select 'Plugins'. You could find the icon of the TDengine data source plugin. 
+ +#### Import collectd dashboard + +Please download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json. + +Click the 'plus' icon from the left bar to select 'Import'. Then you should see the interface like: + +![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png) + +#### Import StatsD dashboard + +Please download dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json. + +Click the 'plus' icon from the left bar to select 'Import'. Then you should see the interface like: + +![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png) + +## Summary + +We demonstrated how to build a full-function IT DevOps system with TDengine, collectd, StatsD, and Grafana. TDengine supports schemaless protocol data insertion capability from 2.3.0.0. Based on TDengine's powerful ecosystem software integration capability, the user can build a high efficient and easy-to-maintain IT DevOps system in few minutes. Please find more detailed documentation about TDengine high-performance data insertion/query functions and more use cases from TAOS Data's official website. diff --git a/documentation20/en/14.devops/03.immigrate/docs.md b/documentation20/en/14.devops/03.immigrate/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..3a00649d30e0db76ba971b5cc0771fd71e5e9920 --- /dev/null +++ b/documentation20/en/14.devops/03.immigrate/docs.md @@ -0,0 +1,436 @@ +# Best practice of immigration from OpenTSDB to TDengine + +As a distributed, scalable, HBase-based distributed temporal database system, OpenTSDB has been introduced and widely used in the field of operation and monitoring by people in DevOps due to its first-mover advantage. 
However, in recent years, with the rapid development of new technologies such as cloud computing, microservices, and containerization, enterprise-level services have become more and more diverse, and the architecture has become more and more complex, and the application operation infrastructure environment has become more and more diverse, which brings more and more pressure on system and operation monitoring. From this status quo, the use of OpenTSDB as the monitoring backend storage for DevOps is increasingly plagued by performance issues and slow feature upgrades, as well as the resulting increase in application deployment costs and reduced operational efficiency, which are becoming more and more serious as the system scales up. + +In this context, to meet the fast-growing IoT big data market and technical demands, TAOS Data has developed an innovative big data processing product TDengine independently after learning the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, etc. TDengine has its unique advantages in time-series big data processing. TDengine can effectively solve the problems currently encountered by OpenTSDB. + +Compared with OpenTSDB, TDengine has the following distinctive features. + +- Performance of data writing and querying far exceeds that of OpenTSDB. +- Efficient compression mechanism for time-series data, which compresses data to less than 1/5 of the original storage space on disk.
+- The installation and deployment is very simple, a single installation package to complete the installation and deployment, no other third-party software, the entire installation and deployment process in seconds; +- The built-in functions cover all the query functions supported by OpenTSDB, and also support more time-series data query functions, scalar functions and aggregation functions, and support advanced query functions such as multiple time-window aggregation, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules, it is easier to learn and basically has no learning cost. +- Supports up to 128 tags with a total tag length of up to 16 KB. +- In addition to HTTP, it also provides interfaces to Java, Python, C, Rust, Go, and other languages, and supports a variety of enterprise-class standard connector protocols such as JDBC. + +If we migrate applications originally running on OpenTSDB to TDengine, we can not only effectively reduce the consumption of computing and storage resources and the scale of deployed servers, but also greatly reduce the output of operation and maintenance costs, making operation and maintenance management simpler and easier, and significantly reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open sourced, but the difference is that in addition to the stand-alone version, the latter has also achieved the open source of the cluster version, and the concern of being bound by the vendor has been swept away. + +In the following section we will explain how to migrate OpenTSDB applications to TDengine quickly, securely and reliably without coding, using the most typical and widely used DevOps scenarios. Subsequent chapters will provide more in-depth coverage to facilitate migration for non-DevOps scenarios. + +## Rapid migration of DevOps applications + +### 1. 
Typical Application Scenarios + +The overall system architecture of a typical DevOps application scenario is shown in the figure below (Figure 1). + +![IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg) +
    Figure 1. Typical architecture in a DevOps scenario
    + +In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics, data collectors to aggregate information collected by agents, systems for data persistence storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.). + +Among them, Agents deployed in application nodes are responsible for providing operational metrics from different sources to collectd/Statsd, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization board of Grafana. + +### 2. Migration Service + +- **TDengine installation and deployment** + +First of all, TDengine should be installed. Download the latest stable version of TDengine from the official website, unzip it and run install.sh to install it. For help on using various installation packages, please refer to the blog ["Installation and uninstallation of various TDengine installation packages"](https://www.taosdata.com/blog/2019/08/09/566.html). + +Note that after the installation, do not start the taosd service immediately, but start it after the parameters are correctly configured. + +- **Adjusting the data collector configuration** + +In TDengine version 2.3, an HTTP service taosAdapter is automatically enabled after the backend service taosd is started. The taosAdapter is compatible with Influxdb's Line Protocol and OpenTSDB's telnet/JSON write protocol, allowing data collected by collectd and StatsD to be pushed directly to TDengine. + +If you use collectd, modify the configuration file in its default location /etc/collectd/collectd.conf to point to the IP address and port of the node where taosAdapter is deployed. 
Assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows + +```html +LoadPlugin write_tsdb + + + Host "192.168.1.130" + Port "6046" + HostTags "status=production" + StoreRates false + AlwaysAppendDS false + +``` + +This allows collectd to push the data to taosAdapter using the push to OpenTSDB plugin. taosAdapter will call the API to write the data to taosd, thus completing the writing of the data. If you are using StatsD adjust the profile information accordingly. + +- **Adjusting the Dashboard system** + +After the data has been written to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. There is a connection plugin for Grafana in the TDengine installation directory connector/grafanaplugin. The way to use this plugin is simple. + +First copy the entire dist directory under the grafanaplugin directory to Grafana's plugins directory (the default address is /var/lib/grafana/plugins/), and then restart Grafana to see the TDengine data source under the Add Data Source menu. + +```shell +sudo cp -r . /var/lib/grafana/plugins/tdengine +sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini + +# start grafana service +sudo service grafana-server restart +# or with systemd +sudo systemctl start grafana-server +``` + + + +In addition, TDengine provides two default Dashboard templates for users to quickly view the information saved to the TDengine repository. You can simply import the templates from the Grafana directory into Grafana to activate their use. + +![](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg) + +
    Figure 2. Importing Grafana Templates
    + +After the above steps, you have completed the migration of OpenTSDB to TDengine. You can see that the whole process is very simple, no code needs to be written, and only some configuration files need to be adjusted to complete the migration work. + +### 3. Post-migration architecture + +After the migration is completed, the overall architecture of the system at this time is shown in the figure below (Figure 3), and the acquisition side, data writing side, and monitoring presentation side all remain stable during the whole process, which does not involve any important changes or alterations except for very few configuration adjustments. OpenTSDB to TDengine migration action, using TDengine more powerful processing power and query performance. + +In most DevOps scenarios, if you have a small OpenTSDB cluster (3 nodes or less) as the storage side of DevOps, relying on OpenTSDB to provide data storage and query functions for the system persistence layer, then you can safely replace it with TDengine and save more compute and storage resources. With the same configuration of computing resources, a single TDengine can meet the service capacity provided by 3~5 OpenTSDB nodes. If the scale is relatively large, then a TDengine cluster is required. + +If your application is particularly complex, or the application domain is not a DevOps scenario, you can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating OpenTSDB applications to TDengine. + +![IT-DevOps-Solutions-Immigrate-TDengine-Arch](../../images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg) + +
    Figure 3. System architecture after the migration is complete
    + +## Migration evaluation and strategy for other scenarios + +### 1. Differences between TDengine and OpenTSDB + +This chapter describes in detail the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can thoroughly evaluate whether you can migrate certain complex OpenTSDB-based applications to TDengine, and what you should pay attention to after the migration. + +TDengine currently only supports Grafana visual kanban rendering, so if your application uses a front-end kanban other than Grafana (e.g. [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.), then the front-end kanban cannot be migrated directly to TDengine and will need to be re-adapted to Grafana before it can function properly. + +As of version 2.3.0.x, TDengine can only support collectd and StatsD as data collection aggregation software, but more data collection aggregation software will be provided in the future. If you use other types of data aggregators on the collection side, your application needs to be adapted to these two data aggregation systems to be able to write data properly. In addition to the two data aggregation end software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's row protocol and OpenTSDB's data writing protocol, JSON format, and you can rewrite the logic on the data push side to write data using the row protocols supported by TDengine. + +In addition, if you use the following features of OpenTSDB in your application, you need to understand the following considerations before migrating your application to TDengine. + +1. `/api/stats`: TDengine provides a new mechanism for handling cluster state monitoring to meet your application's monitoring and maintenance needs of your application. +2. 
`/api/tree`: TDengine uses a hierarchy of database -> supertable -> sub-table to organize and maintain timelines, with all timelines belonging to the same supertable at the same level in the system. However, it is possible to simulate a logical multi-level structure of the application through the special construction of different tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and in some scenarios to access the original results, the opaqueness of this structure makes the application processing logic extremely complex and completely non-portable. TDengine does not support automatic downsampling of multiple timelines and preaggregates (for a range of periods) for the time being, but due to its high-performance query processing logic, it can provide high performance even without relying on Rollup and preaggregates. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely Derivative (whose calculation results are consistent with InfluxDB's Derivative behavior) and IRate (whose calculation results are consistent with the IRate function in Prometheus). However, the results of these two functions differ slightly from Rate, but are more powerful overall. In addition,** all the calculation functions provided by OpenTSDB are supported by TDengine with corresponding query functions, and the query functions of TDengine far exceed the query functions supported by OpenTSDB,** which can greatly simplify your application processing logic. + +Through the above introduction, I believe you should be able to understand the changes brought by the migration of OpenTSDB to TDengine, and this information will also help you correctly judge whether it is acceptable to migrate your application to TDengine, and experience the powerful timing data processing capability and convenient user experience provided by TDengine. + +### 2. 
Migration strategy + +First of all, the OpenTSDB-based system will be migrated involving data schema design, system scale estimation, data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a period of time, and then the historical data will be migrated to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features, and at the same time, You can consider keeping the original OpenTSDB system running while starting TDengine to provide the main services. + +## Data model design + +On the one hand, TDengine requires a strict schema definition for its incoming data. On the other hand, the data model of TDengine is richer than that of OpenTSDB, and the multi-valued model is compatible with all single-valued model building requirements. + +Now let's assume a DevOps scenario where we use collectd to collect base metrics of devices, including memory, swap, disk, etc. The schema in OpenTSDB is as follows: + +| No. | metric | value | type | tag1 | tag2 | tag3 | tag4 | tag5 | +| --- | ------ | ----- | ------ | ---- | ----------- | -------------------- | --------- | ------ | +| 1 | memory | value | double | host | memory_type | memory_type_instance | source | | +| 2 | swap | value | double | host | swap_type | swap_type_instance | source | | +| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | + + + +TDengine requires data stored to have a data schema, i.e., you need to create a supertable and specify the schema of the supertable before writing the data. For data schema creation, you have two ways to do this: 1) Take full advantage of TDengine's native data writing support for OpenTSDB by calling the API provided by TDengine to write the data (in text line or JSON format) to the super table and automate the creation of the single-value model. And automate the creation of single-value models. 
This approach does not require major adjustments to the data writing application, nor does it require conversion of the written data format. + +At the C level, TDengine provides taos_insert_lines to write data in OpenTSDB format directly (in version 2.3.x this function corresponds to taos_schemaless_insert). For the code reference example, please refer to the sample code schemaless.c in the installation package directory. + + (2) Based on the full understanding of TDengine's data model, establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually, taking into account that OpenTSDB is a single-value mapping model, it is recommended to use the single-value model in TDengine. TDengine supports both multi-value and single-value models. + +- **Single-valued model**. + +The steps are as follows: the name of the metrics is used as the name of the TDengine super table, which is built with two basic data columns - timestamp and value, and the labels of the super table are equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. The sub-tables are named using a fixed rule row naming: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ... ` as sub-table names. + +Create 3 super tables in TDengine. 
+ +```sql +create stable memory(ts timestamp, val float) tags(host binary(12),memory_type binary(20), memory_type_instance binary(20), source binary(20)); +create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20)); +create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20)); +``` + + + +For sub-tables use dynamic table creation as shown below: + +```sql +insert into memory_vm130_memory_bufferred_collectd using memory tags('vm130', 'memory', 'buffer', 'collectd') values(1632979445, 3.0656); +``` + +Eventually about 340 sub-tables and 3 super-tables will be created in the system. Note that if the use of concatenated tag values causes the sub-table names to exceed the system limit (191 bytes), then some encoding (e.g. MD5) needs to be used to convert them to an acceptable length. + +- **Multi-value model** + +If you want to take advantage of TDengine's multi-value modeling capabilities, you need to first meet the requirement that different collected quantities have the same collection frequency and can reach the **data writing side simultaneously via a message queue**, thus ensuring that multiple metrics are written at once using SQL statements. The name of the metric is used as the name of the super table to create a multi-column data model for data that is collected at the same frequency and arrives simultaneously. The names of the sub-tables are generated using a fixed rule. Each metric above contains only one measurement value, so it cannot be transformed into a multi-value model. + + + +## Data triage and application adaptation + +Data is subscribed from the message queue and an adapted writer is started to write the data.
+ +After the data starts to be written for a sustained period, SQL statements can be used to check whether the amount of data written meets the expected write requirements. The following SQL statement is used to count the amount of data. + +```sql +select count(*) from memory +``` + +After completing the query, if the written data does not differ from the expected one, and there are no abnormal error messages from the writing program itself, then you can confirm that the data writing is complete and valid. + +TDengine does not support query or data fetch processing using OpenTSDB query syntax, but it does provide support for each type of OpenTSDB query. You can check Appendix 2 for the corresponding query processing adjustments and application usage, or refer to the TDengine user manual for a full understanding of the types of queries supported by TDengine. + +TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. See also the user manual for the specific operation and usage help. + +## Historical data migration + +### 1. Use the tool to migrate data automatically + +To facilitate the migration of historical data, we provide a plug-in for the data synchronization tool DataX, which can automatically write data to TDengine. It should be noted that DataX's automated data migration can only support the data migration process of single-value models. + +Please refer to the DataX help manual [github.com/taosdata/datax](http://github.com/taosdata/datax) for details on how to use DataX and how to use it to write data to TDengine. + +### 2.
Migrate data manually + +If you need to use a multi-value model for data writing, you need to develop your own tool to export data from OpenTSDB, then confirm which timelines can be merged and imported into the same timeline, and then write the timelines that can be imported at the same time into the database using SQL statements. + +The manual migration of data requires attention to two issues. + +(1) When storing the exported data on the disk, the disk needs to have enough storage space to be able to adequately accommodate the exported data files. To avoid straining the disk file storage after exporting the full amount of data, a partial import mode can be adopted, with the timelines belonging to the same super table being exported first, and then the exported part of the data files are imported into the TDengine system. + +(2) Under the full-load operation of the system, if there are enough remaining computing and IO resources, a multi-threaded import mechanism can be established to maximize the efficiency of data migration. Considering the huge load on the CPU brought by data parsing, the maximum number of parallel tasks needs to be controlled to avoid the overall system overload triggered by importing historical data. + +Due to the ease of operation of TDengine itself, there is no need to perform index maintenance, data format change processing, etc. throughout the process, and the whole process only needs to be executed sequentially. + +Once the historical data is fully imported into TDengine, the two systems are running simultaneously, after which the query requests can be switched to TDengine, thus achieving a seamless application switchover. + +## Appendix 1: Correspondence table of OpenTSDB query functions + +**Avg** + +Equivalent function: avg + +Example. + +SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s) + +Notes. + +1.
The value within Interval needs to be the same as the interval value of the outer query. +2. As the interpolation of values in OpenTSDB uses linear interpolation, use fill(linear) to declare the interpolation type in the interpolation clause. The following functions with the same interpolation requirements are all handled by this method. +3. The 20s parameter in Interval means that the inner query will generate results in a 20-second window. In a real query, it needs to be adjusted to the time interval between different records. This ensures that the interpolation results are generated equivalently to the original data. +4. Due to the special interpolation strategy and mechanism of OpenTSDB, the way of interpolation before computation in Aggregate queries makes it impossible for the computation result to be the same as TDengine's. However, in the case of Downsample, TDengine and OpenTSDB can obtain the same result (because OpenTSDB uses a completely different interpolation strategy for Aggregate and Downsample queries). + + +**Count** + +Equivalent function: count + +Example. + +select count(*) from super_table_name; + + + +**Dev** + +Equivalent function: stddev + +Example. + +Select stddev(val) from table_name + + + +**Estimated percentiles** + +Equivalent function: apercentile + +Example. + +Select apercentile(col1, 50, "t-digest") from table_name + +Remark. + +1. t-digest algorithm is used by default in OpenTSDB during approximate query processing, so to get the same calculation result, you need to specify the algorithm used in the apercentile function. TDengine supports two different approximation algorithms, which are declared by "default" and "t-digest". + + + +**First** + +Equivalent function: first + +Example. + +Select first(col1) from table_name + + + +**Last** + +Equivalent function: last + +Example.
+ +Select last(col1) from table_name + + + +**Max** + +Equivalent function: max + +Example. + +Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s) + +Note: The Max function requires interpolation, for the reasons given above. + + + +**Min** + +Equivalent function: min + +Example. + +Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s); + + + +**MinMax** + +Equivalent function: max + +Select max(val) from table_name + +Note: This function does not require interpolation, so it can be calculated directly. + + + +**MimMin** + +Equivalent function: min + +Select min(val) from table_name + +Note: This function does not require interpolation, so it can be calculated directly. + + + +**Percentile** + +Equivalent function: percentile + +备注: + + + +**Sum** + +Equivalent function: sum + +Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s) + +Note: This function does not require interpolation, so it can be calculated directly. + + + +**Zimsum** + +Equivalent function: sum + +Select sum(val) from table_name + +Note: This function does not require interpolation, so it can be calculated directly. + + + +完整示例: + +```json +//OpenTSDB query JSON +query = { +"start":1510560000, +"end": 1515000009, +"queries":[{ +"aggregator": "count", +"metric":"cpu.usage_user", +}] +} + +// Equivalent SQL: +SELECT count(*) +FROM `cpu.usage_user` +WHERE ts>=1510560000 AND ts<=1515000009 +``` + + + +## Appendix 2: Resource Estimation Methodology + +### Data generation environment + +We still use the hypothetical environment from Chapter 4 with 3 measurements. The data writing rate for temperature and humidity is one record every 5 seconds, with a timeline of 100,000. Air quality is written at a rate of one record every 10 seconds, with a timeline of 10,000, and a query request frequency of 500 QPS. 
+ +### Storage resource estimation + +Assuming that the number of sensor devices that generate data and require storage is `n`, the frequency of data generation is `t` records/second, and the length of each record is `L` bytes, the size of data generated per day is `n×t×L` bytes. Assuming a compression ratio of C, the size of data actually stored per day is `(n×t×L)/C` bytes. The storage resources are estimated to be able to accommodate 1.5 years of data. Under the production environment, the compression ratio C of TDengine is generally between 5 and 7; adding 20% redundancy to the final result, we can calculate the required storage resources: + +```matlab +(n×t×L)×(365×1.5)×(1+20%)/C +``` + +Combining the above formula and bringing the parameters into the calculation formula, the size of the raw data generated per year without considering tagging information is 11.8 TB. It should be noted that since tagging information is associated with each timeline in TDengine, it is not stored per record. So the size of the data volume to be recorded is somewhat reduced relative to the data generated, and this part of the tag data as a whole can be neglected. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. + +### Storage device selection considerations + +A hard disk device with good random read performance should be used; if SSDs are available, consider using SSDs as much as possible. Good random read performance of the disk is extremely helpful in improving the system's query performance and can improve the overall query response performance of the system. To obtain better query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000; it is better to reach 5000 IOPS or more.
To obtain an evaluation of the current device random read IO performance, it is recommended that fio software be used to evaluate its operational performance (see Appendix 1 for details on how to use it) to confirm whether it can meet the large file random read performance requirements. + +Hard disk write performance has little impact on TDengine; TDengine writes in append write mode, so as long as it has good sequential write performance, both SAS hard disks and SSDs, in general, can meet TDengine's requirements for disk write performance well. + +### Computational resource estimation + +Due to the specificity of IoT data, after the frequency of data generation is fixed, the process of TDengine writing maintains a relatively fixed amount of resource consumption (both computation and storage). As described in [TDengine Operation and Maintenance](https://www.taosdata.com/cn/documentation/administrator), 22,000 writes per second in this system consumes less than 1 CPU core. + +In terms of estimating the CPU resources required for queries, assuming that the application requires 10,000 QPS from the database and each query consumes about 1 ms of CPU time, then each core provides 1,000 QPS of queries per second, and at least 10 cores are required to satisfy 10,000 QPS of query requests. To make the overall CPU load of the system less than 50%, the whole cluster needs 10 cores twice as many, i.e., 20 cores. + +### Memory resource estimation + +The database allocates memory for each Vnode by default 16MB*3 buffers, the cluster system includes 22 CPU cores, then 22 virtual node Vnodes will be established by default, each Vnode contains 1000 tables, then it can accommodate all the tables. Then it takes about 1 and a half hours to write a full block, thus triggering a dropped disk, which can be unadjusted. 22 Vnodes require a total memory cache of about 1GB. 
considering the memory required for queries, assuming a memory overhead of about 50MB per query, then 500 queries concurrently require about 25GB of memory. + +In summary, a single 16-core 32GB machine can be used, or a cluster of two 8-core 16GB machines can be used. + +## Appendix 3: Cluster Deployment and Startup + +TDengine provides a wealth of help documentation on many aspects of cluster installation and deployment, here is an index of responsive documentation for your reference. + +### Cluster Deployment + +The first step is to install TDengine. Download the latest stable version of TDengine from the official website, unzip it and run install.sh to install it. Please refer to the blog ["Installing and uninstalling TDengine packages"](https://www.taosdata.com/blog/2019/08/09/566.html) for help on using the various installation packages. + +Be careful not to start the taosd service immediately after the installation is complete, but only after the parameters are properly configured. + +### Set the running parameters and start the service + +To ensure that the system can get the necessary information to run properly. Please set the following key parameters correctly on the server-side. + +FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. The specific meaning of each parameter and the requirements for setting them can be found in the documentation "TDengine Cluster Installation, Management" (https://www.taosdata.com/cn/ documentation/cluster)". + +Follow the same steps to set the parameters on the node that needs to run and start the taosd service, then add the Dnode to the cluster. + +Finally, start taos and execute the command show dnodes, if you can see all the nodes that have joined the cluster, then the cluster is successfully built. For the specific operation procedure and notes, please refer to the document "[TDengine Cluster Installation, Management](https://www.taosdata.com/cn/documentation/cluster)". 
+ +## Appendix 4: Super table names + +Since the metric name of OpenTSDB contains a dot ("."), and the dot has a special meaning in TDengine as the separator between database and table names, TDengine provides escapes to allow users to use keywords or special separators (e.g., the dot) in (super) table names. To use special characters, the table name needs to be enclosed in escape characters, e.g. `cpu.usage_user` would then be a legal (super) table name. + +## Appendix 5: Reference Articles + +1. [Quickly build an IT Ops monitoring system using TDengine + collectd/StatsD + Grafana](https://www.taosdata.com/cn/documentation20/devops/collectd) (Chinese) +2. [Writing collection data directly to TDengine via collectd](https://www.taosdata.com/cn/documentation20/insert#collectd) (Chinese) + diff --git a/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png b/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png new file mode 100644 index 0000000000000000000000000000000000000000..b34aec45bdbe30bebbce532d6150c40f80399c25 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3de5fb7a10a1cb22693468029bc26ad63a96d71 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be3704cb72d6c2614614852bfef17147ce49d061 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg differ diff --git
a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd406a140beea43fbfe2c417c85b872cfd6a2219 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png b/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png new file mode 100644 index 0000000000000000000000000000000000000000..e1334bb937febd395eca0b0c44c8a2f315910606 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..17d0fd31b9424b071783696668d5706b90274867 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..f122cbc5dc0bb5b7faccdbc7c4c8bcca59b6c9ed Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..d695a3af30154d2fc2217996f3ff4878abab097c Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png differ diff --git a/importSampleData/README.md b/importSampleData/README.md index 56c5be0da422aadc5e05fe000ab83c312d29b6c8..c945cf52cb82723681f37efc42d3325f89011d39 100644 --- 
a/importSampleData/README.md +++ b/importSampleData/README.md @@ -1,36 +1,28 @@ -## 样例数据导入 +# 样例数据导入 该工具可以根据用户提供的 `json` 或 `csv` 格式样例数据文件快速导入 `TDengine`,目前仅能在 Linux 上运行。 为了体验写入和查询性能,可以对样例数据进行横向、纵向扩展。横向扩展是指将一个表(监测点)的数据克隆到多张表,纵向扩展是指将样例数据中的一段时间范围内的数据在时间轴上复制。该工具还支持历史数据导入至当前时间后持续导入,这样可以测试插入和查询并行进行的场景,以模拟真实环境。 -## 下载安装 +## 编译安装 -### 下载可执行文件 +由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2]。执行以下命令即可编译成可执行文件 `bin/taosimport`。 -由于该工具使用 go 语言开发,为了方便使用,项目中已经提供了编译好的可执行文件 `bin/taosimport`。通过 `git clone https://github.com/taosdata/TDengine.git` 命令或者直接下载 `ZIP` 文件解压进入样例导入程序目录 `cd importSampleData`,执行 `bin/taosimport`。 - -### go 源码编译 - -由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2],而且需要安装 TDengine 的 Go Connector, 具体请参考[TDengine 连接器文档][3]。安装完成之后,执行以下命令即可编译成可执行文件 `bin/taosimport`。 ```shell -go get https://github.com/taosdata/TDengine/importSampleData -cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData +go mod tidy go build -o bin/taosimport app/main.go ``` -> 注:由于目前 TDengine 的 go connector 只支持 linux 环境,所以该工具暂时只能在 linux 系统中运行。 -> 如果 go get 失败可以下载之后复制 `github.com/taosdata/TDengine/importSampleData` 文件夹到 $GOPATH 的 src 目录下再执行 `go build -o bin/taosimport app/main.go`。 - ## 使用 ### 快速体验 执行命令 `bin/taosimport` 会根据默认配置执行以下操作: + 1. 创建数据库 - 自动创建名称为 `test_yyyyMMdd` 的数据库。 - + 自动创建名称为 `test_yyyyMMdd` 的数据库,`yyyyMMdd` 是当前日期,如`20211111`。 + 2. 
创建超级表 根据配置文件 `config/cfg.toml` 中指定的 `sensor_info` 场景信息创建相应的超级表。 @@ -48,21 +40,25 @@ go build -o bin/taosimport app/main.go taos> use test_yyyyMMdd; taos> select count(*) from s_sensor_info; ``` + * 查询各个分组的记录数 ```shell taos> select count(*) from s_sensor_info group by devgroup; ``` + * 按 1h 间隔查询各聚合指标 ```shell taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h); ``` + * 查询指定位置最新上传指标 ```shell taos> select last(*) from s_sensor_info where location = 'beijing'; ``` + > 更多查询及函数使用请参考 [数据查询][4] ### 详细使用说明 @@ -70,23 +66,23 @@ go build -o bin/taosimport app/main.go 执行命令 `bin/taosimport -h` 可以查看详细参数使用说明: * -cfg string - + 导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 `config/cfg.toml`。 - + * -cases string 需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 `[usecase]` 查看,可同时导入多个场景,中间使用逗号分隔,如:`sensor_info,camera_detection`,默认为 `sensor_info`。 - + * -hnum int 需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 `t_0` 数据,指定 hnum 为 2 时会根据原有表名创建 `t_0、t_1` 两张子表。默认为 100。 - + * -vnum int 需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次。 * -delay int - + 当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。 * -tick int @@ -102,25 +98,25 @@ go build -o bin/taosimport app/main.go 当 save 为 1 时保存统计信息的表名, 默认 statistic。 * -auto int - + 是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。 - + * -start string 导入的记录开始时间,格式为 `"yyyy-MM-dd HH:mm:ss.SSS"`,不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空。 - + * -interval int 导入的记录时间间隔,该设置只会在指定 `auto=1` 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。 * -thread int - + 执行导入数据的线程数目,默认为 10。 * -batch int - + 执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录。 - + * -host string 导入的 TDengine 服务器 IP,默认为 127.0.0.1。 @@ -138,7 +134,7 @@ go build -o bin/taosimport app/main.go 导入的 TDengine 用户密码,默认为 taosdata。 * -dropdb int - + 导入数据之前是否删除数据库,1 是,0 否, 默认 0。 * -db string @@ -160,7 +156,7 @@ go build -o bin/taosimport app/main.go 执行上述命令后会将 sensor_info 场景的数据横向扩展2倍从指定时间 `2019-12-12 00:00:00.000` 开始且记录间隔时间为 5000 毫秒开始导入,导入至当前时间后会自动持续导入。 ### 
config/cfg.toml 配置文件说明 - + ``` toml # 传感器场景 [sensor_info] # 场景名称 @@ -237,8 +233,6 @@ devid,location,color,devgroup,ts,temperature,humidity 0, beijing, white, 0, 1575129601000, 22, 14.377142 ``` - - [1]: https://github.com/taosdata/TDengine [2]: https://golang.org/doc/install [3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector diff --git a/importSampleData/go.mod b/importSampleData/go.mod index fa1d978e597b3eb5b9f35e45f599d5a0f97ff267..d2e58d302b3c917922206cbfc3a7d5afef8266c9 100644 --- a/importSampleData/go.mod +++ b/importSampleData/go.mod @@ -3,6 +3,6 @@ module github.com/taosdata/TDengine/importSampleData go 1.13 require ( - github.com/pelletier/go-toml v1.9.0 // indirect - github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect + github.com/pelletier/go-toml v1.9.0 + github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 ) diff --git a/packaging/cfg/nginxd.service b/packaging/cfg/nginxd.service new file mode 100644 index 0000000000000000000000000000000000000000..50bbc1a21de5e6645404ec1d4e9bcd6f177f69d2 --- /dev/null +++ b/packaging/cfg/nginxd.service @@ -0,0 +1,22 @@ +[Unit] +Description=Nginx For TDengine Service +After=network-online.target +Wants=network-online.target + +[Service] +Type=forking +PIDFile=/usr/local/nginxd/logs/nginx.pid +ExecStart=/usr/local/nginxd/sbin/nginx +ExecStop=/usr/local/nginxd/sbin/nginx -s stop +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service new file mode 100644 index 0000000000000000000000000000000000000000..452488b4e951e36c043c823e17cca5ab7dbfd21b --- /dev/null +++ b/packaging/cfg/taosd.service @@ -0,0 +1,21 @@ +[Unit] +Description=TDengine server service +After=network-online.target taosadapter.service +Wants=network-online.target 
taosadapter.service + +[Service] +Type=simple +ExecStart=/usr/bin/taosd +ExecStartPre=/usr/local/taos/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/tarbitratord.service b/packaging/cfg/tarbitratord.service new file mode 100644 index 0000000000000000000000000000000000000000..d60cb536b094fe6b6c472d55076dc4d1db669d68 --- /dev/null +++ b/packaging/cfg/tarbitratord.service @@ -0,0 +1,20 @@ +[Unit] +Description=TDengine arbitrator service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/tarbitrator +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/check_package.sh b/packaging/check_package.sh index 0870e8c8eccc1a745ae5b081e2726ed8d809cf2b..e625f90912825f30ba279ecf6dbe59ff7dade97f 100755 --- a/packaging/check_package.sh +++ b/packaging/check_package.sh @@ -142,11 +142,11 @@ function check_main_path() { function check_bin_path() { # check install bin dir and all sub dir - bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") + bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh") for i in "${bin_dir[@]}";do check_file ${sbin_dir} $i done - lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") + lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core") for i in "${lbin_dir[@]}";do check_link ${bin_link_dir}/$i done @@ -170,7 +170,7 @@ function check_lib_path() { function check_header_path() { # check all header - header_dir=("taos.h" "taoserror.h") + 
header_dir=("taos.h" "taosdef.h" "taoserror.h") for i in "${header_dir[@]}";do check_link ${inc_link_dir}/$i done diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index 235834a747e82886eef6c4540877307aa4dd3996..b6979cf26435e4c4b0d19f5c93bb92cda988b3bf 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -27,7 +27,6 @@ else ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index f28d98ba9a6fae4390bfa301760aff9583ba4e40..f2d6dcde4b2eb8e7b5ff8eb06067a8426e1d3f91 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -58,8 +58,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then @@ -69,6 +67,7 @@ fi cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include +cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector @@ 
-78,16 +77,6 @@ cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/c install_user_local_path="/usr/local" -if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then - mkdir -p ${pkg_dir}${install_user_local_path}/lib - cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/ - ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23 - ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so -fi -if [ -f ${compile_dir}/build/lib/libavro.a ]; then - cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/ -fi - if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ @@ -128,13 +117,7 @@ chmod 755 ${pkg_dir}/DEBIAN/* debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control -if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then - sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control -fi - #get taos version, then set deb name - - if [ "$verMode" == "cluster" ]; then debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then @@ -153,13 +136,11 @@ else exit 1 fi - - # make deb package dpkg -b ${pkg_dir} $debname echo "make deb package success!" 
cp ${pkg_dir}/*.deb ${output_dir} -# clean tmep dir +# clean temp dir rm -rf ${pkg_dir} diff --git a/packaging/deb/taosd b/packaging/deb/taosd index 5002607da20b621ca69a8a2a25e713879d0308af..fe356ca6565c916086273e5669918b04065964cd 100644 --- a/packaging/deb/taosd +++ b/packaging/deb/taosd @@ -7,19 +7,19 @@ # chkconfig: 2345 99 01 # ### BEGIN INIT INFO -# Provides: TDEngine +# Provides: TDengine # Required-Start: $local_fs $network $syslog # Required-Stop: $local_fs $network $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 -# Short-Description: Starts TDEngine taosd -# Description: Starts TDEngine taosd, a time-series database engine +# Short-Description: Starts TDengine taosd +# Description: Starts TDengine taosd, a time-series database engine ### END INIT INFO set -e PATH="/bin:/usr/bin:/sbin:/usr/sbin" -NAME="TDEngine" +NAME="TDengine" USER="root" GROUP="root" DAEMON="/usr/local/taos/bin/taosd" @@ -40,7 +40,7 @@ MAX_OPEN_FILES=65535 case "$1" in start) - log_action_begin_msg "Starting TDEngine..." + log_action_begin_msg "Starting TDengine..." $DAEMON_HTTPD & if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then @@ -57,7 +57,7 @@ case "$1" in ;; stop) - log_action_begin_msg "Stopping TDEngine..." + log_action_begin_msg "Stopping TDengine..." pkill -9 $DAEMON_HTTPD_NAME set +e if [ -f "$PID_FILE" ]; then @@ -66,12 +66,12 @@ case "$1" in log_action_cont_msg "TSD is not running but pid file exists, cleaning up" elif [ $? 
-eq 3 ]; then PID="`cat $PID_FILE`" - log_failure_msg "Failed to stop TDEngine (pid $PID)" + log_failure_msg "Failed to stop TDengine (pid $PID)" exit 1 fi rm -f "$PID_FILE" else - log_action_cont_msg "TDEngine was not running" + log_action_cont_msg "TDengine was not running" fi log_action_end_msg 0 set -e diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index c49bc0a8a356c960e27f3231c3e901de6d9a72ef..a54e9ca499330855b22daf523286ea5bbc509bb8 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -18,5 +18,5 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ LC_ALL=en_US.UTF-8 EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 -CMD ["taosd"] -VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] \ No newline at end of file +CMD ["run_taosd.sh"] +VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] diff --git a/packaging/release.sh b/packaging/release.sh index c82d5704ac5c4d89837f5afe4b1f6e27419279cc..ca8715f68430fc08b86f008936ddfc9409ae58a8 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -3,7 +3,7 @@ # Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os set -e -#set -x +set -x # release.sh -v [cluster | edge] # -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] @@ -11,9 +11,10 @@ set -e # -V [stable | beta] # -l [full | lite] # -s [static | dynamic] -# -d [taos | power | tq ] +# -d [taos | power | tq | pro | kh | jh] # -n [2.0.0.3] # -m [2.0.0.0] +# -H [ false | true] # set parameters by default value verMode=edge # [cluster, edge] @@ -22,12 +23,13 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] 
pagMode=full # [full | lite] soMode=dynamic # [static | dynamic] -dbName=taos # [taos | power | tq | pro] +dbName=taos # [taos | power | tq | pro | kh | jh] allocator=glibc # [glibc | jemalloc] verNumber="" verNumberComp="1.0.0.0" +httpdBuild=false -while getopts "hv:V:c:o:l:s:d:a:n:m:" arg +while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg do case $arg in v) @@ -70,6 +72,10 @@ do #echo "osType=$OPTARG" osType=$(echo $OPTARG) ;; + H) + #echo "httpdBuild=$OPTARG" + httpdBuild=$(echo $OPTARG) + ;; h) echo "Usage: `basename $0` -v [cluster | edge] " echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " @@ -78,9 +84,10 @@ do echo " -l [full | lite] " echo " -a [glibc | jemalloc] " echo " -s [static | dynamic] " - echo " -d [taos | power | tq | pro] " + echo " -d [taos | power | tq | pro | kh | jh] " echo " -n [version number] " echo " -m [compatible version number] " + echo " -H [false | true] " exit 0 ;; ?) #unknow option @@ -90,7 +97,7 @@ do esac done -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}" +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}" curr_dir=$(pwd) @@ -192,22 +199,295 @@ else allocator_macro="" fi +# for powerdb +if [[ "$dbName" == "power" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/PowerDB/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/powerdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i 
"s/taos config/power config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is power/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/power\/power\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/PowerDB/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/PowerDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" 
${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for tq +if [[ "$dbName" == "tq" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/TQueue/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/tq\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/tqdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/tq_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/tq config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/tqdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/tqlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/tqinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is tqueue/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/tq\/tq\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/tq/g" 
${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/TQ/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by TQ, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"tq> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for prodb if [[ "$dbName" == "pro" ]]; then - sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/prodbc\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/prodemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/prodb_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c + 
sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/prodlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/prolog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/proinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is prodb/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/ProDB\/prodb\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by Hanatech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"ProDB> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for KingHistorian +if [[ "$dbName" == "kh" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/khclient\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/khdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/kh_history/g" 
${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/kh config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + sed -i "s/support@taosdata.com/support@wellintech.com/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/khserverlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/khclientlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/khinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/dnode/CMakeLists.txt + sed -i "s/Default is taosdata/Default is khroot/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/kinghistorian\/kinghistorian\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/KingHistorian/g" 
${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"kh> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c fi -echo "build ${pagMode} package ..." -if [[ "$pagMode" == "lite" ]]; then +# for jinheng +if [[ "$dbName" == "jh" ]]; then + # Following files to change: + # * src/client/src/tscSystem.c + # * src/inc/taosdef.h + # * src/kit/shell/CMakeLists.txt + # * src/kit/shell/inc/shell.h + # * src/kit/shell/src/shellEngine.c + # * src/kit/shell/src/shellWindows.c + # * src/kit/taosdemo/taosdemo.c + # * src/kit/taosdump/taosdump.c + # * src/os/src/linux/linuxEnv.c + # * src/os/src/windows/wEnv.c + # * src/util/src/tconfig.c + # * src/util/src/tlog.c + + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeSystem.c + # src/dnode/src/dnodeMain.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeMain.c + # TODO: src/dnode/CMakeLists.txt +fi + +if [[ "$httpdBuild" == "true" ]]; then BUILD_HTTP=true +else + BUILD_HTTP=false +fi + +if [[ "$pagMode" == "full" ]]; then + BUILD_TOOLS=true +else + BUILD_TOOLS=false fi # check support cpu type if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then if [ "$verMode" != "cluster" ]; then - cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro} + # 
community-version compile + cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} else - cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro} + # enterprise-version compile + if [[ "$dbName" == "power" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/PowerDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "tq" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c 
+ # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "pro" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "kh" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "jh" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + #sed -i "s/taos\.cfg/taos\.cfg/g" 
${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + + cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} fi else echo "input cpuType=${cpuType} error!!!" @@ -216,9 +496,9 @@ fi if [[ "$allocator" == "jemalloc" ]]; then # jemalloc need compile first, so disable parallel build - make V=1 && ${csudo} make install + make -j 8 && ${csudo} make install else - make -j8 && ${csudo} make install + make -j 8 && ${csudo} make install fi cd ${curr_dir} @@ -237,10 +517,20 @@ if [ "$osType" != "Darwin" ]; then ${csudo} mkdir -p ${output_dir} cd ${script_dir}/deb ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - else + + if [[ "$pagMode" == "full" ]]; then + if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then + cd ${top_dir}/src/kit/taos-tools/packaging/deb + [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" + + taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}') + ${csudo} ./make-taos-tools-deb.sh ${top_dir} \ + ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} + fi + fi + else echo "==========dpkg command not exist, so not release deb package!!!" 
fi - ret='0' command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } if [ "$ret" -eq 0 ]; then @@ -252,7 +542,18 @@ if [ "$osType" != "Darwin" ]; then ${csudo} mkdir -p ${output_dir} cd ${script_dir}/rpm ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - else + + if [[ "$pagMode" == "full" ]]; then + if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then + cd ${top_dir}/src/kit/taos-tools/packaging/rpm + [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" + + taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}'|sed -e 's/-/_/g') + ${csudo} ./make-taos-tools-rpm.sh ${top_dir} \ + ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} + fi + fi + else echo "==========rpmbuild command not exist, so not release rpm package!!!" fi fi @@ -272,12 +573,21 @@ if [ "$osType" != "Darwin" ]; then ${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + elif [[ "$dbName" == "kh" ]]; then + ${csudo} ./makepkg_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} + ${csudo} ./makeclient_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} + ${csudo} ./makearbi_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + elif [[ "$dbName" == "jh" ]]; then + ${csudo} ./makepkg_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} + ${csudo} 
./makeclient_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} + ${csudo} ./makearbi_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} else ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} fi else + # only make client for Darwin cd ${script_dir}/tools ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName} fi diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 42ceeb791b6154f7d22a477bf3b3c3b8c726869c..b4cf3d1450619f6a43a5303afa45f71c4402c2e7 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -56,10 +56,6 @@ cd ${pkg_dir} ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS -if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then - sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file} -fi - ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} # copy rpm package to output_dir, and modify package name, then clean temp dir diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd index 46dd712e3139dad69d3db6db8b289d0f2424811a..f8a5a2357ea1e8f399d0692f1b0e0d6398e8f855 100644 --- a/packaging/rpm/taosd +++ b/packaging/rpm/taosd @@ -1,10 +1,10 @@ #!/bin/bash # -# taosd This shell script takes care of starting and stopping TDEngine. +# taosd This shell script takes care of starting and stopping TDengine. 
# # chkconfig: 2345 99 01 -# description: TDEngine is a districuted, scalable, high-performance Time Series Database -# (TSDB). More than just a pure database, TDEngine also provides the ability +# description: TDengine is a districuted, scalable, high-performance Time Series Database +# (TSDB). More than just a pure database, TDengine also provides the ability # to do stream computing, aggregation etc. # # @@ -13,8 +13,8 @@ # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $local_fs $remote_fs # Short-Description: start and stop taosd -# Description: TDEngine is a districuted, scalable, high-performance Time Series Database -# (TSDB). More than just a pure database, TDEngine also provides the ability +# Description: TDengine is a districuted, scalable, high-performance Time Series Database +# (TSDB). More than just a pure database, TDengine also provides the ability # to do stream computing, aggregation etc. ### END INIT INFO diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index f7b8462dbedc74a270a8560bb51a853e292cff27..e1a8a9f9f6aee3ebe7fbcdacd579235cde7165e8 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -71,10 +71,9 @@ cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin if [ -f %{_compiledir}/build/bin/taosadapter ]; then cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||: fi -cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin -cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include +cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector @@ -82,15 +81,6 @@ cp 
-r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/conn cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples -if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then - cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver - ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23 - ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so -fi -if [ -f %{_compiledir}/build/lib/libavro.a ]; then - cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver -fi - if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then mkdir -p %{buildroot}%{userlocalpath}/bin mkdir -p %{buildroot}%{userlocalpath}/lib @@ -205,10 +195,9 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosadapter || : - ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index dcd4a83da8929d76aa61d848985b5c4ffe46b9c5..6d007c0dd1527c281b09bf1f3623eab873b235d6 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -185,23 +185,25 @@ function install_bin() { # Remove links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosadapter || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : - ${csudo} rm -f ${bin_link_dir}/tarbitrator || : - ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f 
${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : + [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then @@ -236,6 +238,29 @@ function install_lib() { ${csudo} ldconfig } +function install_avro() { + if [ "$osType" != "Darwin" ]; then + avro_dir=${script_dir}/avro + if [ -f "${avro_dir}/lib/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/$1 + ${csudo} /usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.so.23.0.0 /usr/local/$1 + ${csudo} ln -sf /usr/local/$1/libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ${csudo} ln -sf 
/usr/local/$1/libavro.so.23 /usr/local/$1/libavro.so + + ${csudo} /usr/bin/install -c -d /usr/local/$1 + [ -f ${avro_dir}/lib/libavro.a ] && + ${csudo} /usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi + fi +} + function install_jemalloc() { jemalloc_dir=${script_dir}/jemalloc @@ -281,7 +306,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" @@ -290,9 +315,10 @@ function install_jemalloc() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -676,78 +702,93 @@ function clean_service_on_systemd() { function install_service_on_systemd() { clean_service_on_systemd - taosd_service_config="${service_config_dir}/taosd.service" - ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 
'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" - #${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + [ -f ${script_dir}/cfg/taosd.service ] &&\ + ${csudo} cp ${script_dir}/cfg/taosd.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #taosd_service_config="${service_config_dir}/taosd.service" + #${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" + #${csudo} bash 
-c "echo >> ${taosd_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + ##${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" + #${csudo} bash -c "echo >> ${taosd_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo} 
bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + [ -f ${script_dir}/cfg/tarbitratord.service ] &&\ + ${csudo} cp ${script_dir}/cfg/tarbitratord.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + #${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' 
>> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" #${csudo} systemctl enable tarbitratord if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - ${csudo} bash -c "echo >> ${nginx_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> 
${nginx_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - ${csudo} bash -c "echo >> ${nginx_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + [ -f ${script_dir}/cfg/nginxd.service ] &&\ + ${csudo} cp ${script_dir}/cfg/nginxd.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #nginx_service_config="${service_config_dir}/nginxd.service" + #${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + #${csudo} bash -c "echo >> ${nginx_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> 
${nginx_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + #${csudo} bash -c "echo >> ${nginx_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" if ! ${csudo} systemctl enable nginxd &> /dev/null; then ${csudo} systemctl daemon-reexec ${csudo} systemctl enable nginxd @@ -807,7 +848,7 @@ vercomp () { function is_version_compatible() { - curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + curr_version=`ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' 
'{print $2}'` if [ -f ${script_dir}/driver/vercomp.txt ]; then min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` @@ -848,6 +889,8 @@ function update_TDengine() { fi tar -zxf taos.tar.gz install_jemalloc + #install_avro lib + #install_avro lib64 echo -e "${GREEN}Start to update TDengine...${NC}" # Stop the service if running @@ -960,6 +1003,9 @@ function install_TDengine() { install_header install_lib install_jemalloc + #install_avro lib + #install_avro lib64 + if [ "$pagMode" != "lite" ]; then install_connector fi diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh index 3a5e64153836096268dee2be08919cd774b68ebe..33896a08b3f653dbddc1ad480ddab3bf73a513ef 100755 --- a/packaging/tools/install_arbi.sh +++ b/packaging/tools/install_arbi.sh @@ -116,9 +116,10 @@ function install_bin() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -167,7 +168,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" 
diff --git a/packaging/tools/install_arbi_jh.sh b/packaging/tools/install_arbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..2403f8fbd79abf4324577fe3dca3a8e0eac8ed01 --- /dev/null +++ b/packaging/tools/install_arbi_jh.sh @@ -0,0 +1,286 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) + +bin_link_dir="/usr/bin" +#inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/tarbitrator" + +# old bin dir +bin_dir="/usr/local/tarbitrator/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then 
+# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact jhict.com for support." + os_type=1 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/bin + #${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_jh.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || 
: + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install server service + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function install_service_on_systemd() { + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo} systemctl enable 
tarbitratord +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + kill_tarbitrator + fi +} + +function update() { + # Start to update + echo -e "${GREEN}Start to update jh_iot's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop tarbitratord || : + elif ((${service_mod}==1)); then + ${csudo} service tarbitratord stop || : + else + kill_tarbitrator + fi + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mjh_iot's arbitrator is updated successfully!${NC}" +} + +function install() { + # Start to install + echo -e "${GREEN}Start to install jh_iot's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi + + echo -e "\033[44;32;1mjh_iot's arbitrator is installed successfully!${NC}" + echo +} + + +## ==============================Main program starts from here============================ +# Install server and client +if [ -x ${bin_dir}/tarbitrator ]; then + update_flag=1 + update +else + install +fi + diff --git a/packaging/tools/install_arbi_kh.sh 
b/packaging/tools/install_arbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..9a2542936d935b70b762702f0f2f6ff92b51a4f3 --- /dev/null +++ b/packaging/tools/install_arbi_kh.sh @@ -0,0 +1,286 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) + +bin_link_dir="/usr/bin" +#inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/tarbitrator" + +# old bin dir +bin_dir="/usr/local/tarbitrator/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 
+elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact wellintech.com for support." + os_type=1 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/bin + #${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_kh.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); 
then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install khserver service + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function install_service_on_systemd() { + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo} systemctl enable 
tarbitratord +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + kill_tarbitrator + fi +} + +function update() { + # Start to update + echo -e "${GREEN}Start to update KingHistorian's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop tarbitratord || : + elif ((${service_mod}==1)); then + ${csudo} service tarbitratord stop || : + else + kill_tarbitrator + fi + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mKingHistorian's arbitrator is updated successfully!${NC}" +} + +function install() { + # Start to install + echo -e "${GREEN}Start to install KingHistorian's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi + + echo -e "\033[44;32;1mKingHistorian's arbitrator is installed successfully!${NC}" + echo +} + + +## ==============================Main program starts from here============================ +# Install server and client +if [ -x ${bin_dir}/tarbitrator ]; then + update_flag=1 + update +else + install +fi + diff --git 
a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh index 883db2b7169d125309125887cb72279c92c4602a..755684d1bdb1ea02781518e9a78ccf1d881fb271 100755 --- a/packaging/tools/install_arbi_power.sh +++ b/packaging/tools/install_arbi_power.sh @@ -160,7 +160,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" @@ -169,16 +169,14 @@ function install_jemalloc() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -279,7 +277,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -306,7 +303,6 @@ function update_PowerDB() { install_jemalloc echo - #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -329,7 +325,6 @@ function 
install_PowerDB() { install_jemalloc echo - #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh index 11165dbdd8bdf6afb4659250499cf1d9184c2395..3e80ad8215e3ec709c17a6a46e34f5bd6cf7ac6c 100755 --- a/packaging/tools/install_arbi_pro.sh +++ b/packaging/tools/install_arbi_pro.sh @@ -116,16 +116,14 @@ function install_bin() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -221,7 +219,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -247,7 +244,6 @@ function update_prodb() { install_service echo - #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -268,7 +264,6 @@ function install_prodb() { install_bin install_service echo - #echo -e "${GREEN_DARK}To configure ProDB 
${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh index bd852dd0ad2c9114f2424193adccf56b0cb40412..8757326605b05aca63a585d1c3e1c66c98f6aaa7 100755 --- a/packaging/tools/install_arbi_tq.sh +++ b/packaging/tools/install_arbi_tq.sh @@ -116,16 +116,14 @@ function install_bin() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -226,7 +224,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -252,7 +249,6 @@ function update_tq() { install_service echo - #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -273,7 +269,6 @@ function install_tq() { install_bin install_service echo - #echo -e 
"${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 3df7013b197baaf4d78bb0f0ae5d507d6be92715..a73f6ac9718064855a245c0505e179b6376d7c96 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -86,7 +86,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -97,7 +96,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : @@ -128,7 +126,7 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi - + if [ "$osType" != "Darwin" ]; then ${csudo} ldconfig else @@ -137,9 +135,10 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : 
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -188,7 +187,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" diff --git a/packaging/tools/install_client_jh.sh b/packaging/tools/install_client_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..a8599812feaea06120aa3391771e7c94523a53c2 --- /dev/null +++ b/packaging/tools/install_client_jh.sh @@ -0,0 +1,239 @@ +#!/bin/bash +# +# This file is used to install jh_taos client on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/jh_taos" + log_dir="/var/log/jh_taos" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/jh_taos" + log_dir="~/jh_taos/log" +fi + +log_link_dir="/usr/local/jh_taos/log" + +cfg_install_dir="/etc/jh_taos" + +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi + +#install main path +install_main_dir="/usr/local/jh_taos" + +# old bin dir +bin_dir="/usr/local/jh_taos/bin" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "jh_taos" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + fi + ${csudo} rm 
-f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : + [ -x ${install_main_dir}/bin/jh_taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosdump ${bin_link_dir}/jh_taosdump || : + fi + [ -x ${install_main_dir}/bin/remove_client_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_jh.sh ${bin_link_dir}/rmjh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi + + ${csudo} ldconfig +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f 
${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update() { + # Start to update + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to update jh_iot client...${NC}" + # Stop the client shell if running + if pidof jh_taos &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" + + rm -rf $(tar -tf jh_taos.tar.gz) +} + +function install() { + # Start to install + if [ ! 
-e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to install jh_taos client...${NC}" + + install_main_path + install_log + install_header + install_lib + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" + + rm -rf $(tar -tf jh_taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or updata client and client +# if server is already install, don't install client + if [ -e ${bin_dir}/jh_taosd ]; then + echo -e "\033[44;32;1mThere are already installed jh_iot server, so don't need install client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/jh_taos ]; then + update_flag=1 + update + else + install + fi diff --git a/packaging/tools/install_client_kh.sh b/packaging/tools/install_client_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..3e7df18486a20b9ea75dba3d2644d46bee6b423b --- /dev/null +++ b/packaging/tools/install_client_kh.sh @@ -0,0 +1,240 @@ +#!/bin/bash +# +# This file is used to install kinghistorian client on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/kinghistorian" + log_dir="/var/log/kinghistorian" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/kinghistorian" + log_dir="~/kinghistorian/log" +fi + +log_link_dir="/usr/local/kinghistorian/log" + +cfg_install_dir="/etc/kinghistorian" + +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi + +#install main path +install_main_dir="/usr/local/kinghistorian" + +# old bin dir +bin_dir="/usr/local/kinghistorian/bin" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "khclient" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/khclient || : + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f 
${bin_link_dir}/khdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : + [ -x ${install_main_dir}/bin/khdump ] && ${csudo} ln -s ${install_main_dir}/bin/khdump ${bin_link_dir}/khdump || : + fi + [ -x ${install_main_dir}/bin/remove_client_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_kh.sh ${bin_link_dir}/rmkh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi + + ${csudo} ldconfig +} + +function install_header() { + ${csudo} rm -f 
${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org + ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update() { + # Start to update + if [ ! 
-e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to update KingHistorian client...${NC}" + # Stop the client shell if running + if pidof khclient &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + +function install() { + # Start to install + if [ ! -e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to install KingHistorian client...${NC}" + + install_main_path + install_log + install_header + install_lib + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or updata client and client +# if server is already install, don't install client + if [ -e ${bin_dir}/khserver ]; then + echo -e "\033[44;32;1mThere are already installed KingHistorian server, so don't need install client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/khclient ]; then + update_flag=1 + update + else + install + fi diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh index 31da0d61319045800fe3a454d071118aa3a4768e..3c9abddb09646ea54f44e28664afea49822055f6 100755 --- a/packaging/tools/install_client_power.sh +++ b/packaging/tools/install_client_power.sh @@ -133,9 +133,10 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h 
${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -184,7 +185,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" @@ -193,16 +194,14 @@ function install_jemalloc() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/power.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org + ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg } @@ -248,9 +247,6 @@ function update_PowerDB() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config @@ -276,9 +272,6 @@ function install_PowerDB() { install_header install_lib install_jemalloc - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config diff --git 
a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh index fff8ae31200669ee3ab918a873e33fc32ece37c8..e34dc6d4ac1d1ef1715ff903b38b1a8735001985 100755 --- a/packaging/tools/install_client_pro.sh +++ b/packaging/tools/install_client_pro.sh @@ -109,7 +109,6 @@ function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* @@ -130,23 +129,22 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! 
-f ${cfg_install_dir}/prodb.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org + ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg } @@ -191,9 +189,6 @@ function update_prodb() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config @@ -218,9 +213,6 @@ function install_prodb() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config @@ -235,14 +227,14 @@ function install_prodb() { ## ==============================Main program starts from here============================ # Install or updata client and client # if server is already install, don't install client - if [ -e ${bin_dir}/prodbs ]; then - echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}" - exit 0 - fi +if [ -e ${bin_dir}/prodbs ]; then + echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}" + exit 0 +fi - if [ -x ${bin_dir}/prodbc ]; then - update_flag=1 - update_prodb - else - install_prodb - fi +if [ -x ${bin_dir}/prodbc ]; then + update_flag=1 + update_prodb +else + install_prodb +fi diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh index 2537442ee264e9aeb4eb6b3d25a17faf60f4df9a..b7f10324dcd1c62fee20fe11399fbb61d8a38577 100755 --- a/packaging/tools/install_client_tq.sh +++ b/packaging/tools/install_client_tq.sh @@ 
-133,23 +133,22 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/tq.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org + ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg } @@ -194,9 +193,6 @@ function update_tq() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config @@ -221,9 +217,6 @@ function install_tq() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi install_examples install_bin install_config diff --git a/packaging/tools/install_jh.sh b/packaging/tools/install_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..ecc166fba10ef4c3f8e0bacc04ff27ebf717e409 --- /dev/null +++ b/packaging/tools/install_jh.sh @@ -0,0 +1,950 @@ +#!/bin/bash +# +# This 
file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) +# Dynamic directory +data_dir="/var/lib/jh_taos" +log_dir="/var/log/jh_taos" + +data_link_dir="/usr/local/jh_taos/data" +log_link_dir="/usr/local/jh_taos/log" + +cfg_install_dir="/etc/jh_taos" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/jh_taos" + +# old bin dir +bin_dir="/usr/local/jh_taos/bin" + +service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + 
os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact jhict.com for support." + os_type=1 +fi + + +# ============================= get input parameters ================================================= + +# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] + +# set parameters by default value +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] + +while getopts "hv:e:i:" arg +do + case $arg in + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$( echo $OPTARG ) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) 
#unknow option + echo "unkonw argument" + exit 1 + ;; + esac +done + +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin +# ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver +# ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jh_taosd || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : + [ -x ${install_main_dir}/bin/jh_taosd ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosd ${bin_link_dir}/jh_taosd || : + [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : + [ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s 
${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 
${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
+ fi + fi +} + +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" 
${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + [ ! 
-z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" + + # first full-qualified domain name (FQDN) for jh_iot cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.jhict.com:6030) of an existing jh_iot cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + ${csudo} mkdir -p ${data_dir} + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof jh_taosd &> /dev/null; then + ${csudo} service jh_taosd stop || : + fi + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} chkconfig --del jh_taosd || : + fi + + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig 
--del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} insserv -r jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} update-rc.d -f jh_taosd remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/jh_taosd || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install jh_taosd service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/jh_taosd.deb ${install_main_dir}/init.d/jh_taosd + ${csudo} cp ${script_dir}/init.d/jh_taosd.deb ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/jh_taosd.rpm ${install_main_dir}/init.d/jh_taosd + ${csudo} cp ${script_dir}/init.d/jh_taosd.rpm ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add jh_taosd || : + ${csudo} chkconfig --level 2345 jh_taosd on || : + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || 
: + elif ((${initd_mod}==2)); then + ${csudo} insserv jh_taosd || : + ${csudo} insserv -d jh_taosd || : + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d jh_taosd defaults || : + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + jh_taosd_service_config="${service_config_dir}/jh_taosd.service" + if systemctl is-active --quiet jh_taosd; then + echo "jh_iot is running, stopping it..." + ${csudo} systemctl stop jh_taosd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable jh_taosd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${jh_taosd_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for jh_iot is running, stopping it..." 
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${nginx_service_config} + fi +} + +function install_service_on_systemd() { + clean_service_on_systemd + + service_config="${service_config_dir}/jh_taosd.service" + ${csudo} bash -c "echo '[Unit]' >> ${service_config}" + ${csudo} bash -c "echo 'Description=jh_iot server service' >> ${service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Service]' >> ${service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/jh_taosd' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStartPre=/usr/local/jh_taos/bin/startPre.sh' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Install]' >> ${service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" + ${csudo} systemctl enable jh_taosd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=jh_iot arbitrator 
service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For jh_iot Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + 
${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! ${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop jh_taosd + kill_process jh_taosd + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. 
+ local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` + else + min_compatible_version=$(${script_dir}/bin/jh_taosd -V | head -1 | cut -d ' ' -f 5) + fi + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update() { + # Start to update + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + install_jemalloc + + # Check if version compatible + if ! 
is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update jh_iot...${NC}" + # Stop the service if running + if pidof jh_taosd &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop jh_taosd || : + elif ((${service_mod}==1)); then + ${csudo} service jh_taosd stop || : + else + kill_process jh_taosd + fi + sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for jh_iot is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for jh_iot does not work! 
Please try again!\033[0m" + fi + fi + fi + + #echo + #echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}" + else + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ./jh_taosd${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell${NC}" + fi + + echo + echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf jh_taos.tar.gz) +} + +function install() { + # Start to install + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to install jh_iot...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for jh_iot is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for jh_iot does not work! 
Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask if to start the service + #echo + #echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}" + else + echo -e "${GREEN_DARK}To start jh_iot ${NC}: jh_taosd${NC}" + fi + + if [ ! -z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! 
-z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $serverFqdn${GREEN_DARK} to login into jh_iot server${NC}" + echo + fi + echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf jh_taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +serverFqdn=$(hostname) +if [ "$verType" == "server" ]; then + # Install server and client + if [ -x ${bin_dir}/jh_taosd ]; then + update_flag=1 + update + else + install + fi +elif [ "$verType" == "client" ]; then + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/jh_taos ]; then + update_flag=1 + update client + else + install client + fi +else + echo "please input correct verType" +fi diff --git a/packaging/tools/install_kh.sh b/packaging/tools/install_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..e53e014351ff9ae64e727151c72f3157a2077043 --- /dev/null +++ b/packaging/tools/install_kh.sh @@ -0,0 +1,950 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) +# Dynamic directory +data_dir="/var/lib/kinghistorian" +log_dir="/var/log/kinghistorian" + +data_link_dir="/usr/local/kinghistorian/data" +log_link_dir="/usr/local/kinghistorian/log" + +cfg_install_dir="/etc/kinghistorian" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/kinghistorian" + +# old bin dir +bin_dir="/usr/local/kinghistorian/bin" + +service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 
+elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact wellintech.com for support." + os_type=1 +fi + + +# ============================= get input parameters ================================================= + +# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] + +# set parameters by default value +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] + +while getopts "hv:e:i:" arg +do + case $arg in + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$( echo $OPTARG ) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) 
#unknow option + echo "unkonw argument" + exit 1 + ;; + esac +done + +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin +# ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver +# ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khserver || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : + [ -x ${install_main_dir}/bin/khserver ] && ${csudo} ln -s ${install_main_dir}/bin/khserver ${bin_link_dir}/khserver || : + [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : + [ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s 
${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 
${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
+ fi + fi +} + +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r 
"s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org + ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg + + [ ! 
-z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" + + # first full-qualified domain name (FQDN) for KingHistorian cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.wellintech.com:6030) of an existing KingHistorian cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/kinghistorian.cfg + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + ${csudo} mkdir -p ${data_dir} + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof khserver &> /dev/null; then + ${csudo} service khserver stop || : + fi + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} chkconfig --del khserver || : + fi + + if [ -e ${service_config_dir}/tarbitratord ]; 
then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} insserv -r khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} update-rc.d -f khserver remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/khserver || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install khserver service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/khserver.deb ${install_main_dir}/init.d/khserver + ${csudo} cp ${script_dir}/init.d/khserver.deb ${service_config_dir}/khserver && ${csudo} chmod a+x ${service_config_dir}/khserver + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/khserver.rpm ${install_main_dir}/init.d/khserver + ${csudo} cp ${script_dir}/init.d/khserver.rpm ${service_config_dir}/khserver && ${csudo} chmod a+x ${service_config_dir}/khserver + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add khserver || : + ${csudo} chkconfig --level 2345 khserver on || : + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig 
--level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv khserver || : + ${csudo} insserv -d khserver || : + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d khserver defaults || : + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + khserver_service_config="${service_config_dir}/khserver.service" + if systemctl is-active --quiet khserver; then + echo "KingHistorian is running, stopping it..." + ${csudo} systemctl stop khserver &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable khserver &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${khserver_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for KingHistorian is running, stopping it..." 
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${nginx_service_config} + fi +} + +function install_service_on_systemd() { + clean_service_on_systemd + + service_config="${service_config_dir}/khserver.service" + ${csudo} bash -c "echo '[Unit]' >> ${service_config}" + ${csudo} bash -c "echo 'Description=KingHistorian server service' >> ${service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Service]' >> ${service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/khserver' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Install]' >> ${service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" + ${csudo} systemctl enable khserver + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 
'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For KingHistorian Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + 
${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! ${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop khserver + kill_process khserver + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. 
+ local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` + else + min_compatible_version=$(${script_dir}/bin/khserver -V | head -1 | cut -d ' ' -f 5) + fi + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update() { + # Start to update + if [ ! -e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + install_jemalloc + + # Check if version compatible + if ! 
is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update KingHistorian...${NC}" + # Stop the service if running + if pidof khserver &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop khserver || : + elif ((${service_mod}==1)); then + ${csudo} service khserver stop || : + else + kill_process khserver + fi + sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for KingHistorian is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for KingHistorian does not work! 
Please try again!\033[0m" + fi + fi + fi + + #echo + #echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}" + else + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ./khserver${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell${NC}" + fi + + echo + echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + +function install() { + # Start to install + if [ ! 
-e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to install KingHistorian...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for KingHistorian is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for KingHistorian does not work! Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask if to start the service + #echo + #echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}" + else + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: khserver${NC}" + fi + + if [ ! 
-z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! -z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $serverFqdn${GREEN_DARK} to login into KingHistorian server${NC}" + echo + fi + echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + + +## ==============================Main program starts from here============================ +serverFqdn=$(hostname) +if [ "$verType" == "server" ]; then + # Install server and client + if [ -x ${bin_dir}/khserver ]; then + update_flag=1 + update + else + install + fi +elif [ "$verType" == "client" ]; then + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/khclient ]; then + update_flag=1 + update client + else + install client + fi +else + echo "please input correct verType" +fi diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 0e0ee7ba31f4715b2c5585dd040727d604aa90b1..cde33f9b368041d7072bc851d54b68e30a067a95 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -177,6 +177,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f 
${bin_link_dir}/run_taosd.sh || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -186,6 +187,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then @@ -210,13 +212,6 @@ function install_lib() { ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - #if [ "$verMode" == "cluster" ]; then - # # Compatible with version 1.5 - # ${csudo} mkdir -p ${v15_java_app_dir} - # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar - # ${csudo} chmod 777 ${v15_java_app_dir} || : - #fi - ${csudo} ldconfig } @@ -265,7 +260,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" 
@@ -274,9 +269,10 @@ function install_jemalloc() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -328,7 +324,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/power.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -363,7 +359,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg serverFqdn=$localFqdn echo return @@ -385,7 +381,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg serverFqdn=$localFqdn break fi @@ -432,16 +428,14 @@ function local_fqdn_check() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! 
-f ${cfg_install_dir}/power.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org + ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -471,7 +465,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/power.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -607,7 +601,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for PowerDB is running, stopping it..." 
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null @@ -646,7 +640,7 @@ function install_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo >> ${tarbitratord_service_config}" @@ -828,7 +822,7 @@ function update_PowerDB() { #echo #echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg" + echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}" elif ((${service_mod}==1)); then @@ -905,7 +899,7 @@ function install_PowerDB() { #echo #echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg" + echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh index e5675b858066148df07508ad2438b0f00d7ce7bf..44a21abc19f19a1bffc2be42e71db30157d4e220 100755 --- a/packaging/tools/install_pro.sh +++ b/packaging/tools/install_pro.sh @@ -172,6 +172,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/rmprodb || : ${csudo} rm -f 
${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -181,6 +182,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || : [ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then @@ -212,9 +214,10 @@ function install_lib() { } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -263,7 +266,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" 
@@ -316,7 +319,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/prodb.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -351,7 +354,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg serverFqdn=$localFqdn echo return @@ -373,7 +376,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg serverFqdn=$localFqdn break fi @@ -420,14 +423,14 @@ function local_fqdn_check() { } function install_config() { - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/prodb.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org + ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg [ ! 
-z $1 ] && return 0 || : # only install client @@ -457,7 +460,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/prodb.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -805,7 +808,7 @@ function update_prodb() { #echo #echo -e "\033[44;32;1mProDB is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" elif ((${service_mod}==1)); then @@ -882,7 +885,7 @@ function install_prodb() { #echo #echo -e "\033[44;32;1mProDB is installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh index ef5fb8c05a4a98a55918ee217125bd0f0a09b955..481cbb19792b654710aab13d99c50ab4b5475be1 100755 --- a/packaging/tools/install_tq.sh +++ b/packaging/tools/install_tq.sh @@ -177,6 +177,7 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/rmtq || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -186,6 +187,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s 
${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || : [ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then @@ -210,20 +212,14 @@ function install_lib() { ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - #if [ "$verMode" == "cluster" ]; then - # # Compatible with version 1.5 - # ${csudo} mkdir -p ${v15_java_app_dir} - # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar - # ${csudo} chmod 777 ${v15_java_app_dir} || : - #fi - ${csudo} ldconfig } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -272,7 +268,7 @@ function install_jemalloc() { fi if [ -d /etc/ld.so.conf.d ]; then - ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" 
@@ -328,7 +324,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/tq.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -363,7 +359,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg serverFqdn=$localFqdn echo return @@ -385,7 +381,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg serverFqdn=$localFqdn break fi @@ -432,16 +428,14 @@ function local_fqdn_check() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/tq.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org + ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg [ ! 
-z $1 ] && return 0 || : # only install client @@ -471,7 +465,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/tq.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -607,7 +601,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for TQ is running, stopping it..." ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null @@ -646,7 +640,7 @@ function install_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo >> ${tarbitratord_service_config}" @@ -828,7 +822,7 @@ function update_tq() { #echo #echo -e "\033[44;32;1mTQ is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg" + echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}" elif ((${service_mod}==1)); then @@ -905,7 +899,7 @@ function install_tq() { #echo #echo -e "\033[44;32;1mTQ is 
installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg" + echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 093b2bb0a7ea8033b7509e231200b8b4ad6901be..38b45871a6a96887fee19cd460f056410f52fbc5 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -4,7 +4,7 @@ # is required to use systemd to manage services at boot set -e -# set -x +#set -x # -----------------------Variables definition source_dir=$1 @@ -165,6 +165,7 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/perfMonitor || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin @@ -172,6 +173,7 @@ function install_bin() { ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin ${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin + ${csudo} cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin ${csudo} cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin ${csudo} chmod 0555 ${install_main_dir}/bin/* @@ -183,6 +185,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : [ -x ${install_main_dir}/bin/remove.sh ] && 
${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : else @@ -232,7 +235,7 @@ function install_jemalloc() { /usr/local/lib/pkgconfig fi if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" @@ -253,7 +256,7 @@ function install_jemalloc() { function install_avro() { if [ "$osType" != "Darwin" ]; then - if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then + if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then ${csudo} /usr/bin/install -c -d /usr/local/$1 ${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1 ${csudo} ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23 @@ -263,7 +266,7 @@ function install_avro() { ${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1 if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf + echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" ${csudo} ldconfig else echo "/etc/ld.so.conf.d not found!" 
@@ -329,15 +332,16 @@ function install_lib() { function install_header() { if [ "$osType" != "Darwin" ]; then - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \ + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h else - ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \ + ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ ${install_main_dir}/include \ - || ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \ + || ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ ${install_main_2_dir}/include \ && ${csudo} chmod 644 ${install_main_dir}/include/* \ || ${csudo} chmod 644 ${install_main_2_dir}/include/* @@ -345,9 +349,7 @@ function install_header() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh index 6dcabc2a0622e5fec67431c8663541a2b40048e1..d654910480e52b99e040df09e1fb9ecedbe5cad5 100755 --- a/packaging/tools/makearbi.sh +++ b/packaging/tools/makearbi.sh @@ -34,7 +34,7 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh" install_files="${script_dir}/install_arbi.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord diff --git a/packaging/tools/makearbi_jh.sh b/packaging/tools/makearbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..5457b163599421d0a5917156efde1c8814a6f514 --- /dev/null +++ b/packaging/tools/makearbi_jh.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# +# Generate arbitrator's tar.gz setup package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-arbitrator-${version}" +else + install_dir="${release_dir}/jh_iot-arbitrator-${version}" +fi + +# Directories and files. 
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_jh.sh" +install_files="${script_dir}/install_arbi_jh.sh" + +init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord +init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord + +# make directories. +mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_jh.sh || : +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" + exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makearbi_kh.sh b/packaging/tools/makearbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..c7fa40eb4f1fc4003e6a584bdc5c4534616754d6 --- /dev/null +++ b/packaging/tools/makearbi_kh.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# +# Generate arbitrator's tar.gz setup package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. 
+build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/KingHistorian-enterprise-arbitrator-${version}" +else + install_dir="${release_dir}/KingHistorian-arbitrator-${version}" +fi + +# Directories and files. +bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_kh.sh" +install_files="${script_dir}/install_arbi_kh.sh" + +init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord +init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord + +# make directories. +mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_kh.sh || : +#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" 
+ exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh index fd50ecd43878de08e7bb94249da8cb64c3630e6e..a942a7860dd4fd0a6590fceadc00abfc19815414 100755 --- a/packaging/tools/makearbi_power.sh +++ b/packaging/tools/makearbi_power.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh" install_files="${script_dir}/install_arbi_power.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord @@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -58,8 +57,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh index 6ce3765e44acc408ced9730c54b793338eb37b38..c432e97d4762da7a5a68672c46e118f76c59ae20 100755 --- a/packaging/tools/makearbi_pro.sh +++ b/packaging/tools/makearbi_pro.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh" install_files="${script_dir}/install_arbi_pro.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord @@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} 
${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -58,8 +57,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh index c10dfec255d411965a3887942e5d2aded4635979..3460696b08c11815a68edc12a61d53f2651d699a 100755 --- a/packaging/tools/makearbi_tq.sh +++ b/packaging/tools/makearbi_tq.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh" install_files="${script_dir}/install_arbi_tq.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord @@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -58,8 +57,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh 
index 39a35e384fffdd4f319e72fbeb819fe08f7871b8..4c3278e41b0c51e86d84c3e200092b4554e6523c 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,8 +45,10 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \ - ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" + bin_files="${script_dir}/remove_client.sh \ + ${script_dir}/set_core.sh \ + ${script_dir}/get_client.sh" + #${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else @@ -54,7 +56,7 @@ else lib_files="${build_dir}/lib/libtaos.${version}.dylib" fi -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else diff --git a/packaging/tools/makeclient_jh.sh b/packaging/tools/makeclient_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..bfbdcfc578bc7f8dfb15fef302d9817014ff3bef --- /dev/null +++ b/packaging/tools/makeclient_jh.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client in all os system +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi + +# create compressed install file. 
+build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-client-${version}" +else + install_dir="${release_dir}/jh_iot-client-${version}" +fi + +# Directories and files. + +if [ "$osType" != "Darwin" ]; then + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_jh.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + +install_files="${script_dir}/install_client_jh.sh" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg + +sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg + +mkdir -p ${install_dir}/bin +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos + cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin + else + cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos + cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + fi +else + cp ${bin_files} ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p 
${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + +cd ${install_dir} + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f jh_taos.tar.gz * --remove-files || : +else + tar -zcv -f jh_taos.tar.gz * || : + mv jh_taos.tar.gz .. + rm -rf ./* + mv ../jh_taos.tar.gz . 
+fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh + mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh + mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh +fi +chmod a+x ${install_dir}/install_client_jh.sh + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . 
+fi + +cd ${curr_dir} diff --git a/packaging/tools/makeclient_kh.sh b/packaging/tools/makeclient_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..fe171664f62b07152c876846e8a64cc41b8c2eed --- /dev/null +++ b/packaging/tools/makeclient_kh.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client in all os system +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/KingHistorian-enterprise-client-${version}" +else + install_dir="${release_dir}/KingHistorian-client-${version}" +fi + +# Directories and files. + +if [ "$osType" != "Darwin" ]; then + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_kh.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + +install_files="${script_dir}/install_client_kh.sh" + +# make directories. 
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg + +sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg + +mkdir -p ${install_dir}/bin +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/khclient + cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin + else + cp ${build_dir}/bin/taos ${install_dir}/bin/khclient + cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + fi +else + cp ${bin_files} ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp 
${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + +cd ${install_dir} + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f kinghistorian.tar.gz * --remove-files || : +else + tar -zcv -f kinghistorian.tar.gz * || : + mv kinghistorian.tar.gz .. + rm -rf ./* + mv ../kinghistorian.tar.gz . +fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh + mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh + mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh +fi +chmod a+x ${install_dir}/install_client_kh.sh + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or 
beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi + +cd ${curr_dir} diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index 19e24b3dafb7f1f95832e637e181449e4c381faf..0931d1350197bf31cfaa9f8a87cd1fe50d28ced3 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -53,7 +53,7 @@ else lib_files="${build_dir}/lib/libtaos.${version}.dylib" fi -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -109,15 +109,15 @@ if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${script_dir}/remove_power.sh ${install_dir}/bin + cp ${script_dir}/remove_client_power.sh ${install_dir}/bin else cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${script_dir}/remove_power.sh ${install_dir}/bin + cp ${script_dir}/remove_client_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin fi else cp ${bin_files} ${install_dir}/bin diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh index 4a0b033d30e6478f37a62f9cc896aee0903d39c9..023c16cf820481fcc16bb26f31e6acf58d8edbc1 100755 --- a/packaging/tools/makeclient_pro.sh 
+++ b/packaging/tools/makeclient_pro.sh @@ -46,7 +46,7 @@ else lib_files="${build_dir}/lib/libtaos.${version}.dylib" fi -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -58,26 +58,26 @@ install_files="${script_dir}/install_client_pro.sh" # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg mkdir -p ${install_dir}/bin if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc - cp ${script_dir}/remove_pro.sh ${install_dir}/bin + cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin else cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc - cp ${script_dir}/remove_pro.sh ${install_dir}/bin + cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin fi else cp ${bin_files} ${install_dir}/bin diff --git a/packaging/tools/makeclient_tq.sh 
b/packaging/tools/makeclient_tq.sh index 1cc7003661a7491b1df625916dd289de32434ee9..d554a05f6af40b79362d397071026591cf6714d4 100755 --- a/packaging/tools/makeclient_tq.sh +++ b/packaging/tools/makeclient_tq.sh @@ -40,20 +40,13 @@ fi # Directories and files. if [ "$osType" != "Darwin" ]; then -# if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/tqd -# strip ${build_dir}/bin/tq -# bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh" -# else -# bin_files="${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${script_dir}/remove_client_tq.sh ${script_dir}/set_core.sh" -# fi lib_files="${build_dir}/lib/libtaos.so.${version}" else bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh" lib_files="${build_dir}/lib/libtaos.${version}.dylib" fi -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -65,26 +58,26 @@ install_files="${script_dir}/install_client_tq.sh" # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg -sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg +sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg +sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg mkdir -p ${install_dir}/bin if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${script_dir}/remove_tq.sh ${install_dir}/bin + cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin else cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${script_dir}/remove_tq.sh ${install_dir}/bin + cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin fi else cp ${bin_files} ${install_dir}/bin diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 7ad703be86016bd0c0ce55c80b76bf34914c54bb..89bbbf9370e545d10aa8c8f9a4b16e0319693e30 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -31,27 +31,42 @@ else install_dir="${release_dir}/TDengine-server-${version}" fi +if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then + cd ${top_dir}/src/kit/taos-tools/packaging/deb + [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" + + taostools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}') + 
taostools_install_dir="${release_dir}/taos-tools-${taostools_ver}" + + cd ${curr_dir} +else + taostools_install_dir="${release_dir}/taos-tools-${version}" +fi + # Directories and files if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos # lite version doesn't include taosadapter, which will lead to no restful interface bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" + taostools_bin_files="" else bin_files="${build_dir}/bin/taosd \ ${build_dir}/bin/taos \ ${build_dir}/bin/taosadapter \ - ${build_dir}/bin/taosdump \ - ${build_dir}/bin/taosdemo \ ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ + ${script_dir}/run_taosd.sh \ ${script_dir}/startPre.sh \ ${script_dir}/taosd-dump-cfg.gdb" + + taostools_bin_files=" ${build_dir}/bin/taosdump \ + ${build_dir}/bin/taosBenchmark" fi lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -78,6 +93,7 @@ mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg + if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || : fi @@ -86,12 +102,47 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || : fi +if [ -f "${cfg_dir}/taosd.service" ]; then + cp ${cfg_dir}/taosd.service ${install_dir}/cfg || : +fi +if [ -f "${cfg_dir}/tarbitratord.service" ]; then + cp ${cfg_dir}/tarbitratord.service ${install_dir}/cfg || : +fi +if [ -f "${cfg_dir}/nginxd.service" ]; then + cp ${cfg_dir}/nginxd.service 
${install_dir}/cfg || : +fi + mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : +if [ -n "${taostools_bin_files}" ]; then + mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}" + mkdir -p ${taostools_install_dir}/bin \ + && cp ${taostools_bin_files} ${taostools_install_dir}/bin \ + && chmod a+x ${taostools_install_dir}/bin/* || : + [ -f ${taostools_install_dir}/bin/taosBenchmark ] && \ + ln -sf ${taostools_install_dir}/bin/taosBenchmark \ + ${taostools_install_dir}/bin/taosdemo + + if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then + cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \ + ${taostools_install_dir}/ > /dev/null \ + && chmod a+x ${taostools_install_dir}/install-taostools.sh \ + || echo -e "failed to copy install-taostools.sh" + else + echo -e "install-taostools.sh not found" + fi + + if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then + mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro" + cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib + cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig + fi +fi + if [ -f ${build_dir}/bin/jemalloc-config ]; then mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin @@ -219,6 +270,8 @@ cd ${release_dir} # install_dir has been distinguishes cluster from edege, so 
comments this code pkg_name=${install_dir}-${osType}-${cpuType} +taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType} + # if [ "$verMode" == "cluster" ]; then # pkg_name=${install_dir}-${osType}-${cpuType} # elif [ "$verMode" == "edge" ]; then @@ -230,8 +283,10 @@ pkg_name=${install_dir}-${osType}-${cpuType} if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then pkg_name=${install_dir}-${verType}-${osType}-${cpuType} + taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType} elif [ "$verType" == "stable" ]; then pkg_name=${pkg_name} + taostools_pkg_name=${taostools_pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 @@ -241,11 +296,20 @@ if [ "$pagMode" == "lite" ]; then pkg_name=${pkg_name}-Lite fi -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || : exitcode=$? if [ "$exitcode" != "0" ]; then echo "tar ${pkg_name}.tar.gz error !!!" exit $exitcode fi +if [ -n "${taostools_bin_files}" ]; then + tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || : + exitcode=$? + if [ "$exitcode" != "0" ]; then + echo "tar ${taostools_pkg_name}.tar.gz error !!!" + exit $exitcode + fi +fi + cd ${curr_dir} diff --git a/packaging/tools/makepkg_jh.sh b/packaging/tools/makepkg_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..fdc7ba5c79f66bdc5d9567999c644c2a3b4ad1f1 --- /dev/null +++ b/packaging/tools/makepkg_jh.sh @@ -0,0 +1,161 @@ +#!/bin/bash +# +# Generate tar.gz package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 +versionComp=$9 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. 
+build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +# package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-server-${version}" +else + install_dir="${release_dir}/jh_iot-server-${version}" +fi + +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi +install_files="${script_dir}/install_jh.sh" +nginx_dir="${code_dir}/../../enterprise/src/plugins/web" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/bin + +# bin +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos +else + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump + cp ${build_dir}/bin/tarbitrator ${install_dir}/bin + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/run_taosd.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/startPre.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin +fi +cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos +cp ${build_dir}/bin/taosd ${install_dir}/bin/jh_taosd +cp ${script_dir}/remove_jh.sh ${install_dir}/bin +chmod a+x ${install_dir}/bin/* || : + +# cluster +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_jh.sh >> remove_jh_temp.sh + mv remove_jh_temp.sh ${install_dir}/bin/remove_jh.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf 
${install_dir}/nginxd/png + + # replace the OEM name + sed -i -e 's/www.taosdata.com/www.jhict.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/TAOS Data/Jinheng Technology/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/taosd/jh_taosd/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` + sed -i -e 's/taosd<\/th>/jh_taosd<\/th>/g' ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['jh_taosd', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'jh_taosd',/g" ${install_dir}/nginxd/admin/monitor.html + sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/*.html + sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/js/*.js + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + +sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg +sed -i "s/support@taosdata.com/jhkj@njsteel.com.cn/g" ${install_dir}/cfg/taos.cfg +sed -i "s/taos client/client/g" ${install_dir}/cfg/taos.cfg +sed -i "s/taosd/server/g" ${install_dir}/cfg/taos.cfg + +cd ${install_dir} +tar -zcv -f jh_taos.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar jh_taos.tar.gz error !!!" 
+ exit $exitcode +fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_jh.sh >> install_jh_temp.sh + mv install_jh_temp.sh ${install_dir}/install_jh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/jh_taos_history/g" ${install_dir}/install.sh >> install_jh_temp.sh + mv install_jh_temp.sh ${install_dir}/install_jh.sh +fi + +sed -i "/install_connector$/d" ${install_dir}/install_jh.sh +sed -i "/install_examples$/d" ${install_dir}/install_jh.sh +chmod a+x ${install_dir}/install_jh.sh + +# Copy driver +mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" 
+ exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makepkg_kh.sh b/packaging/tools/makepkg_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..35efd2e0d7d4b5dad8bc2b360910eca69118bc6c --- /dev/null +++ b/packaging/tools/makepkg_kh.sh @@ -0,0 +1,161 @@ +#!/bin/bash +# +# Generate tar.gz package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 +versionComp=$9 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +# package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/KingHistorian-enterprise-server-${version}" +else + install_dir="${release_dir}/KingHistorian-server-${version}" +fi + +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi +install_files="${script_dir}/install_kh.sh" +nginx_dir="${code_dir}/../../enterprise/src/plugins/web" + +# make directories. 
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg +mkdir -p ${install_dir}/bin + +# bin +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos +else + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump + cp ${build_dir}/bin/tarbitrator ${install_dir}/bin + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/run_taosd.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/startPre.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin +fi +cp ${build_dir}/bin/taos ${install_dir}/bin/khclient +cp ${build_dir}/bin/taosd ${install_dir}/bin/khserver +cp ${script_dir}/remove_kh.sh ${install_dir}/bin +chmod a+x ${install_dir}/bin/* || : + +# cluster +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_kh.sh >> remove_kh_temp.sh + mv remove_kh_temp.sh ${install_dir}/bin/remove_kh.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + # replace the OEM name, add by yangzy@2021-09-22 + sed -i -e 's/www.taosdata.com/www.wellintech.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/TAOS Data/Wellintech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/taosd/khserver/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` + sed -i -e 's/taosd<\/th>/khserver<\/th>/g' 
${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['khserver', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'khserver',/g" ${install_dir}/nginxd/admin/monitor.html + sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/*.html + sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/js/*.js + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + +sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/support@taosdata.com/support@wellintech.com/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/taos client/khclient/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/taosd/khserver/g" ${install_dir}/cfg/kinghistorian.cfg + +cd ${install_dir} +tar -zcv -f kinghistorian.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar kinghistorian.tar.gz error !!!" 
+ exit $exitcode +fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_kh.sh >> install_kh_temp.sh + mv install_kh_temp.sh ${install_dir}/install_kh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/kh_history/g" ${install_dir}/install_kh.sh >> install_kh_temp.sh + mv install_kh_temp.sh ${install_dir}/install_kh.sh +fi + +sed -i "/install_connector$/d" ${install_dir}/install_kh.sh +sed -i "/install_examples$/d" ${install_dir}/install_kh.sh +chmod a+x ${install_dir}/install_kh.sh + +# Copy driver +mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknown verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknown verType, nor stable or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" + exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 65200ddd047358f92f8e3a612c08eedb60053311..ab2a662b118b9a6a62dbf87fc54d78358ab1861f 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -31,18 +31,8 @@ else install_dir="${release_dir}/PowerDB-server-${version}" fi -# Directories and files. 
-#if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/taosd -# strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh" -#else -# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh\ -# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" -#fi - lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -51,13 +41,6 @@ fi install_files="${script_dir}/install_power.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" -# Init file -#init_dir=${script_dir}/deb -#if [ $package_type = "centos" ]; then -# init_dir=${script_dir}/rpm -#fi -#init_files=${init_dir}/powerd -# temp use rpm's powerd. TODO: later modify according to os type init_file_deb=${script_dir}/../deb/powerd init_file_rpm=${script_dir}/../rpm/powerd init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord @@ -66,7 +49,7 @@ init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/power.cfg #mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/bin @@ -81,12 +64,13 @@ else # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd - cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: + cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/run_taosd.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin @@ -109,9 +93,9 @@ if [ "$verMode" == "cluster" ]; then sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js - sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg - sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg - sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg + sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg + sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg + sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/power.cfg if [ "$cpuType" == "aarch64" ]; then cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ diff --git 
a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh index 457cb0de6f02f7000dc7437cde61bfec28c7205c..ea370b2d95437b13f932b1925a4de8d1073cd294 100755 --- a/packaging/tools/makepkg_pro.sh +++ b/packaging/tools/makepkg_pro.sh @@ -32,7 +32,7 @@ else fi lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -44,7 +44,7 @@ nginx_dir="${code_dir}/../../enterprise/src/plugins/web" # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg mkdir -p ${install_dir}/bin # bin @@ -56,6 +56,7 @@ else cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/run_taosd.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin @@ -94,12 +95,12 @@ if [ "$verMode" == "cluster" ]; then rm -rf ${install_dir}/nginxd/sbin/arm fi -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg -sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/taos.cfg -sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/taos.cfg -sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i "s/TDengine/ProDB/g" 
${install_dir}/cfg/prodb.cfg +sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/prodb.cfg +sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/prodb.cfg +sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/prodb.cfg cd ${install_dir} tar -zcv -f prodb.tar.gz * --remove-files || : @@ -124,50 +125,9 @@ sed -i "/install_connector$/d" ${install_dir}/install_pro.sh sed -i "/install_examples$/d" ${install_dir}/install_pro.sh chmod a+x ${install_dir}/install_pro.sh -# Copy example code -#mkdir -p ${install_dir}/examples -#examples_dir="${top_dir}/tests/examples" -#cp -r ${examples_dir}/c ${install_dir}/examples -#sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c -#sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c -# -#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then -# cp -r ${examples_dir}/JDBC ${install_dir}/examples -# cp -r ${examples_dir}/matlab ${install_dir}/examples -# mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m -# cp -r ${examples_dir}/python ${install_dir}/examples -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py -# cp -r ${examples_dir}/R ${install_dir}/examples -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt -# cp -r ${examples_dir}/go ${install_dir}/examples -# mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go -# sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go -#fi - # Copy driver mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt -# Copy connector -#connector_dir="${code_dir}/connector" -#mkdir -p ${install_dir}/connector -#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then -# cp ${build_dir}/lib/*.jar 
${install_dir}/connector ||: - -# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then -# cp -r ${connector_dir}/go ${install_dir}/connector -# else -# echo "WARNING: go connector not found, please check if want to use it!" -# fi -# cp -r ${connector_dir}/python ${install_dir}/connector/ -# mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py - -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py - -# sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py -#fi - cd ${release_dir} if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh index 07032379d7e4bab2636f3685b6edb620780a124a..ff406421eaacf290e2e9269c358176fee20a80bb 100755 --- a/packaging/tools/makepkg_tq.sh +++ b/packaging/tools/makepkg_tq.sh @@ -31,18 +31,8 @@ else install_dir="${release_dir}/TQ-server-${version}" fi -# Directories and files. 
-#if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/taosd -# strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh" -#else -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\ -# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" -#fi - lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then cfg_dir="${top_dir}/../enterprise/packaging/cfg" else @@ -51,53 +41,33 @@ fi install_files="${script_dir}/install_tq.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" -# Init file -#init_dir=${script_dir}/deb -#if [ $package_type = "centos" ]; then -# init_dir=${script_dir}/rpm -#fi -#init_files=${init_dir}/tqd -# temp use rpm's tqd. TODO: later modify according to os type -#init_file_deb=${script_dir}/../deb/tqd -#init_file_rpm=${script_dir}/../rpm/tqd -#init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -#init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg -#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/bin if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd cp ${script_dir}/remove_tq.sh ${install_dir}/bin else -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd cp ${script_dir}/remove_tq.sh ${install_dir}/bin cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/run_taosd.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin fi chmod a+x ${install_dir}/bin/* || : -#mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/tqd.deb -#mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/tqd.rpm -#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - if [ "$verMode" == "cluster" ]; then sed 
's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh @@ -109,9 +79,9 @@ if [ "$verMode" == "cluster" ]; then sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js - sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg - sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg - sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg + sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg + sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg + sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg if [ "$cpuType" == "aarch64" ]; then cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ @@ -181,10 +151,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py fi -# Copy release note -# cp ${script_dir}/release_note ${install_dir} - -# exit 1 cd ${release_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 2f4b07067fd08ee3a9591f97e7291305307ff498..7a90435d8f1ce8dc190e0f1513aee080838e4645 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -81,29 +81,12 @@ function kill_taosd() { } function install_include() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h|| : + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h|| : ${csudo} ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h ${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h } -function install_avro_lib() { - ${csudo} rm -f ${lib_link_dir}/libavro* || : - ${csudo} rm -f ${lib64_link_dir}/libavro* || : - - if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then - ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0 - ${csudo} 
ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23 - ${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so - - if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libavro.so ]]; then - ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || : - ${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || : - ${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || : - fi - fi - - ${csudo} ldconfig -} function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos* || : ${csudo} rm -f ${lib64_link_dir}/libtaos* || : @@ -468,8 +451,8 @@ function install_service_on_systemd() { function install_taosadapter_service() { if ((${service_mod}==0)); then - [ -f ${script_dir}/cfg/taosadapter.service ] &&\ - ${csudo} cp ${script_dir}/cfg/taosadapter.service \ + [ -f ${script_dir}/../cfg/taosadapter.service ] &&\ + ${csudo} cp ${script_dir}/../cfg/taosadapter.service \ ${service_config_dir}/ || : ${csudo} systemctl daemon-reload fi @@ -503,7 +486,6 @@ function install_TDengine() { # Install include, lib, binary and service install_include install_lib - install_avro_lib install_bin install_config install_taosadapter_config diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index d2d36364208f23492d2ba6aefa783c85ad6d5572..43fead76ba675dfa8ee45422d9f9dc8166b2488d 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -122,10 +122,10 @@ ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : -${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${cfg_link_dir}/*.new || : ${csudo} rm -f ${inc_link_dir}/taos.h || : +${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f 
${lib64_link_dir}/libtaos.* || : diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 07a8362b2c45676986513020da668ff9235f00fa..4f814692ebaacf8017ede030a977af36528a27c1 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -27,6 +27,7 @@ install_nginxd_dir="/usr/local/nginxd" service_config_dir="/etc/systemd/system" taos_service_name="taosd" +taosadapter_service_name="taosadapter" tarbitrator_service_name="tarbitratord" nginx_service_name="nginxd" csudo="" @@ -78,12 +79,13 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosadapter || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : } function clean_lib() { @@ -96,6 +98,7 @@ function clean_lib() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } @@ -111,14 +114,20 @@ function clean_log() { function clean_service_on_systemd() { taosd_service_config="${service_config_dir}/${taos_service_name}.service" - taosadapter_service_config="${service_config_dir}/taosadapter.service" if systemctl is-active --quiet ${taos_service_name}; then echo "TDengine taosd is running, stopping it..." 
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} - [ -f ${taosadapter_service_config} ] && ${sudo} rm -f ${taosadapter_service_config} + + taosadapter_service_config="${service_config_dir}/taosadapter.service" + if systemctl is-active --quiet ${taosadapter_service_name}; then + echo "TDengine taosAdapter is running, stopping it..." + ${csudo} systemctl stop ${taosadapter_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${taosadapter_service_name} &> /dev/null || echo &> /dev/null + [ -f ${taosadapter_service_config} ] && ${csudo} rm -f ${taosadapter_service_config} tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then diff --git a/packaging/tools/remove_arbi.sh b/packaging/tools/remove_arbi.sh index 68fd9275fb922d3eb0ab82bc010262c6c61b2962..4495f25f3617c3c28c6dbd22ddeeda93cdf5423a 100755 --- a/packaging/tools/remove_arbi.sh +++ b/packaging/tools/remove_arbi.sh @@ -57,6 +57,7 @@ function clean_bin() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } diff --git a/packaging/tools/remove_arbi_jh.sh b/packaging/tools/remove_arbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..8b690771c761ac51772dac83cafec46360a16be3 --- /dev/null +++ b/packaging/tools/remove_arbi_jh.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Script to stop the service and uninstall jh_iot's arbitrator + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/tarbitrator" +bin_link_dir="/usr/bin" + +service_config_dir="/etc/systemd/system" +tarbitrator_service_name="tarbitratord" +csudo="" 
+if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf /arbitrator.log || : +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "jh_iot's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + echo "jh_iot's tarbitrator is running, stopping it..." 
+ ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. +##clean_header +# Remove log file +clean_log + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}jh_iot's arbitrator is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_arbi_kh.sh b/packaging/tools/remove_arbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..ec3254b01649add57f9485c59878059e086b2669 --- /dev/null +++ b/packaging/tools/remove_arbi_kh.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Script to stop the service and uninstall KingHistorian's arbitrator + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/tarbitrator" +bin_link_dir="/usr/bin" + +service_config_dir="/etc/systemd/system" +tarbitrator_service_name="tarbitratord" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + 
initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf /arbitrator.log || : +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "KingHistorian's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + echo "KingHistorian's tarbitrator is running, stopping it..." 
+ ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. +##clean_header +# Remove log file +clean_log + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}KingHistorian's arbitrator is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_arbi_power.sh b/packaging/tools/remove_arbi_power.sh index 077b19ec7d4208c604c2042c2aa1eacab2033c5b..27b08a47e87c28395faa004515702d9e1b51492a 100755 --- a/packaging/tools/remove_arbi_power.sh +++ b/packaging/tools/remove_arbi_power.sh @@ -57,6 +57,7 @@ function clean_bin() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } @@ -127,4 +128,4 @@ clean_log ${csudo} rm -rf ${install_main_dir} echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}" -echo \ No newline at end of file +echo diff --git a/packaging/tools/remove_arbi_pro.sh b/packaging/tools/remove_arbi_pro.sh index ff10478881628bdaf027c618a1b89f204ebbdb35..4bb435cc97e7b31341007ac56c6eb1bbe75a9fda 100755 --- a/packaging/tools/remove_arbi_pro.sh +++ 
b/packaging/tools/remove_arbi_pro.sh @@ -57,6 +57,7 @@ function clean_bin() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh index 3d99b6d41a74938d74383df3d8cdfc75c2ebb7c8..e0b401255f1071a21f0a5e09cd9b6e0e307ec5ba 100755 --- a/packaging/tools/remove_arbi_tq.sh +++ b/packaging/tools/remove_arbi_tq.sh @@ -57,6 +57,7 @@ function clean_bin() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } @@ -127,4 +128,4 @@ clean_log ${csudo} rm -rf ${install_main_dir} echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}" -echo \ No newline at end of file +echo diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 7579162dc60e290754e71ed6a71c10cfaee5537b..aad8d67d948d566b72820625391ba7592859c079 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -52,13 +52,14 @@ function clean_lib() { function clean_header() { # Remove link - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} rm -f ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { # Remove link - ${csudo} rm -f ${cfg_link_dir}/* || : + ${csudo} rm -f ${cfg_link_dir}/* || : } function clean_log() { diff --git a/packaging/tools/remove_client_jh.sh b/packaging/tools/remove_client_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..a3f5dfd10debb0a28211b3682becd083d49ca9c6 --- /dev/null +++ b/packaging/tools/remove_client_jh.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Script to stop the client and uninstall database, but retain the config and 
log files. +set -e +# set -x + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/jh_taos" + +log_link_dir="/usr/local/jh_taos/log" +cfg_link_dir="/usr/local/jh_taos/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +function kill_client() { + if [ -n "$(pidof jh_taos)" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +# Stop client. +kill_client +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}jh_iot client is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_client_kh.sh b/packaging/tools/remove_client_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..6a44e875e3426b14400508b1bdbd7510c2ae49cb --- /dev/null +++ b/packaging/tools/remove_client_kh.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Script to stop the client and uninstall database, but retain the config and log files. 
+set -e +# set -x + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/kinghistorian" + +log_link_dir="/usr/local/kinghistorian/log" +cfg_link_dir="/usr/local/kinghistorian/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +function kill_client() { + if [ -n "$(pidof khclient)" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/khdump || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +# Stop client. +kill_client +# Remove binary file and links +clean_bin +# Remove header file. 
+clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}KingHistorian client is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh index 580c46e2077d7f21e06d4d4a8f69dcd5b6bbf51d..75e9717e54b6e02ad5a5d8b28244caf89ab570fb 100755 --- a/packaging/tools/remove_client_power.sh +++ b/packaging/tools/remove_client_power.sh @@ -52,13 +52,14 @@ function clean_lib() { function clean_header() { # Remove link - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} rm -f ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { # Remove link - ${csudo} rm -f ${cfg_link_dir}/* || : + ${csudo} rm -f ${cfg_link_dir}/* || : } function clean_log() { diff --git a/packaging/tools/remove_client_pro.sh b/packaging/tools/remove_client_pro.sh index 59e4e8997620af035821df5a975fe58f1357c9dc..b7ddb27bf48c3e416523c021d42c6ae468fb04e2 100755 --- a/packaging/tools/remove_client_pro.sh +++ b/packaging/tools/remove_client_pro.sh @@ -46,13 +46,14 @@ function clean_lib() { function clean_header() { # Remove link - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} rm -f ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { # Remove link - ${csudo} rm -f ${cfg_link_dir}/* || : + ${csudo} rm -f ${cfg_link_dir}/* || : } function clean_log() { diff --git a/packaging/tools/remove_client_tq.sh b/packaging/tools/remove_client_tq.sh index ad8056c18cc32623edb8b77bf6aa17070acc1cbc..d701217c77e671a4ad24234bdfb4a196f5545970 100755 --- a/packaging/tools/remove_client_tq.sh +++ 
b/packaging/tools/remove_client_tq.sh @@ -52,13 +52,14 @@ function clean_lib() { function clean_header() { # Remove link - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} rm -f ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { # Remove link - ${csudo} rm -f ${cfg_link_dir}/* || : + ${csudo} rm -f ${cfg_link_dir}/* || : } function clean_log() { diff --git a/packaging/tools/remove_jh.sh b/packaging/tools/remove_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..6965ba388f99e8a2ac1c33def2f696f8d7c3898e --- /dev/null +++ b/packaging/tools/remove_jh.sh @@ -0,0 +1,210 @@ +#!/bin/bash +# +# Script to stop the service and uninstall jh_taos, but retain the config, data and log files. + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/jh_taos" +data_link_dir="/usr/local/jh_taos/data" +log_link_dir="/usr/local/jh_taos/log" +cfg_link_dir="/usr/local/jh_taos/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +service_config_dir="/etc/systemd/system" +service_name="jh_taosd" +tarbitrator_service_name="tarbitratord" +nginx_service_name="nginxd" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_process() { + pid=$(ps -ef | grep "jh_taosd" | grep -v "grep" | awk '{print $2}') + if 
[ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jh_taosd || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +function clean_service_on_systemd() { + service_config="${service_config_dir}/${service_name}.service" + if systemctl is-active --quiet ${service_name}; then + echo "jh_iot's jh_taosd is running, stopping it..." + ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${service_config} + + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "jh_iot's tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for jh_iot is running, stopping it..." + ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi +} + +function clean_service_on_sysvinit() { + if pidof jh_taosd &> /dev/null; then + echo "jh_iot's jh_taosd is running, stopping it..." + ${csudo} service jh_taosd stop || : + fi + + if pidof tarbitrator &> /dev/null; then + echo "jh_iot's tarbitrator is running, stopping it..." 
+ ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} chkconfig --del jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} insserv -r jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} update-rc.d -f jh_taosd remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/jh_taosd || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + kill_process + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. 
+clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config +# Remove data link directory +${csudo} rm -rf ${data_link_dir} || : + +${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} +if [[ -e /etc/os-release ]]; then + osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +else + osinfo="" +fi + +echo -e "${GREEN}jh_iot is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_kh.sh b/packaging/tools/remove_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..cf6a93fdf2bf8884bda02b4f44058eb7f8d77210 --- /dev/null +++ b/packaging/tools/remove_kh.sh @@ -0,0 +1,210 @@ +#!/bin/bash +# +# Script to stop the service and uninstall kinghistorian, but retain the config, data and log files. + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/kinghistorian" +data_link_dir="/usr/local/kinghistorian/data" +log_link_dir="/usr/local/kinghistorian/log" +cfg_link_dir="/usr/local/kinghistorian/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +service_config_dir="/etc/systemd/system" +service_name="khserver" +tarbitrator_service_name="tarbitratord" +nginx_service_name="nginxd" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_process() { + pid=$(ps -ef | grep "khserver" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then 
+ ${csudo} kill -9 $pid || : + fi +} + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khserver || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/khdump || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +function clean_service_on_systemd() { + service_config="${service_config_dir}/${service_name}.service" + if systemctl is-active --quiet ${service_name}; then + echo "KingHistorian's khserver is running, stopping it..." + ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${service_config} + + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "KingHistorian's tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for KingHistorian is running, stopping it..." + ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi +} + +function clean_service_on_sysvinit() { + if pidof khserver &> /dev/null; then + echo "KingHistorian's khserver is running, stopping it..." + ${csudo} service khserver stop || : + fi + + if pidof tarbitrator &> /dev/null; then + echo "KingHistorian's tarbitrator is running, stopping it..." 
+ ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} chkconfig --del khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} insserv -r khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} update-rc.d -f khserver remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/khserver || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + kill_process + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. 
+clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config +# Remove data link directory +${csudo} rm -rf ${data_link_dir} || : + +${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} +if [[ -e /etc/os-release ]]; then + osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +else + osinfo="" +fi + +echo -e "${GREEN}KingHistorian is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh index 816869cf444d8001e0c0aae30840d2c40a9e6af4..70d0095f978f8c7a2578c63110cdd118fef43e30 100755 --- a/packaging/tools/remove_power.sh +++ b/packaging/tools/remove_power.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to stop the service and uninstall TDengine, but retain the config, data and log files. +# Script to stop the service and uninstall PowerDB, but retain the config, data and log files. set -e #set -x @@ -76,6 +76,7 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : } function clean_lib() { @@ -88,6 +89,7 @@ function clean_lib() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } @@ -112,7 +114,7 @@ function clean_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TDengine tarbitrator is running, stopping it..." + echo "PowerDB tarbitrator is running, stopping it..." 
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null @@ -122,7 +124,7 @@ function clean_service_on_systemd() { nginx_service_config="${service_config_dir}/${nginx_service_name}.service" if [ -d ${bin_dir}/web ]; then if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for PowerDB is running, stopping it..." ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null @@ -133,9 +135,6 @@ function clean_service_on_systemd() { } function clean_service_on_sysvinit() { - #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof powerd &> /dev/null; then echo "PowerDB powerd is running, stopping it..." 
${csudo} service powerd stop || : @@ -183,7 +182,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_powerd kill_tarbitrator fi diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh index f6dad22bc21b02a9d717d530c50bc19c5a718478..1572a7f08c6083f7da3d81176f107e4cb977d4f9 100755 --- a/packaging/tools/remove_pro.sh +++ b/packaging/tools/remove_pro.sh @@ -74,6 +74,7 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/rmprodb || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : } function clean_lib() { @@ -85,6 +86,7 @@ function clean_lib() { function clean_header() { # Remove link ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } @@ -177,7 +179,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_prodbs kill_tarbitrator fi diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh index 211eed4dff09ab5da00d5c475cd93148b5ce1b24..eb83b92d1a5ef7e9e4ac498d98cc538a34da2a4f 100755 --- a/packaging/tools/remove_tq.sh +++ b/packaging/tools/remove_tq.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to stop the service and uninstall TDengine, but retain the config, data and log files. +# Script to stop the service and uninstall TQ, but retain the config, data and log files. 
set -e #set -x @@ -76,6 +76,7 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/rmtq || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : + ${csudo} rm -f ${bin_link_dir}/run_taosd.sh || : } function clean_lib() { @@ -87,13 +88,14 @@ function clean_lib() { function clean_header() { # Remove link - ${csudo} rm -f ${inc_link_dir}/taos.h || : - ${csudo} rm -f ${inc_link_dir}/taoserror.h || : + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taosdef.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { # Remove link - ${csudo} rm -f ${cfg_link_dir}/* || : + ${csudo} rm -f ${cfg_link_dir}/* || : } function clean_log() { @@ -109,10 +111,10 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable ${tq_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${tq_service_config} - + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TDengine tarbitrator is running, stopping it..." + echo "TQ tarbitrator is running, stopping it..." ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null @@ -122,7 +124,7 @@ function clean_service_on_systemd() { nginx_service_config="${service_config_dir}/${nginx_service_name}.service" if [ -d ${bin_dir}/web ]; then if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for TQ is running, stopping it..." 
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null @@ -183,7 +185,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_tqd kill_tarbitrator fi @@ -212,16 +213,5 @@ else osinfo="" fi -#if echo $osinfo | grep -qwi "ubuntu" ; then -## echo "this is ubuntu system" -# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "debian" ; then -## echo "this is debian system" -# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "centos" ; then -## echo "this is centos system" -# ${csudo} rpm -e --noscripts tdengine || : -#fi - echo -e "${GREEN}TQ is removed successfully!${NC}" echo diff --git a/packaging/tools/run_taosd.sh b/packaging/tools/run_taosd.sh new file mode 100755 index 0000000000000000000000000000000000000000..9ab9eb484a4a5bbc4e3d3994d97b61e0f4bd328d --- /dev/null +++ b/packaging/tools/run_taosd.sh @@ -0,0 +1,3 @@ +#!/bin/bash +[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter & +taosd diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index e508b66a16a0c14f99ac6cbd14445882f42513c3..48d3fe8675f7c5ab0e1f0678a269b03bc154a337 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -26,7 +26,7 @@ IF (TD_LINUX) ADD_LIBRARY(taos SHARED ${SRC}) TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt cJson) IF (TD_LINUX_64) - TARGET_LINK_LIBRARIES(taos lua cJson) + TARGET_LINK_LIBRARIES(taos ${LINK_LUA} cJson) ENDIF () SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) @@ -45,13 +45,13 @@ ELSEIF (TD_DARWIN) # set the static lib name ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua cJson) + TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m ${LINK_LUA} cJson) SET_TARGET_PROPERTIES(taos_static 
PROPERTIES OUTPUT_NAME "taos_static") SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua cJson) + TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m ${LINK_LUA} cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) #set version of .dylib @@ -77,7 +77,7 @@ ELSEIF (TD_WINDOWS) IF (NOT TD_GODLL) SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) ENDIF () - TARGET_LINK_LIBRARIES(taos trpc tutil query lua cJson) + TARGET_LINK_LIBRARIES(taos trpc tutil query ${LINK_LUA} cJson) ELSEIF (TD_DARWIN) SET(CMAKE_MACOSX_RPATH 1) @@ -85,12 +85,12 @@ ELSEIF (TD_DARWIN) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua cJson) + TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m ${LINK_LUA} cJson) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua cJson) + TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m ${LINK_LUA} cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index b6f0ec712c9bbd0d48b560a5e72768a021e2b74d..5293a176d85a0d928e3cf6211c31e301352ce2e7 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -42,7 +42,7 @@ int32_t tscHandleInsertRetry(SSqlObj* parent, SSqlObj* child); void tscBuildResFromSubqueries(SSqlObj *pSql); TAOS_ROW doSetResultRowData(SSqlObj *pSql); -char *getArithmeticInputSrc(void *param, const char *name, int32_t colId); +char *getScalarExprInputSrc(void *param, const char *name, int32_t colId); void tscLockByThread(int64_t *lockedBy); diff --git 
a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b183598fcceff926cfba235e42d8634546b36a48..dd9db517956e9e72ebef040c6b765c8a315a95ad 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -35,12 +35,12 @@ extern "C" { #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) -#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ - (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo))) - #define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) +#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ + (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo))) + #define UTIL_GET_VGROUPMAP(pSql) \ (pSql->pTscObj->pClusterInfo->vgroupMap) @@ -256,7 +256,7 @@ void tscColumnListDestroy(SArray* pColList); void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid); void tscColumnListCopyAll(SArray* dst, const SArray* src); -void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar); +void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar, bool convertJson); void tscDequoteAndTrimToken(SStrToken* pToken); void tscRmEscapeAndTrimToken(SStrToken* pToken); @@ -264,7 +264,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) void tscIncStreamExecutionCount(void* pStream); -bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams); +bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId); // get starter position of metric query condition (query on tags) in SSqlCmd.payload SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid); @@ -364,7 +364,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx); bool tscSetSqlOwner(SSqlObj* pSql); 
void tscClearSqlOwner(SSqlObj* pSql); -int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); +int32_t doScalarExprCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); char* serializeTagData(STagData* pTagData, char* pMsg); int32_t copyTagData(STagData* dst, const STagData* src); @@ -394,6 +394,9 @@ void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id); char* cloneCurrentDBName(SSqlObj* pSql); +int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, int16_t startColId); +int8_t jsonType2DbType(double data, int jsonType); +void getJsonKey(SStrToken *t0); char* cloneCurrentDBName(SSqlObj* pSql); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 7f35cf0ea5080cbb49db3a78b7d53df58cb9724c..10cfe8bcc5053fa9fe44982268d9e8843d071ce6 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -115,8 +115,9 @@ typedef struct SParsedDataColInfo { int16_t numOfCols; int16_t numOfBound; uint16_t flen; // TODO: get from STSchema - uint16_t allNullLen; // TODO: get from STSchema + uint16_t allNullLen; // TODO: get from STSchema(base on SDataRow) uint16_t extendedVarLen; + uint16_t boundNullLen; // bound column len with all NULL value(without VarDataOffsetT/SColIdx part) int32_t * boundedColumns; // bound column idx according to schema SBoundColumn * cols; SBoundIdxInfo *colIdxInfo; @@ -132,7 +133,7 @@ typedef struct { typedef struct { uint8_t memRowType; // default is 0, that is SDataRow uint8_t compareStat; // 0 no need, 1 need compare - TDRowTLenT kvRowInitLen; + int32_t rowSize; SMemRowInfo *rowInfo; } SMemRowBuilder; @@ -150,8 +151,7 @@ typedef struct { int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); -int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, - int32_t allNullLen); +int 
initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColInfo *pColInfo); void destroyMemRowBuilder(SMemRowBuilder *pBuilder); /** @@ -453,7 +453,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo); int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted); -void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar); +void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar, bool convertJson); void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent); void destroyTableNameList(SInsertStatementParam* pInsertParam); @@ -499,6 +499,8 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes); void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols); char *tscGetErrorMsgPayload(SSqlCmd *pCmd); +int32_t tscGetErrorMsgLength(SSqlCmd* pCmd); + int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql); int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql); @@ -531,16 +533,6 @@ static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; } -static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) { - if (isDataRow(row)) { - if (kvLen < (dataLen * KVRatioConvert)) { - memRowSetConvert(row); - } - } else if (kvLen > dataLen) { - memRowSetConvert(row); - } -} - static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) { memRowSetType(row, memRowType); if (isDataRowT(memRowType)) { @@ -640,8 +632,7 @@ static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; static FORCE_INLINE 
int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str, - bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId, - int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) { + bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId) { int64_t iv; int32_t ret; char * endptr = NULL; @@ -653,26 +644,22 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok switch (pSchema->type) { case TSDB_DATA_TYPE_BOOL: { // bool if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { if (strncmp(pToken->z, "true", pToken->n) == 0) { - tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &TRUE_VALUE, true, colId, pSchema->type, toffset); } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, &FALSE_VALUE, true, colId, pSchema->type, toffset); } else { return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); } } else if (pToken->type == TK_INTEGER) { iv = strtoll(pToken->z, NULL, 10); - tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, - dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset); } else if (pToken->type == TK_FLOAT) { double dv = strtod(pToken->z, NULL); - tscAppendMemRowColValEx(row, ((dv == 0) ? 
&FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, - dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset); } else { return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); } @@ -682,8 +669,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok case TSDB_DATA_TYPE_TINYINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); if (ret != TSDB_CODE_SUCCESS) { @@ -693,15 +679,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } uint8_t tmpVal = (uint8_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_UTINYINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); if (ret != TSDB_CODE_SUCCESS) { @@ -711,15 +696,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } uint8_t tmpVal = (uint8_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_SMALLINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + 
tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); if (ret != TSDB_CODE_SUCCESS) { @@ -729,15 +713,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } int16_t tmpVal = (int16_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_USMALLINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); if (ret != TSDB_CODE_SUCCESS) { @@ -747,15 +730,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } uint16_t tmpVal = (uint16_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_INT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); if (ret != TSDB_CODE_SUCCESS) { @@ -765,15 +747,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } int32_t tmpVal = (int32_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_UINT: if 
(isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); if (ret != TSDB_CODE_SUCCESS) { @@ -783,15 +764,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } uint32_t tmpVal = (uint32_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_BIGINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); if (ret != TSDB_CODE_SUCCESS) { @@ -800,14 +780,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); } - tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &iv, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_UBIGINT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); if (ret != TSDB_CODE_SUCCESS) { @@ -817,14 +796,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } uint64_t tmpVal = (uint64_t)iv; - tscAppendMemRowColValEx(row, &tmpVal, true, 
colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_FLOAT: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { double dv; if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { @@ -837,14 +815,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok } float tmpVal = (float)dv; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_DOUBLE: if (isNullStr(pToken)) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { double dv; if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { @@ -855,15 +832,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); } - tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &dv, true, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_BINARY: // binary data cannot be null-terminated char string, otherwise the last char of the string is lost if (pToken->type == TK_NULL) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { // too long values will return invalid sql, not be truncated automatically if 
(pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); @@ -871,14 +847,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); char *rowEnd = memRowEnd(row); STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n); - tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, rowEnd, false, colId, pSchema->type, toffset); } break; case TSDB_DATA_TYPE_NCHAR: if (pToken->type == TK_NULL) { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } else { // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' int32_t output = 0; @@ -890,7 +865,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok return tscInvalidOperationMsg(msg, buf, pToken->z); } varDataSetLen(rowEnd, output); - tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, rowEnd, false, colId, pSchema->type, toffset); } break; @@ -899,17 +874,16 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok if (primaryKey) { // When building SKVRow primaryKey, we should not skip even with NULL value. 
int64_t tmpVal = 0; - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } else { - tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, - compareStat); + tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset); } } else { int64_t tmpVal; if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); } - tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset); } break; diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 5127aaf665b8059a12ef0985140c2a01ea328bfa..32a07b3aad20d8399620b13bf8c4fdb440a8e106 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -547,6 +547,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn jniFromNCharToByteArray(env, (char *)row[i], length[i])); break; } + case TSDB_DATA_TYPE_JSON: { + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, + jniFromNCharToByteArray(env, (char *)row[i], length[i])); + break; + } case TSDB_DATA_TYPE_TIMESTAMP: { int precision = taos_result_precision(result); (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]), precision); diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index 2ca99e072791791a9ec70b522a9fb5bc2334a467..5d936fe7067a9ce13a590537c2ba6162cf2a6c83 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -233,7 +233,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor // sort before flush to disk, the data must be consecutively 
put on tFilePage. if (pDesc->orderInfo.numOfCols > 0) { - tColDataQSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType); + tColDataMergeSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType); } #ifdef _DEBUG_VIEW @@ -364,7 +364,9 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j); int32_t functionId = pExprInfo->base.functionId; - if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG)) { + + if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) { + orderColIndexList[i] = j; break; } @@ -606,6 +608,7 @@ static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, i SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE); } else { + assert(!TSDB_FUNC_IS_SCALAR(functionId)); aAggs[functionId].mergeFunc(&pCtx[j]); } } @@ -622,6 +625,7 @@ static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); } else { + assert(!TSDB_FUNC_IS_SCALAR(functionId)); aAggs[functionId].xFinalize(&pCtx[j]); } } @@ -661,8 +665,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD if (pCtx[j].functionId < 0) { continue; } - - aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo); + { + assert(!TSDB_FUNC_IS_SCALAR(pCtx[j].functionId)); + aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo); + } } doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr); @@ -704,12 +710,12 @@ SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint } // todo remove it -int32_t 
doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { +int32_t doScalarExprCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { int32_t maxRowSize = MAX(rowSize, finalRowSize); char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize)); size_t size = tscNumOfFields(pQueryInfo); - SArithmeticSupport arithSup = {0}; + SScalarExprSupport arithSup = {0}; // todo refactor arithSup.offset = 0; @@ -730,7 +736,10 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_ // calculate the result from several other columns if (pSup->pExpr->pExpr != NULL) { arithSup.pExprInfo = pSup->pExpr; - arithmeticTreeTraverse(arithSup.pExprInfo->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc); + tExprOperandInfo output; + output.data = pbuf + pOutput->num*offset; + exprTreeNodeTraverse(arithSup.pExprInfo->pExpr, (int32_t)pOutput->num, &output, &arithSup, TSDB_ORDER_ASC, + getScalarExprInputSrc); } else { SExprInfo* pExpr = pSup->pExpr; memcpy(pbuf + pOutput->num * offset, pExpr->base.offset * pOutput->num + pOutput->data, (size_t)(pExpr->base.resBytes * pOutput->num)); @@ -902,8 +911,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) { clearOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity); continue; } - - aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo); + { + assert(!TSDB_FUNC_IS_SCALAR(pCtx->functionId)); + aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo); + } } } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index c3c65018a50aea8e7f36d89c15c6b7faa12f2047..90379e6f7e5ccb5da12e6007ca0e94cfc859ee53 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -85,16 +85,15 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1); dst = pRes->data + 
tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i; - STR_WITH_MAXSIZE_TO_VARSTR(dst, type, pField->bytes); - + int32_t bytes = pSchema[i].bytes; - if (pSchema[i].type == TSDB_DATA_TYPE_BINARY || pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { + if (pSchema[i].type == TSDB_DATA_TYPE_BINARY){ bytes -= VARSTR_HEADER_SIZE; - - if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { - bytes = bytes / TSDB_NCHAR_SIZE; - } + } + else if(pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) { + bytes -= VARSTR_HEADER_SIZE; + bytes = bytes / TSDB_NCHAR_SIZE; } pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2); @@ -222,7 +221,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt return -1; } uint8_t type = fields[idx].type; - int32_t length = lengths[idx]; + int32_t length = lengths[idx]; switch (type) { case TSDB_DATA_TYPE_BOOL: @@ -248,6 +247,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_JSON: memcpy(result, val, length); break; case TSDB_DATA_TYPE_TIMESTAMP: @@ -440,7 +440,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); int32_t numOfRows = 1; if (strlen(ddl) == 0) { - + } pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self); tscInitResForMerge(&pSql->res); @@ -459,7 +459,7 @@ static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char * int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result); tscFieldInfoUpdateOffset(pQueryInfo); - return tscSCreateSetValueToResObj(pSql, rowLen, str, result); + return tscSCreateSetValueToResObj(pSql, rowLen, str, result); } int32_t tscRebuildCreateTableStatement(void *param,char *result) { SCreateBuilder *builder = (SCreateBuilder *)param; @@ -473,8 +473,8 @@ int32_t 
tscRebuildCreateTableStatement(void *param,char *result) { code = tscGetTableTagValue(builder, buf); if (code == TSDB_CODE_SUCCESS) { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE TABLE `%s` USING `%s` TAGS %s", builder->buf, builder->sTableName, buf); - code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_TABLE, builder->buf, result); - } + code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_TABLE, builder->buf, result); + } free(buf); return code; } @@ -490,27 +490,27 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { TAOS_FIELD *fields = taos_fetch_fields(pSql); int num_fields = taos_num_fields(pSql); - char buf[TSDB_DB_NAME_LEN + 64] = {0}; + char buf[TSDB_DB_NAME_LEN + 64] = {0}; do { memset(buf, 0, sizeof(buf)); - int32_t* lengths = taos_fetch_lengths(pSql); + int32_t* lengths = taos_fetch_lengths(pSql); int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf); if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); for (int i = 1; i < num_fields; i++) { for (int j = 0; showColumns[j] != NULL; j++) { if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) { memset(buf, 0, sizeof(buf)); ret = tscGetNthFieldResult(row, fields, lengths, i, buf); if (ret == 0) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); } } } } break; - } - + } + row = tscFetchRow(builder); } while (row != NULL); @@ -528,9 +528,9 @@ int32_t tscRebuildCreateDBStatement(void *param,char *result) { if (buf == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - code = 
tscGetDBInfo(param, buf); + code = tscGetDBInfo(param, buf); if (code == TSDB_CODE_SUCCESS) { - code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_DB, builder->buf, buf); + code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_DB, builder->buf, buf); } free(buf); return code; @@ -539,7 +539,7 @@ int32_t tscRebuildCreateDBStatement(void *param,char *result) { static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) { char *buf = (char *)malloc(TSDB_MAX_BINARY_LEN); if (buf == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return TSDB_CODE_TSC_OUT_OF_MEMORY; } buf[0] = 0; @@ -548,33 +548,33 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) { pMeta->tableType == TSDB_STREAM_TABLE) { free(buf); return TSDB_CODE_TSC_INVALID_VALUE; - } + } - SSchema *pTagsSchema = tscGetTableTagSchema(pMeta); + SSchema *pTagsSchema = tscGetTableTagSchema(pMeta); int32_t numOfTags = tscGetNumOfTags(pMeta); for (int32_t i = 0; i < numOfTags; i++) { if (i != numOfTags - 1) { - snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s,", pTagsSchema[i].name); + snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "`%s`,", pTagsSchema[i].name); } else { - snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s", pTagsSchema[i].name); + snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "`%s`", pTagsSchema[i].name); } - } + } *result = buf; return TSDB_CODE_SUCCESS; -} +} static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) { SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; - SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); + SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); if (pInterSql == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + } - SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder)); + 
SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder)); if (param == NULL) { free(pInterSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -636,9 +636,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, if (type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes/TSDB_NCHAR_SIZE; } - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes); } else { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name); } } sprintf(result + strlen(result) - 1, "%s", ")"); @@ -663,9 +663,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, if (type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes/TSDB_NCHAR_SIZE; } - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"`%s` %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); } else { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[type].name); } } snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS ("); @@ -677,9 +677,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, if (type == TSDB_DATA_TYPE_NCHAR) { bytes = bytes/TSDB_NCHAR_SIZE; } - snprintf(result + strlen(result), 
TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); } else { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[type].name); } } sprintf(result + strlen(result) - 1, "%s", ")"); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 94f9a3018aae175f0f27c1c24b735f5a0392102d..05b8b031d99c2b0f4e54e9fc3392a20a9e1bcfcc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -41,9 +41,8 @@ enum { static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, char *str, char **end); -int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, - int32_t allNullLen) { - ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols)); +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColInfo *pColInfo) { + ASSERT(nRows >= 0 && pColInfo->numOfCols > 0 && (pColInfo->numOfBound <= pColInfo->numOfCols)); if (nRows > 0) { // already init(bind multiple rows by single column) if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { @@ -51,41 +50,12 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } - // default compareStat is ROW_COMPARE_NO_NEED - if (nBoundCols == 0) { // file input - pBuilder->memRowType = SMEM_ROW_DATA; - return TSDB_CODE_SUCCESS; + uint32_t dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + pColInfo->allNullLen; + uint32_t kvLen = TD_MEM_ROW_KV_HEAD_SIZE + 
pColInfo->numOfBound * sizeof(SColIdx) + pColInfo->boundNullLen; + if (isUtilizeKVRow(kvLen, dataLen)) { + pBuilder->memRowType = SMEM_ROW_KV; } else { - float boundRatio = ((float)nBoundCols / (float)nCols); - - if (boundRatio < KVRatioKV) { - pBuilder->memRowType = SMEM_ROW_KV; - return TSDB_CODE_SUCCESS; - } else if (boundRatio > KVRatioData) { - pBuilder->memRowType = SMEM_ROW_DATA; - return TSDB_CODE_SUCCESS; - } - pBuilder->compareStat = ROW_COMPARE_NEED; - - if (boundRatio < KVRatioPredict) { - pBuilder->memRowType = SMEM_ROW_KV; - } else { - pBuilder->memRowType = SMEM_ROW_DATA; - } - } - - pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); - - if (nRows > 0) { - pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo)); - if (pBuilder->rowInfo == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - for (int i = 0; i < nRows; ++i) { - (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; - (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; - } + pBuilder->memRowType = SMEM_ROW_DATA; } return TSDB_CODE_SUCCESS; @@ -385,6 +355,19 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha } break; + case TSDB_DATA_TYPE_JSON: + if (pToken->n >= pSchema->bytes) { // reserve 1 byte for select + return tscInvalidOperationMsg(msg, "json tag length too long", pToken->z); + } + if (pToken->type == TK_NULL) { + *(int8_t *)payload = TSDB_DATA_TINYINT_NULL; + } else if (pToken->type != TK_STRING){ + tscInvalidOperationMsg(msg, "invalid json data", pToken->z); + } else{ + *((int8_t *)payload) = TSDB_DATA_JSON_PLACEHOLDER; + } + break; + case TSDB_DATA_TYPE_TIMESTAMP: { if (pToken->type == TK_NULL) { if (primaryKey) { @@ -455,8 +438,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i STableMeta * pTableMeta = pDataBlocks->pTableMeta; SSchema * schema = tscGetTableSchema(pTableMeta); SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; - int32_t dataLen = 
spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE; - int32_t kvLen = pBuilder->kvRowInitLen; bool isParseBindParam = false; initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); @@ -533,8 +514,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int16_t colId = -1; tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); - int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, - colId, &dataLen, &kvLen, pBuilder->compareStat); + int32_t ret = + tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, colId); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -549,13 +530,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i } if (!isParseBindParam) { - // 2. check and set convert flag - if (pBuilder->compareStat == ROW_COMPARE_NEED) { - checkAndConvertMemRow(row, dataLen, kvLen); - } - - // 3. set the null value for the columns that do not assign values - if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) { + // set the null value for the columns that do not assign values + if ((spd->numOfBound < spd->numOfCols) && isDataRow(row)) { SDataRow dataRow = memRowDataBody(row); for (int32_t i = 0; i < spd->numOfCols; ++i) { if (spd->cols[i].valStat == VAL_STAT_NONE) { @@ -565,7 +541,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i } } - *len = getExtendedRowSize(pDataBlocks); + *len = pBuilder->rowSize; return TSDB_CODE_SUCCESS; } @@ -618,11 +594,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn int32_t extendedRowSize = getExtendedRowSize(pDataBlock); - if (TSDB_CODE_SUCCESS != - (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound, - pDataBlock->boundColumnInfo.allNullLen))) { + if (TSDB_CODE_SUCCESS != (code = 
initMemRowBuilder(&pDataBlock->rowBuilder, 0, &pDataBlock->boundColumnInfo))) { return code; } + pDataBlock->rowBuilder.rowSize = extendedRowSize; + while (1) { index = 0; sToken = tStrGetToken(*str, &index, false); @@ -701,6 +677,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 pColInfo->boundedColumns[i] = i; } pColInfo->allNullLen += pColInfo->flen; + pColInfo->boundNullLen = pColInfo->allNullLen; // default set allNullLen pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } @@ -1094,9 +1071,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC return code; } - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); - } + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal, false); + if(pSchema->type == TSDB_DATA_TYPE_JSON){ + assert(spd.numOfBound == 1); + if(sToken.n > TSDB_MAX_JSON_TAGS_LEN/TSDB_NCHAR_SIZE){ + tdDestroyKVRowBuilder(&kvRowBuilder); + tscDestroyBoundColumnInfo(&spd); + return tscSQLSyntaxErrMsg(pInsertParam->msg, "json tag too long", NULL); + } + char* json = strndup(sToken.z, sToken.n); + code = parseJsontoTagData(json, &kvRowBuilder, pInsertParam->msg, pTagSchema[spd.boundedColumns[0]].colId); + if (code != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + tscDestroyBoundColumnInfo(&spd); + tfree(json); + return code; + } + tfree(json); + } + } tscDestroyBoundColumnInfo(&spd); SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); @@ -1110,7 +1104,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC if (pInsertParam->tagData.dataLen <= 0){ return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL); } - + char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen); if (pTag == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1224,6 +1218,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nCols = 
pColInfo->numOfCols; pColInfo->numOfBound = 0; + pColInfo->boundNullLen = 0; memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); for (int32_t i = 0; i < nCols; ++i) { pColInfo->cols[i].valStat = VAL_STAT_NONE; @@ -1281,6 +1276,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; + switch (pSchema[t].type) { + case TSDB_DATA_TYPE_BINARY: + pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + break; + case TSDB_DATA_TYPE_NCHAR: + pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + break; + default: + pColInfo->boundNullLen += TYPE_BYTES[pSchema[t].type]; + break; + } findColumnIndex = true; if (isOrdered && (lastColIdx > t)) { isOrdered = false; @@ -1304,6 +1310,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; + switch (pSchema[t].type) { + case TSDB_DATA_TYPE_BINARY: + pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + break; + case TSDB_DATA_TYPE_NCHAR: + pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + break; + default: + pColInfo->boundNullLen += TYPE_BYTES[pSchema[t].type]; + break; + } findColumnIndex = true; if (isOrdered && (lastColIdx > t)) { isOrdered = false; @@ -1754,13 +1771,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows); + int32_t extendedRowSize = getExtendedRowSize(pTableDataBlock); + tscAllocateMemIfNeed(pTableDataBlock, extendedRowSize, &maxRows); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); if (tokenBuf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + // insert from .csv means full and ordered columns, thus use SDataRow all 
the time + ASSERT(SMEM_ROW_DATA == pTableDataBlock->rowBuilder.memRowType); + pTableDataBlock->rowBuilder.rowSize = extendedRowSize; + while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 568574cf0f7e88babbbd364da68af998bfa62e98..085b949cc102e3752abd41caacdf07985351e1db 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -32,6 +32,10 @@ typedef struct { static uint64_t linesSmlHandleId = 0; +static int32_t insertChildTablePointsBatch(void* pVoid, char* name, char* name1, SArray* pArray, SArray* pArray1, + SArray* pArray2, SArray* pArray3, size_t size, SSmlLinesInfo* info); +static int32_t doInsertChildTablePoints(void* pVoid, char* sql, char* name, SArray* pArray, SArray* pArray1, + SSmlLinesInfo* info); uint64_t genLinesSmlId() { uint64_t id; @@ -152,7 +156,9 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen, SSmlLinesInfo* info) { tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id); - qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv); + if (point->tagNum) { + qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv); + } SStringBuilder sb; memset(&sb, 0, sizeof(sb)); char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; @@ -175,16 +181,26 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa MD5Init(&context); MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len); MD5Final(&context); + uint64_t digest1 = *(uint64_t*)(context.digest); + uint64_t digest2 = *(uint64_t*)(context.digest + 8); *tableNameLen = snprintf(tableName, *tableNameLen, - 
"t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0], - context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6], - context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11], - context.digest[12], context.digest[13], context.digest[14], context.digest[15]); + "t_%016"PRIx64"%016"PRIx64, digest1, digest2); taosStringBuilderDestroy(&sb); tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName); return 0; } +static int32_t buildSmlChildTableName(TAOS_SML_DATA_POINT* point, SSmlLinesInfo* info) { + tscDebug("SML:0x%"PRIx64" taos_sml_insert build child table name", info->id); + char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; + int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE; + getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info); + point->childTableName = calloc(1, tableNameLen+1); + strncpy(point->childTableName, childTableName, tableNameLen); + point->childTableName[tableNameLen] = '\0'; + return 0; +} + static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) { int32_t code = 0; SHashObj* sname2shema = taosHashInit(32, @@ -205,8 +221,8 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, schema.sTableName[stableNameLen] = '\0'; schema.fields = taosArrayInit(64, sizeof(SSchema)); schema.tags = taosArrayInit(8, sizeof(SSchema)); - schema.tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); - schema.fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); + schema.tagHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); + schema.fieldHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); pStableSchema = taosArrayPush(stableSchemas, &schema); 
stableIdx = taosArrayGetSize(stableSchemas) - 1; @@ -216,12 +232,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, for (int j = 0; j < point->tagNum; ++j) { TAOS_SML_KV* tagKv = point->tags + j; if (!point->childTableName) { - char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; - int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE; - getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info); - point->childTableName = calloc(1, tableNameLen+1); - strncpy(point->childTableName, childTableName, tableNameLen); - point->childTableName[tableNameLen] = '\0'; + buildSmlChildTableName(point, info); } code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info); @@ -231,6 +242,27 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, } } + //for Line Protocol tags may be omitted, add a tag with NULL value + if (point->tagNum == 0) { + if (!point->childTableName) { + buildSmlChildTableName(point, info); + } + char tagNullName[TSDB_COL_NAME_LEN] = {0}; + size_t nameLen = strlen(tsSmlTagNullName); + strncpy(tagNullName, tsSmlTagNullName, nameLen); + addEscapeCharToString(tagNullName, (int32_t)nameLen); + size_t* pTagNullIdx = taosHashGet(pStableSchema->tagHash, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE); + if (!pTagNullIdx) { + SSchema tagNull = {0}; + tagNull.type = TSDB_DATA_TYPE_NCHAR; + tagNull.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; + strncpy(tagNull.name, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE); + taosArrayPush(pStableSchema->tags, &tagNull); + size_t tagNullIdx = taosArrayGetSize(pStableSchema->tags) - 1; + taosHashPut(pStableSchema->tagHash, tagNull.name, nameLen + TS_ESCAPE_CHAR_SIZE, &tagNullIdx, sizeof(tagNullIdx)); + } + } + for (int j = 0; j < point->fieldNum; ++j) { TAOS_SML_KV* fieldKv = point->fields + j; code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info); @@ -525,11 +557,75 @@ static int32_t 
fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSc return TSDB_CODE_SUCCESS; } +static int32_t getSuperTableMetaFromLocalCache(TAOS* taos, char* tableName, STableMeta** outTableMeta, SSmlLinesInfo* info) { + int32_t code = 0; + STableMeta* tableMeta = NULL; + + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); + if (pSql == NULL) { + tscError("SML:0x%" PRIx64 " failed to allocate memory, reason:%s", info->id, strerror(errno)); + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + return code; + } + pSql->pTscObj = taos; + pSql->signature = pSql; + pSql->fp = NULL; + + registerSqlObj(pSql); + char tableNameBuf[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + memcpy(tableNameBuf, tableName, strlen(tableName)); + SStrToken tableToken = {.z = tableNameBuf, .n = (uint32_t)strlen(tableName), .type = TK_ID}; + tGetToken(tableNameBuf, &tableToken.type); + bool dbIncluded = false; + // Check if the table name available or not + if (tscValidateName(&tableToken, true, &dbIncluded) != TSDB_CODE_SUCCESS) { + code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; + sprintf(pSql->cmd.payload, "table name is invalid"); + taosReleaseRef(tscObjRef, pSql->self); + return code; + } + + SName sname = {0}; + if ((code = tscSetTableFullName(&sname, &tableToken, pSql, dbIncluded)) != TSDB_CODE_SUCCESS) { + taosReleaseRef(tscObjRef, pSql->self); + return code; + } + + char fullTableName[TSDB_TABLE_FNAME_LEN] = {0}; + memset(fullTableName, 0, tListLen(fullTableName)); + tNameExtractFullName(&sname, fullTableName); + + size_t size = 0; + taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size); + + STableMeta* stableMeta = tableMeta; + if (tableMeta != NULL && tableMeta->tableType == TSDB_CHILD_TABLE) { + taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), tableMeta->sTableName, strlen(tableMeta->sTableName), NULL, + (void**)stableMeta, &size); + } + taosReleaseRef(tscObjRef, pSql->self); + + if (stableMeta != tableMeta) { + free(tableMeta); + } + + 
if (stableMeta != NULL) { + if (outTableMeta != NULL) { + *outTableMeta = stableMeta; + } else { + free(stableMeta); + } + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_TSC_NO_META_CACHED; + } +} + static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTableMeta, SSmlLinesInfo* info) { int32_t code = 0; int32_t retries = 0; STableMeta* tableMeta = NULL; - while (retries++ < TSDB_MAX_REPLICA && tableMeta == NULL) { + while (retries++ <= TSDB_MAX_REPLICA && tableMeta == NULL) { STscObj* pObj = (STscObj*)taos; if (pObj == NULL || pObj->signature != pObj) { terrno = TSDB_CODE_TSC_DISCONNECTED; @@ -537,55 +633,24 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl } tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName); - - char sql[256]; - snprintf(sql, 256, "describe %s", tableName); - TAOS_RES* res = taos_query(taos, sql); - code = taos_errno(res); - if (code != 0) { - tscError("SML:0x%" PRIx64 " describe table failure. %s", info->id, taos_errstr(res)); + code = getSuperTableMetaFromLocalCache(taos, tableName, &tableMeta, info); + if (code == TSDB_CODE_SUCCESS) { + tscDebug("SML:0x%" PRIx64 " successfully retrieved table meta. super table name: %s", info->id, tableName); + break; + } else if (code == TSDB_CODE_TSC_NO_META_CACHED) { + char sql[256]; + snprintf(sql, 256, "describe %s", tableName); + TAOS_RES* res = taos_query(taos, sql); + code = taos_errno(res); + if (code != 0) { + tscError("SML:0x%" PRIx64 " describe table failure. 
%s", info->id, taos_errstr(res)); + taos_free_result(res); + return code; + } taos_free_result(res); + } else { return code; } - taos_free_result(res); - - SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); - if (pSql == NULL) { - tscError("SML:0x%" PRIx64 " failed to allocate memory, reason:%s", info->id, strerror(errno)); - code = TSDB_CODE_TSC_OUT_OF_MEMORY; - return code; - } - pSql->pTscObj = taos; - pSql->signature = pSql; - pSql->fp = NULL; - - registerSqlObj(pSql); - char tableNameBuf[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; - memcpy(tableNameBuf, tableName, strlen(tableName)); - SStrToken tableToken = {.z = tableNameBuf, .n = (uint32_t)strlen(tableName), .type = TK_ID}; - tGetToken(tableNameBuf, &tableToken.type); - bool dbIncluded = false; - // Check if the table name available or not - if (tscValidateName(&tableToken, true, &dbIncluded) != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; - sprintf(pSql->cmd.payload, "table name is invalid"); - taosReleaseRef(tscObjRef, pSql->self); - return code; - } - - SName sname = {0}; - if ((code = tscSetTableFullName(&sname, &tableToken, pSql, dbIncluded)) != TSDB_CODE_SUCCESS) { - taosReleaseRef(tscObjRef, pSql->self); - return code; - } - - char fullTableName[TSDB_TABLE_FNAME_LEN] = {0}; - memset(fullTableName, 0, tListLen(fullTableName)); - tNameExtractFullName(&sname, fullTableName); - - size_t size = 0; - taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size); - taosReleaseRef(tscObjRef, pSql->self); } if (tableMeta != NULL) { @@ -637,6 +702,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* if (code == TSDB_CODE_SUCCESS) { pointSchema->precision = dbSchema.precision; + size_t pointTagSize = taosArrayGetSize(pointSchema->tags); size_t pointFieldSize = taosArrayGetSize(pointSchema->fields); @@ -688,72 +754,303 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* 
return 0; } -static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName, - SArray* tagsSchema, SArray* tagsBind, SSmlLinesInfo* info) { - size_t numTags = taosArrayGetSize(tagsSchema); +static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int numPoints, + SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) { + for (int32_t i = 0; i < numPoints; ++i) { + TAOS_SML_DATA_POINT * point = points + i; + SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, point->schemaIdx); + + for (int j = 0; j < point->tagNum; ++j) { + TAOS_SML_KV* kv = point->tags + j; + if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) { + int64_t ts = *(int64_t*)(kv->value); + ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision); + *(int64_t*)(kv->value) = ts; + } + } + + for (int j = 0; j < point->fieldNum; ++j) { + TAOS_SML_KV* kv = point->fields + j; + if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) { + int64_t ts = *(int64_t*)(kv->value); + ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision); + *(int64_t*)(kv->value) = ts; + } + } + + SArray* cTablePoints = NULL; + SArray** pCTablePoints = taosHashGet(cname2points, point->childTableName, strlen(point->childTableName)); + if (pCTablePoints) { + cTablePoints = *pCTablePoints; + } else { + cTablePoints = taosArrayInit(64, sizeof(point)); + taosHashPut(cname2points, point->childTableName, strlen(point->childTableName), &cTablePoints, POINTER_BYTES); + } + taosArrayPush(cTablePoints, &point); + } + + return 0; +} + +static int32_t applyChildTableDataPointsWithInsertSQL(TAOS* taos, char* cTableName, char* sTableName, SSmlSTableSchema* sTableSchema, + SArray* cTablePoints, size_t rowSize, SSmlLinesInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + size_t numTags = taosArrayGetSize(sTableSchema->tags); + size_t numCols = taosArrayGetSize(sTableSchema->fields); + size_t rows = taosArrayGetSize(cTablePoints); + SArray* 
tagsSchema = sTableSchema->tags; + SArray* colsSchema = sTableSchema->fields; + + TAOS_SML_KV* tagKVs[TSDB_MAX_TAGS] = {0}; + for (int i= 0; i < rows; ++i) { + TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i); + for (int j = 0; j < pDataPoint->tagNum; ++j) { + TAOS_SML_KV* kv = pDataPoint->tags + j; + tagKVs[kv->fieldSchemaIdx] = kv; + } + } + char* sql = malloc(tsMaxSQLStringLen+1); if (sql == NULL) { tscError("malloc sql memory error"); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - int freeBytes = tsMaxSQLStringLen + 1; - sprintf(sql, "create table if not exists %s using %s", cTableName, sTableName); - snprintf(sql+strlen(sql), freeBytes-strlen(sql), "("); + int32_t freeBytes = tsMaxSQLStringLen + 1 ; + int32_t totalLen = 0; + totalLen += sprintf(sql, "insert into %s using %s (", cTableName, sTableName); for (int i = 0; i < numTags; ++i) { SSchema* tagSchema = taosArrayGet(tagsSchema, i); - snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", tagSchema->name); + totalLen += snprintf(sql+totalLen, freeBytes-totalLen, "%s,", tagSchema->name); } - snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")"); + --totalLen; + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, ")"); - snprintf(sql + strlen(sql), freeBytes-strlen(sql), " tags ("); + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, " tags ("); +// for (int i = 0; i < numTags; ++i) { +// snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,"); +// } for (int i = 0; i < numTags; ++i) { - snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,"); + if (tagKVs[i] == NULL) { + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, "NULL,"); + } else { + TAOS_SML_KV* kv = tagKVs[i]; + size_t beforeLen = totalLen; + int32_t len = 0; + converToStr(sql+beforeLen, kv->type, kv->value, kv->length, &len); + totalLen += len; + totalLen += snprintf(sql+totalLen, freeBytes-totalLen, ","); + } } - snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")"); - sql[strlen(sql)] = '\0'; + 
--totalLen; + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, ") ("); - tscDebug("SML:0x%"PRIx64" create table : %s", info->id, sql); + for (int i = 0; i < numCols; ++i) { + SSchema* colSchema = taosArrayGet(colsSchema, i); + totalLen += snprintf(sql+totalLen, freeBytes-totalLen, "%s,", colSchema->name); + } + --totalLen; + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, ") values "); - TAOS_STMT* stmt = taos_stmt_init(taos); - if (stmt == NULL) { - free(sql); - return TSDB_CODE_TSC_OUT_OF_MEMORY; + TAOS_SML_KV** colKVs = malloc(numCols*sizeof(TAOS_SML_KV*)); + for (int r = 0; r < rows; ++r) { + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, "("); + + memset(colKVs, 0, numCols*sizeof(TAOS_SML_KV*)); + + TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, r); + for (int i = 0; i < point->fieldNum; ++i) { + TAOS_SML_KV* kv = point->fields + i; + colKVs[kv->fieldSchemaIdx] = kv; + } + + for (int i = 0; i < numCols; ++i) { + if (colKVs[i] == NULL) { + totalLen += snprintf(sql + totalLen, freeBytes-totalLen, "NULL,"); + } else { + TAOS_SML_KV* kv = colKVs[i]; + size_t beforeLen = totalLen; + int32_t len = 0; + converToStr(sql+beforeLen, kv->type, kv->value, kv->length, &len); + totalLen += len; + totalLen += snprintf(sql+totalLen, freeBytes-totalLen, ","); + } + } + --totalLen; + totalLen += snprintf(sql+totalLen, freeBytes - totalLen, ")"); } - int32_t code; - code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)); + free(colKVs); + sql[totalLen] = '\0'; + + tscDebug("SML:0x%"PRIx64" insert child table table %s of super table %s sql: %s", info->id, cTableName, sTableName, sql); + TAOS_RES* res = taos_query(taos, sql); free(sql); + code = taos_errno(res); + info->affectedRows += taos_affected_rows(res); + taos_free_result(res); + return code; +} - if (code != 0) { - tscError("SML:0x%"PRIx64" taos_stmt_prepare returns %d:%s", info->id, code, tstrerror(code)); - taos_stmt_close(stmt); - return code; +static int32_t 
applyChildTableDataPointsWithStmt(TAOS* taos, char* cTableName, char* sTableName, SSmlSTableSchema* sTableSchema, + SArray* cTablePoints, size_t rowSize, SSmlLinesInfo* info) { + size_t numTags = taosArrayGetSize(sTableSchema->tags); + size_t numCols = taosArrayGetSize(sTableSchema->fields); + size_t rows = taosArrayGetSize(cTablePoints); + + TAOS_SML_KV* tagKVs[TSDB_MAX_TAGS] = {0}; + for (int i= 0; i < rows; ++i) { + TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i); + for (int j = 0; j < pDataPoint->tagNum; ++j) { + TAOS_SML_KV* kv = pDataPoint->tags + j; + tagKVs[kv->fieldSchemaIdx] = kv; + } } - code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind)); - if (code != 0) { - tscError("SML:0x%"PRIx64" taos_stmt_bind_param returns %d:%s", info->id, code, tstrerror(code)); - taos_stmt_close(stmt); - return code; + //tag bind + SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND)); + taosArraySetSize(tagBinds, numTags); + int isNullColBind = TSDB_TRUE; + for (int j = 0; j < numTags; ++j) { + TAOS_BIND* bind = taosArrayGet(tagBinds, j); + bind->is_null = &isNullColBind; + } + for (int j = 0; j < numTags; ++j) { + if (tagKVs[j] == NULL) continue; + TAOS_SML_KV* kv = tagKVs[j]; + TAOS_BIND* bind = taosArrayGet(tagBinds, kv->fieldSchemaIdx); + bind->buffer_type = kv->type; + bind->length = malloc(sizeof(uintptr_t*)); + *bind->length = kv->length; + bind->buffer = kv->value; + bind->is_null = NULL; } - code = taos_stmt_execute(stmt); - if (code != 0) { - tscError("SML:0x%"PRIx64" taos_stmt_execute returns %d:%s", info->id, code, tstrerror(code)); - taos_stmt_close(stmt); - return code; + //rows bind + SArray* rowsBind = taosArrayInit(rows, POINTER_BYTES); + for (int i = 0; i < rows; ++i) { + TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, i); + + TAOS_BIND* colBinds = calloc(numCols, sizeof(TAOS_BIND)); + if (colBinds == NULL) { + tscError("SML:0x%"PRIx64" taos_sml_insert insert points, failed to allocated memory for TAOS_BIND, " + 
"num of rows: %zu, num of cols: %zu", info->id, rows, numCols); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int j = 0; j < numCols; ++j) { + TAOS_BIND* bind = colBinds + j; + bind->is_null = &isNullColBind; + } + for (int j = 0; j < point->fieldNum; ++j) { + TAOS_SML_KV* kv = point->fields + j; + TAOS_BIND* bind = colBinds + kv->fieldSchemaIdx; + bind->buffer_type = kv->type; + bind->length = malloc(sizeof(uintptr_t*)); + *bind->length = kv->length; + bind->buffer = kv->value; + bind->is_null = NULL; + } + taosArrayPush(rowsBind, &colBinds); } - code = taos_stmt_close(stmt); + int32_t code = 0; + code = insertChildTablePointsBatch(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, sTableSchema->fields, rowsBind, rowSize, info); if (code != 0) { - tscError("SML:0x%"PRIx64" taos_stmt_close return %d:%s", info->id, code, tstrerror(code)); - return code; + tscError("SML:0x%"PRIx64" insert into child table %s failed. error %s", info->id, cTableName, tstrerror(code)); } + + //free rows bind + for (int i = 0; i < rows; ++i) { + TAOS_BIND* colBinds = taosArrayGetP(rowsBind, i); + for (int j = 0; j < numCols; ++j) { + TAOS_BIND* bind = colBinds + j; + free(bind->length); + } + free(colBinds); + } + taosArrayDestroy(rowsBind); + //free tag bind + for (int i = 0; i < taosArrayGetSize(tagBinds); ++i) { + TAOS_BIND* bind = taosArrayGet(tagBinds, i); + free(bind->length); + } + taosArrayDestroy(tagBinds); return code; } -static int32_t doInsertChildTableWithStmt(TAOS* taos, char* sql, char* cTableName, SArray* batchBind, SSmlLinesInfo* info) { +static int32_t insertChildTablePointsBatch(TAOS* taos, char* cTableName, char* sTableName, + SArray* tagsSchema, SArray* tagsBind, + SArray* colsSchema, SArray* rowsBind, + size_t rowSize, SSmlLinesInfo* info) { + size_t numTags = taosArrayGetSize(tagsSchema); + size_t numCols = taosArrayGetSize(colsSchema); + char* sql = malloc(tsMaxSQLStringLen+1); + if (sql == NULL) { + tscError("malloc sql memory error"); + return 
TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + int32_t freeBytes = tsMaxSQLStringLen + 1 ; + sprintf(sql, "insert into ? using %s (", sTableName); + for (int i = 0; i < numTags; ++i) { + SSchema* tagSchema = taosArrayGet(tagsSchema, i); + snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", tagSchema->name); + } + snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")"); + + snprintf(sql + strlen(sql), freeBytes-strlen(sql), " tags ("); + + for (int i = 0; i < numTags; ++i) { + snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,"); + } + snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ") ("); + + for (int i = 0; i < numCols; ++i) { + SSchema* colSchema = taosArrayGet(colsSchema, i); + snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", colSchema->name); + } + snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ") values ("); + + for (int i = 0; i < numCols; ++i) { + snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,"); + } + snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ")"); + sql[strlen(sql)] = '\0'; + + tscDebug("SML:0x%"PRIx64" insert child table table %s of super table %s : %s", info->id, cTableName, sTableName, sql); + + size_t rows = taosArrayGetSize(rowsBind); + size_t maxBatchSize = TSDB_MAX_WAL_SIZE/rowSize * 4 / 5; + size_t batchSize = MIN(maxBatchSize, rows); + tscDebug("SML:0x%"PRIx64" insert rows into child table %s. 
num of rows: %zu, batch size: %zu", + info->id, cTableName, rows, batchSize); + SArray* batchBind = taosArrayInit(batchSize, POINTER_BYTES); + int32_t code = TSDB_CODE_SUCCESS; + for (int i = 0; i < rows;) { + int j = i; + for (; j < i + batchSize && j i) { + tscDebug("SML:0x%"PRIx64" insert child table batch from line %d to line %d.", info->id, i, j - 1); + code = doInsertChildTablePoints(taos, sql, cTableName, tagsBind, batchBind, info); + if (code != 0) { + taosArrayDestroy(batchBind); + tfree(sql); + return code; + } + taosArrayClear(batchBind); + } + i = j; + } + taosArrayDestroy(batchBind); + tfree(sql); + return code; + +} +static int32_t doInsertChildTablePoints(TAOS* taos, char* sql, char* cTableName, SArray* tagsBind, SArray* batchBind, + SSmlLinesInfo* info) { int32_t code = 0; TAOS_STMT* stmt = taos_stmt_init(taos); @@ -772,7 +1069,7 @@ static int32_t doInsertChildTableWithStmt(TAOS* taos, char* sql, char* cTableNam bool tryAgain = false; int32_t try = 0; do { - code = taos_stmt_set_tbname(stmt, cTableName); + code = taos_stmt_set_tbname_tags(stmt, cTableName, TARRAY_GET_START(tagsBind)); if (code != 0) { tscError("SML:0x%"PRIx64" taos_stmt_set_tbname return %d:%s", info->id, code, taos_stmt_errstr(stmt)); @@ -813,7 +1110,7 @@ static int32_t doInsertChildTableWithStmt(TAOS* taos, char* sql, char* cTableNam tscError("SML:0x%"PRIx64" taos_stmt_execute return %d:%s, try:%d", info->id, code, taos_stmt_errstr(stmt), try); } tscDebug("SML:0x%"PRIx64" taos_stmt_execute inserted %d rows", info->id, taos_stmt_affected_rows(stmt)); - + tryAgain = false; if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID @@ -846,189 +1143,19 @@ static int32_t doInsertChildTableWithStmt(TAOS* taos, char* sql, char* cTableNam taos_stmt_close(stmt); return code; -} - -static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* colsSchema, SArray* rowsBind, size_t rowSize, SSmlLinesInfo* info) { - size_t numCols = 
taosArrayGetSize(colsSchema); - char* sql = malloc(tsMaxSQLStringLen+1); - if (sql == NULL) { - tscError("malloc sql memory error"); - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - int32_t freeBytes = tsMaxSQLStringLen + 1 ; - sprintf(sql, "insert into ? ("); - - for (int i = 0; i < numCols; ++i) { - SSchema* colSchema = taosArrayGet(colsSchema, i); - snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", colSchema->name); - } - snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ") values ("); - - for (int i = 0; i < numCols; ++i) { - snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,"); - } - snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ")"); - sql[strlen(sql)] = '\0'; - - size_t rows = taosArrayGetSize(rowsBind); - size_t maxBatchSize = TSDB_MAX_WAL_SIZE/rowSize * 4 / 5; - size_t batchSize = MIN(maxBatchSize, rows); - tscDebug("SML:0x%"PRIx64" insert rows into child table %s. num of rows: %zu, batch size: %zu", - info->id, cTableName, rows, batchSize); - SArray* batchBind = taosArrayInit(batchSize, POINTER_BYTES); - int32_t code = TSDB_CODE_SUCCESS; - for (int i = 0; i < rows;) { - int j = i; - for (; j < i + batchSize && j i) { - tscDebug("SML:0x%"PRIx64" insert child table batch from line %d to line %d.", info->id, i, j - 1); - code = doInsertChildTableWithStmt(taos, sql, cTableName, batchBind, info); - if (code != 0) { - taosArrayDestroy(batchBind); - tfree(sql); - return code; - } - taosArrayClear(batchBind); - } - i = j; - } - taosArrayDestroy(batchBind); - tfree(sql); - return code; -} - -static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int numPoints, - SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) { - for (int32_t i = 0; i < numPoints; ++i) { - TAOS_SML_DATA_POINT * point = points + i; - SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, point->schemaIdx); - - for (int j = 0; j < point->tagNum; ++j) { - TAOS_SML_KV* kv = point->tags + j; - if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) { - 
int64_t ts = *(int64_t*)(kv->value); - ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision); - *(int64_t*)(kv->value) = ts; - } - } - - for (int j = 0; j < point->fieldNum; ++j) { - TAOS_SML_KV* kv = point->fields + j; - if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) { - int64_t ts = *(int64_t*)(kv->value); - ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision); - *(int64_t*)(kv->value) = ts; - } - } - - SArray* cTablePoints = NULL; - SArray** pCTablePoints = taosHashGet(cname2points, point->childTableName, strlen(point->childTableName)); - if (pCTablePoints) { - cTablePoints = *pCTablePoints; - } else { - cTablePoints = taosArrayInit(64, sizeof(point)); - taosHashPut(cname2points, point->childTableName, strlen(point->childTableName), &cTablePoints, POINTER_BYTES); - } - taosArrayPush(cTablePoints, &point); - } return 0; } -static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableName, - SSmlSTableSchema* sTableSchema, SArray* cTablePoints, SSmlLinesInfo* info) { - size_t numTags = taosArrayGetSize(sTableSchema->tags); - size_t rows = taosArrayGetSize(cTablePoints); - - TAOS_SML_KV* tagKVs[TSDB_MAX_TAGS] = {0}; - for (int i= 0; i < rows; ++i) { - TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i); - for (int j = 0; j < pDataPoint->tagNum; ++j) { - TAOS_SML_KV* kv = pDataPoint->tags + j; - tagKVs[kv->fieldSchemaIdx] = kv; - } - } - - SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND)); - taosArraySetSize(tagBinds, numTags); - int isNullColBind = TSDB_TRUE; - for (int j = 0; j < numTags; ++j) { - TAOS_BIND* bind = taosArrayGet(tagBinds, j); - bind->is_null = &isNullColBind; - } - for (int j = 0; j < numTags; ++j) { - if (tagKVs[j] == NULL) continue; - TAOS_SML_KV* kv = tagKVs[j]; - TAOS_BIND* bind = taosArrayGet(tagBinds, kv->fieldSchemaIdx); - bind->buffer_type = kv->type; - bind->length = malloc(sizeof(uintptr_t*)); - *bind->length = kv->length; - bind->buffer = 
kv->value; - bind->is_null = NULL; - } - - int32_t code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info); - - for (int i = 0; i < taosArrayGetSize(tagBinds); ++i) { - TAOS_BIND* bind = taosArrayGet(tagBinds, i); - free(bind->length); - } - taosArrayDestroy(tagBinds); - return code; -} - -static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, char* cTableName, - SArray* cTablePoints, size_t rowSize, SSmlLinesInfo* info) { +static int32_t applyChildTableDataPoints(TAOS* taos, char* cTableName, char* sTableName, SSmlSTableSchema* sTableSchema, + SArray* cTablePoints, size_t rowSize, SSmlLinesInfo* info) { int32_t code = TSDB_CODE_SUCCESS; - - size_t numCols = taosArrayGetSize(sTableSchema->fields); - size_t rows = taosArrayGetSize(cTablePoints); - SArray* rowsBind = taosArrayInit(rows, POINTER_BYTES); - - int isNullColBind = TSDB_TRUE; - for (int i = 0; i < rows; ++i) { - TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, i); - - TAOS_BIND* colBinds = calloc(numCols, sizeof(TAOS_BIND)); - if (colBinds == NULL) { - tscError("SML:0x%"PRIx64" taos_sml_insert insert points, failed to allocated memory for TAOS_BIND, " - "num of rows: %zu, num of cols: %zu", info->id, rows, numCols); - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - for (int j = 0; j < numCols; ++j) { - TAOS_BIND* bind = colBinds + j; - bind->is_null = &isNullColBind; - } - for (int j = 0; j < point->fieldNum; ++j) { - TAOS_SML_KV* kv = point->fields + j; - TAOS_BIND* bind = colBinds + kv->fieldSchemaIdx; - bind->buffer_type = kv->type; - bind->length = malloc(sizeof(uintptr_t*)); - *bind->length = kv->length; - bind->buffer = kv->value; - bind->is_null = NULL; - } - taosArrayPush(rowsBind, &colBinds); - } - - code = insertChildTableBatch(taos, cTableName, sTableSchema->fields, rowsBind, rowSize, info); - if (code != 0) { - tscError("SML:0x%"PRIx64" insert into child table %s failed. 
error %s", info->id, cTableName, tstrerror(code)); - } - - for (int i = 0; i < rows; ++i) { - TAOS_BIND* colBinds = taosArrayGetP(rowsBind, i); - for (int j = 0; j < numCols; ++j) { - TAOS_BIND* bind = colBinds + j; - free(bind->length); - } - free(colBinds); + size_t childTableDataPoints = taosArrayGetSize(cTablePoints); + if (childTableDataPoints < 10) { + code = applyChildTableDataPointsWithInsertSQL(taos, cTableName, sTableName, sTableSchema, cTablePoints, rowSize, info); + } else { + code = applyChildTableDataPointsWithStmt(taos, cTableName, sTableName, sTableSchema, cTablePoints, rowSize, info); } - taosArrayDestroy(rowsBind); return code; } @@ -1045,13 +1172,6 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, 0); SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, point->schemaIdx); - tscDebug("SML:0x%"PRIx64" apply child table tags. child table: %s", info->id, point->childTableName); - code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, info); - if (code != 0) { - tscError("apply child table tags failed. child table %s, error %s", point->childTableName, tstrerror(code)); - goto cleanup; - } - size_t rowSize = 0; size_t numCols = taosArrayGetSize(sTableSchema->fields); for (int i = 0; i < numCols; ++i) { @@ -1059,10 +1179,11 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t rowSize += colSchema->bytes; } - tscDebug("SML:0x%"PRIx64" apply child table points. child table: %s, row size: %zu", info->id, point->childTableName, rowSize); - code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints, rowSize, info); + tscDebug("SML:0x%"PRIx64" apply child table points. 
child table: %s of super table %s, row size: %zu", + info->id, point->childTableName, point->stableName, rowSize); + code = applyChildTableDataPoints(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, rowSize, info); if (code != 0) { - tscError("SML:0x%"PRIx64" Apply child table fields failed. child table %s, error %s", info->id, point->childTableName, tstrerror(code)); + tscError("SML:0x%"PRIx64" Apply child table points failed. child table %s, error %s", info->id, point->childTableName, tstrerror(code)); goto cleanup; } @@ -1082,6 +1203,60 @@ cleanup: return code; } +static int doSmlInsertOneDataPoint(TAOS* taos, TAOS_SML_DATA_POINT* point, SSmlLinesInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + + if (!point->childTableName) { + int tableNameLen = TSDB_TABLE_NAME_LEN; + point->childTableName = calloc(1, tableNameLen + 1); + getSmlMd5ChildTableName(point, point->childTableName, &tableNameLen, info); + point->childTableName[tableNameLen] = '\0'; + } + + STableMeta* tableMeta = NULL; + int32_t ret = getSuperTableMetaFromLocalCache(taos, point->stableName, &tableMeta, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + uint8_t precision = tableMeta->tableInfo.precision; + free(tableMeta); + + char* sql = malloc(TSDB_MAX_SQL_LEN + 1); + int freeBytes = TSDB_MAX_SQL_LEN; + int sqlLen = 0; + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "insert into %s(", point->childTableName); + for (int col = 0; col < point->fieldNum; ++col) { + TAOS_SML_KV* kv = point->fields + col; + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "%s,", kv->key); + } + --sqlLen; + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ") values ("); + TAOS_SML_KV* tsField = point->fields + 0; + int64_t ts = *(int64_t*)(tsField->value); + ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, precision); + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "%" PRId64 ",", ts); + for (int col = 1; col < point->fieldNum; ++col) { + TAOS_SML_KV* kv = 
point->fields + col; + int32_t len = 0; + converToStr(sql + sqlLen, kv->type, kv->value, kv->length, &len); + sqlLen += len; + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ","); + } + --sqlLen; + sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ")"); + sql[sqlLen] = 0; + + tscDebug("SML:0x%" PRIx64 " insert child table table %s of super table %s sql: %s", info->id, + point->childTableName, point->stableName, sql); + TAOS_RES* res = taos_query(taos, sql); + free(sql); + code = taos_errno(res); + info->affectedRows = taos_affected_rows(res); + taos_free_result(res); + + return code; +} + int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) { tscDebug("SML:0x%"PRIx64" taos_sml_insert. number of points: %d", info->id, numPoint); @@ -1089,6 +1264,14 @@ int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLine info->affectedRows = 0; + if (numPoint == 1) { + TAOS_SML_DATA_POINT* point = points + 0; + code = doSmlInsertOneDataPoint(taos, point, info); + if (code == TSDB_CODE_SUCCESS) { + return code; + } + } + tscDebug("SML:0x%"PRIx64" build data point schemas", info->id); SArray* stableSchemas = taosArrayInit(32, sizeof(SSmlSTableSchema)); // SArray code = buildDataPointSchemas(points, numPoint, stableSchemas, info); @@ -1120,14 +1303,6 @@ clean_up: return code; } -int tsc_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) { - SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo)); - info->id = genLinesSmlId(); - int code = tscSmlInsert(taos, points, numPoint, info); - free(info); - return code; -} - //========================================================================= /* Field Escape charaters @@ -1840,7 +2015,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; int len = 0; - char key[] = "_ts"; + char key[] = "ts"; char *value = NULL; start = cur = *index; diff --git 
a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index d064ede134129d796928768910c56573712319d1..82c554ee0a0edb3d9fec32cc4b09ba96e32e9285 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -125,8 +125,9 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char } tfree(value); - (*pTS)->key = tcalloc(sizeof(key), 1); + (*pTS)->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1); memcpy((*pTS)->key, key, sizeof(key)); + addEscapeCharToString((*pTS)->key, (int32_t)strlen(key)); *num_kvs += 1; *index = cur + 1; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 215861295fa0ff94352956fa3fd740ace9e90766..8d1a79e734cabf6694daf30d8887265488bcf630 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1533,6 +1533,41 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO return TSDB_CODE_SUCCESS; } +int32_t stmtValidateValuesFields(SSqlCmd *pCmd, char * sql) { + int32_t loopCont = 1, index0 = 0, values = 0; + SStrToken sToken; + + while (loopCont) { + sToken = tStrGetToken(sql, &index0, false); + if (sToken.n <= 0) { + return TSDB_CODE_SUCCESS; + } + + switch (sToken.type) { + case TK_RP: + if (values) { + return TSDB_CODE_SUCCESS; + } + break; + case TK_VALUES: + values = 1; + break; + case TK_QUESTION: + case TK_LP: + break; + default: + if (values) { + tscError("only ? allowed in values"); + return tscInvalidOperationMsg(pCmd->payload, "only ? 
allowed in values", NULL); + } + break; + } + } + + return TSDB_CODE_SUCCESS; +} + + //////////////////////////////////////////////////////////////////////////////// // interface functions @@ -1637,6 +1672,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { STMT_RET(ret); } + ret = stmtValidateValuesFields(&pSql->cmd, pSql->sqlstr); + if (ret != TSDB_CODE_SUCCESS) { + STMT_RET(ret); + } + if (pStmt->multiTbInsert) { STMT_RET(TSDB_CODE_SUCCESS); } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 2444ea2941de46a827ee05ae997cf69ec188e0c6..7651e175d335c38fd094b7227db88b70db77d246 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -41,6 +41,7 @@ #include "qScript.h" #include "ttype.h" #include "qFilter.h" +#include "cJSON.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -99,7 +100,7 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem, bool outerQuery); -static int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql); +static int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql, bool joinQuery); static int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); static int32_t validateRangeNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode); static int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema); @@ -108,7 +109,8 @@ static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryIn static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); -static int32_t 
validateArithmeticSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type); +static int32_t validateSQLExprItem(SSqlCmd* pCmd, tSqlExpr* pExpr, + SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type, uint64_t* uid, int32_t* height); static int32_t validateEp(char* ep); static int32_t validateDNodeConfig(SMiscInfo* pOptions); static int32_t validateLocalConfig(SMiscInfo* pOptions); @@ -163,6 +165,7 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode; int32_t firstVarType = item->value.nType; + if(colType == TSDB_DATA_TYPE_JSON) colType = firstVarType; SBufferWriter bw = tbufInitWriter( NULL, false); tbufEnsureCapacity(&bw, 512); @@ -284,6 +287,10 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) { return TSDB_RELATION_MATCH; case TK_NMATCH: return TSDB_RELATION_NMATCH; + case TK_CONTAINS: + return TSDB_RELATION_CONTAINS; + case TK_ARROW: + return TSDB_RELATION_ARROW; case TK_ISNULL: return TSDB_RELATION_ISNULL; case TK_NOTNULL: @@ -368,19 +375,28 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) { // validate the out put field type for "UNION ALL" subclause static int32_t normalizeVarDataTypeLength(SSqlCmd* pCmd) { const char* msg1 = "columns in select clause not identical"; + const char* msg2 = "too many select clause siblings, at most 100 allowed"; + int32_t siblings = 0; int32_t diffSize = 0; // if there is only one element, the limit of clause is the limit of global result. 
SQueryInfo* pQueryInfo1 = pCmd->pQueryInfo; SQueryInfo* pSibling = pQueryInfo1->sibling; + // pQueryInfo1 itself + ++siblings; + while(pSibling != NULL) { int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo, &diffSize); if (ret != 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } + if (++siblings > 100) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + pSibling = pSibling->sibling; } @@ -436,10 +452,12 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) { int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) { - const char *msg1 = "invalidate function name"; + const char *msg1 = "invalid function name or length"; const char *msg2 = "path is too long"; const char *msg3 = "invalid outputtype"; + #ifdef LUA_EMBEDDED const char *msg4 = "invalid script"; + #endif const char *msg5 = "invalid dyn lib"; SSqlCmd *pCmd = &pSql->cmd; @@ -454,7 +472,7 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) { } createInfo->name.z[createInfo->name.n] = 0; - // funcname's naming rule is same to column + // funcname's naming rule is same to column if (validateColumnName(createInfo->name.z) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -478,9 +496,12 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) { } //validate *.lua or .so int32_t pathLen = (int32_t)strlen(createInfo->path.z); +#ifdef LUA_EMBEDDED if ((pathLen > 4) && (0 == strncmp(createInfo->path.z + pathLen - 4, ".lua", 4)) && !isValidScript(buf, len)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); - } else if (pathLen > 3 && (0 == strncmp(createInfo->path.z + pathLen - 3, ".so", 3))) { + } else +#endif + if (pathLen > 3 && (0 == strncmp(createInfo->path.z + pathLen - 3, ".so", 3))) { void *handle = taosLoadDll(createInfo->path.z); taosCloseDll(handle); if (handle == NULL) { @@ -1483,7 +1504,7 @@ static bool 
validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { const char* msg3 = "duplicated column names"; const char* msg4 = "invalid data type"; const char* msg5 = "invalid binary/nchar column length"; - const char* msg6 = "invalid column name"; + const char* msg6 = "invalid column name or length"; const char* msg7 = "too many columns"; // number of fields no less than 2 @@ -1506,7 +1527,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { int32_t nLen = 0; for (int32_t i = 0; i < numOfCols; ++i) { pField = taosArrayGet(pFieldList, i); - if (!isValidDataType(pField->type)) { + if (!isValidDataType(pField->type) || pField->type == TSDB_DATA_TYPE_JSON) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } @@ -1545,7 +1566,6 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { return true; } - static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); @@ -1554,8 +1574,9 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC const char* msg3 = "duplicated column names"; //const char* msg4 = "timestamp not allowed in tags"; const char* msg5 = "invalid data type in tags"; - const char* msg6 = "invalid tag name"; + const char* msg6 = "invalid tag name or length"; const char* msg7 = "invalid binary/nchar tag length"; + const char* msg8 = "only support one tag if include json type"; // number of fields at least 1 size_t numOfTags = taosArrayGetSize(pTagsList); @@ -1571,6 +1592,11 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC return false; } + if (p->type == TSDB_DATA_TYPE_JSON && numOfTags != 1) { + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + return false; + } + if ((p->type == TSDB_DATA_TYPE_BINARY && p->bytes <= 0) || (p->type == TSDB_DATA_TYPE_NCHAR && p->bytes <= 0)) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); @@ -1589,18 +1615,21 @@ static bool 
validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC } int32_t nLen = 0; + bool isJsonTag = false; for (int32_t i = 0; i < numOfTags; ++i) { TAOS_FIELD* p = taosArrayGet(pTagsList, i); if (p->bytes == 0) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } - + if(p->type == TSDB_DATA_TYPE_JSON){ + isJsonTag = true; + } nLen += p->bytes; } // max tag row length must be less than TSDB_MAX_TAGS_LEN - if (nLen > TSDB_MAX_TAGS_LEN) { + if (!isJsonTag && nLen > TSDB_MAX_TAGS_LEN) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); return false; } @@ -1623,10 +1652,11 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC */ int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { const char* msg3 = "tag length too long"; - const char* msg4 = "invalid tag name"; + const char* msg4 = "invalid tag name or length"; const char* msg5 = "invalid binary/nchar tag length"; const char* msg6 = "invalid data type in tags"; const char* msg7 = "too many columns"; + const char* msg8 = "only support one json tag"; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -1652,6 +1682,9 @@ int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { // invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); // return false; //} + if (pTagField->type == TSDB_DATA_TYPE_JSON) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); @@ -1660,6 +1693,10 @@ int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); int32_t nLen = 0; + if (numOfTags == 1 && pTagSchema[0].type == TSDB_DATA_TYPE_JSON){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + for (int32_t i = 0; i < numOfTags; 
++i) { nLen += pTagSchema[i].bytes; } @@ -1696,7 +1733,7 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { const char* msg1 = "too many columns"; const char* msg3 = "column length too long"; const char* msg4 = "invalid data type"; - const char* msg5 = "invalid column name"; + const char* msg5 = "invalid column name or length"; const char* msg6 = "invalid column length"; // assert(pCmd->numOfClause == 1); @@ -1774,123 +1811,149 @@ void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t tableUid) { tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s); } -static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem) { - const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables"; +static int32_t handleScalarTypeExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem, + SColumnList* columnList, bool finalResult) { const char* msg2 = "invalid arithmetic expression in select clause"; const char* msg3 = "tag columns can not be used in arithmetic expression"; const char* msg4 = "columns from different table mixed up in arithmetic expression"; - SColumnList columnList = {0}; - int32_t arithmeticType = NON_ARITHMEIC_EXPR; + int32_t tableIndex = columnList->ids[0].tableIndex; + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; - if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + // all columns in scalar expression must belong to the same table + for (int32_t f = 1; f < columnList->num; ++f) { + if (columnList->ids[f].tableIndex != tableIndex) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } } - int32_t tableIndex = columnList.ids[0].tableIndex; - if (arithmeticType == NORMAL_ARITHMETIC) { - pQueryInfo->type |= 
TSDB_QUERY_TYPE_PROJECTION_QUERY; + // expr string is set as the parameter of function + SColumnIndex index = {.tableIndex = tableIndex}; - // all columns in arithmetic expression must belong to the same table - for (int32_t f = 1; f < columnList.num; ++f) { - if (columnList.ids[f].tableIndex != tableIndex) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); - } - } + tExprNode* pNode = NULL; + SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - // expr string is set as the parameter of function - SColumnIndex index = {.tableIndex = tableIndex}; + int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo, colList, NULL); + if (ret != TSDB_CODE_SUCCESS) { + taosArrayDestroy(colList); + tExprTreeDestroy(pNode, NULL); + if (tscGetErrorMsgLength(pCmd) > 0) { + return ret; + } + + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), - getNewResColId(pCmd), sizeof(double), false); + // check for if there is a tag in the scalar expression + size_t numOfNode = taosArrayGetSize(colList); + for(int32_t k = 0; k < numOfNode; ++k) { + SColIndex* pIndex = taosArrayGet(colList, k); + if (TSDB_COL_IS_TAG(pIndex->flag)) { + tExprTreeDestroy(pNode, NULL); + taosArrayDestroy(colList); + + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } - char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:pItem->pNode->exprToken.z; - size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->exprToken.n + 1); - tstrncpy(pExpr->base.aliasName, name, len); + ret = exprTreeValidateTree(tscGetErrorMsgPayload(pCmd), pNode); + if (ret != TSDB_CODE_SUCCESS) { + taosArrayDestroy(colList); + tExprTreeDestroy(pNode, NULL); + if (tscGetErrorMsgLength(pCmd) > 0) { + return ret; + } + + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - tExprNode* pNode = NULL; - SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo, colList, NULL); - if (ret != TSDB_CODE_SUCCESS) { - taosArrayDestroy(colList); - tExprTreeDestroy(pNode, NULL); - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - } + SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_SCALAR_EXPR, &index, pNode->resultType, pNode->resultBytes, + getNewResColId(pCmd), 0, false); - // check for if there is a tag in the arithmetic express - size_t numOfNode = taosArrayGetSize(colList); - for(int32_t k = 0; k < numOfNode; ++k) { - SColIndex* pIndex = taosArrayGet(colList, k); - if (TSDB_COL_IS_TAG(pIndex->flag)) { - tExprTreeDestroy(pNode, NULL); - taosArrayDestroy(colList); + char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:pItem->pNode->exprToken.z; + size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->exprToken.n + 1); + tstrncpy(pExpr->base.aliasName, name, len); - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); - } - } + SBufferWriter bw = tbufInitWriter(NULL, false); - SBufferWriter bw = tbufInitWriter(NULL, false); + TRY(0) { + exprTreeToBinary(&bw, pNode); + } CATCH(code) { + tbufCloseWriter(&bw); + UNUSED(code); + // TODO: other error handling + } END_TRY - TRY(0) { - exprTreeToBinary(&bw, pNode); - } CATCH(code) { - tbufCloseWriter(&bw); - UNUSED(code); - // TODO: other error handling - } END_TRY + len = tbufTell(&bw); + char* c = tbufGetData(&bw, false); - len = tbufTell(&bw); - char* c = tbufGetData(&bw, false); + // set the serialized binary string as the parameter of arithmetic expression + tscExprAddParams(&pExpr->base, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); + if (finalResult) { + insertResultField(pQueryInfo, exprIndex, columnList, pExpr->base.resBytes, (int8_t)pExpr->base.resType, pExpr->base.aliasName, + pExpr); + } - // set the serialized binary string as the parameter of arithmetic expression - tscExprAddParams(&pExpr->base, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); - insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->base.aliasName, pExpr); + // add ts column + tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); - // add ts column - tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); + tbufCloseWriter(&bw); + taosArrayDestroy(colList); + tExprTreeDestroy(pNode, NULL); - tbufCloseWriter(&bw); - taosArrayDestroy(colList); - tExprTreeDestroy(pNode, NULL); - } else { - columnList.num = 0; - columnList.ids[0] = (SColumnIndex) {0, 0}; + return TSDB_CODE_SUCCESS; +} - char rawName[TSDB_COL_NAME_LEN] = {0}; - char aliasName[TSDB_COL_NAME_LEN] = {0}; - getColumnName(pItem, aliasName, rawName, TSDB_COL_NAME_LEN); +static int32_t handleAggTypeExpr(SSqlCmd* pCmd, 
SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem, + SColumnList* columnList, bool finalResult) { + const char* msg2 = "invalid arithmetic expression in select clause"; - insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, aliasName, NULL); + columnList->num = 0; + columnList->ids[0] = (SColumnIndex) {0, 0}; - int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); - assert(pInfo->pExpr == NULL); + char rawName[TSDB_COL_NAME_LEN] = {0}; + char aliasName[TSDB_COL_NAME_LEN] = {0}; + getColumnName(pItem, aliasName, rawName, TSDB_COL_NAME_LEN); - SExprInfo* pExprInfo = calloc(1, sizeof(SExprInfo)); + tExprNode *pExpr = NULL; + uint64_t uid = 0; + int32_t ret = exprTreeFromSqlExpr(pCmd, &pExpr, pItem->pNode, pQueryInfo, NULL, &uid); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(pExpr, NULL); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); + } - // arithmetic expression always return result in the format of double float - pExprInfo->base.resBytes = sizeof(double); - pExprInfo->base.interBytes = 0; - pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE; + ret = exprTreeValidateTree(tscGetErrorMsgPayload(pCmd), pExpr); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(pExpr, NULL); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + // the expr associated with the result field will become exprList1 in SQueryInfo, then pExpr2 in SQueryAttr + insertResultField(pQueryInfo, exprIndex, columnList, pExpr->resultBytes, (int8_t)pExpr->resultType, aliasName, NULL); - pExprInfo->base.functionId = TSDB_FUNC_ARITHM; - pExprInfo->base.numOfParams = 1; - pExprInfo->base.resColId = getNewResColId(pCmd); - strncpy(pExprInfo->base.aliasName, aliasName, tListLen(pExprInfo->base.aliasName)); - strncpy(pExprInfo->base.token, rawName, tListLen(pExprInfo->base.token)); + int32_t slot = 
tscNumOfFields(pQueryInfo) - 1; + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); + assert(pInfo->pExpr == NULL); + SExprInfo* pExprInfo = calloc(1, sizeof(SExprInfo)); + // arithmetic expression always return result in the format of double float + pExprInfo->base.resBytes = pExpr->resultBytes; + pExprInfo->base.interBytes = 0; + pExprInfo->base.resType = pExpr->resultType; - int32_t ret = exprTreeFromSqlExpr(pCmd, &pExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &(pExprInfo->base.uid)); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(pExprInfo->pExpr, NULL); - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); - } + pExprInfo->base.functionId = TSDB_FUNC_SCALAR_EXPR; + pExprInfo->base.numOfParams = 1; + pExprInfo->base.resColId = getNewResColId(pCmd); + strncpy(pExprInfo->base.aliasName, aliasName, tListLen(pExprInfo->base.aliasName)); + strncpy(pExprInfo->base.token, rawName, tListLen(pExprInfo->base.token)); - pInfo->pExpr = pExprInfo; + pExprInfo->pExpr = pExpr; + pExprInfo->base.uid = uid; + pInfo->pExpr = pExprInfo; - SBufferWriter bw = tbufInitWriter(NULL, false); + SBufferWriter bw = tbufInitWriter(NULL, false); - TRY(0) { + TRY(0) { exprTreeToBinary(&bw, pInfo->pExpr->pExpr); } CATCH(code) { tbufCloseWriter(&bw); @@ -1898,33 +1961,70 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32 // TODO: other error handling } END_TRY - SSqlExpr* pSqlExpr = &pInfo->pExpr->base; - pSqlExpr->param[0].nLen = (int16_t) tbufTell(&bw); - pSqlExpr->param[0].pz = tbufGetData(&bw, true); - pSqlExpr->param[0].nType = TSDB_DATA_TYPE_BINARY; + SSqlExpr* pSqlExpr = &pInfo->pExpr->base; + pSqlExpr->param[0].nLen = (int16_t) tbufTell(&bw); + pSqlExpr->param[0].pz = tbufGetData(&bw, true); + pSqlExpr->param[0].nType = TSDB_DATA_TYPE_BINARY; // tbufCloseWriter(&bw); // TODO there is a memory leak + return TSDB_CODE_SUCCESS; +} + +static int32_t 
handleSQLExprItem(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem) { + SColumnList columnList = {0}; + int32_t sqlExprType = SQLEXPR_TYPE_UNASSIGNED; + + uint64_t uid; + int32_t height = 0; + int32_t code = validateSQLExprItem(pCmd, pItem->pNode, pQueryInfo, &columnList, &sqlExprType, &uid, &height); + + if (code != TSDB_CODE_SUCCESS) { + return code; } - return TSDB_CODE_SUCCESS; + if (height > 16) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "the max expression nested levels must be equal or less than 16"); + } + + if (sqlExprType == SQLEXPR_TYPE_SCALAR) { + code = handleScalarTypeExpr(pCmd, pQueryInfo, exprIndex, pItem, &columnList, true); + } else { + code = handleAggTypeExpr(pCmd, pQueryInfo, exprIndex, pItem, &columnList, true); + } + + return code; } static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSqlExprItem* pItem, int32_t colId) { SExprInfo* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex, colId); + if( pItem->pNode->tokenId == TK_ARROW){ + tSqlExpr* right = pItem->pNode->pRight; + assert(right != NULL && right->type == SQL_NODE_VALUE); + tVariantAssign(&(pExpr->base.param[pExpr->base.numOfParams]), &right->value); + pExpr->base.numOfParams++; + } STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); - char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName; - tstrncpy(pExpr->base.aliasName, colName, sizeof(pExpr->base.aliasName)); - + if (pSchema->type == TSDB_DATA_TYPE_JSON && pItem->pNode->tokenId == TK_ARROW) { + if (pItem->aliasName){ + tstrncpy(pExpr->base.aliasName, pItem->aliasName, sizeof(pExpr->base.aliasName)); + }else{ + tstrncpy(pExpr->base.aliasName, pItem->pNode->exprToken.z, + pItem->pNode->exprToken.n + 1 < sizeof(pExpr->base.aliasName) ? 
pItem->pNode->exprToken.n + 1 : sizeof(pExpr->base.aliasName)); + } + }else{ + char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName; + tstrncpy(pExpr->base.aliasName, colName, sizeof(pExpr->base.aliasName)); + } SColumnList ids = {0}; ids.num = 1; ids.ids[0] = *pIndex; - if (pIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX || pIndex->columnIndex == TSDB_UD_COLUMN_INDEX || + if (pIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX || pIndex->columnIndex <= TSDB_UD_COLUMN_INDEX || pIndex->columnIndex >= tscGetNumOfColumns(pTableMeta)) { ids.num = 0; } @@ -1949,7 +2049,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { // add the timestamp column into the output columns SColumnIndex index = {0}; // primary timestamp column info - int32_t numOfCols = (int32_t)tscNumOfExprs(pQueryInfo); + int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); @@ -2040,6 +2140,16 @@ static SUdfInfo* isValidUdf(SArray* pUdfInfo, const char* name, int32_t len) { return NULL; } +static bool willProcessFunctionWithExpr(const tSqlExprItem* pItem) { + assert(pItem->pNode->type == SQL_NODE_SQLFUNCTION); + int32_t functionId = pItem->pNode->functionId; + + if (TSDB_FUNC_IS_SCALAR(functionId)) { + return true; + } + return false; +} + int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool joinQuery, bool timeWindowQuery, bool outerQuery) { assert(pSelNodeList != NULL && pCmd != NULL); @@ -2054,7 +2164,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg8 = "not support distinct in nest query"; const char* msg9 = "_block_dist not support subquery, only support stable/table"; const char* msg10 = "not support group by in block func"; - const char* 
msg11 = "invalid alias name"; + const char* msg11 = "invalid alias name or length"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2081,8 +2191,17 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); } + if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); + } + int32_t type = pItem->pNode->type; - if (type == SQL_NODE_SQLFUNCTION) { + if (type == SQL_NODE_EXPR && pItem->pNode->tokenId != TK_ARROW) { + int32_t code = handleSQLExprItem(pCmd, pQueryInfo, i, pItem); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } else if (type == SQL_NODE_SQLFUNCTION) { hasAgg = true; if (hasDistinct) break; @@ -2106,22 +2225,25 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS pItem->pNode->functionId = pUdfInfo->functionId; } - // sql function in selection clause, append sql function info in pSqlCmd structure sequentially - if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true, pUdfInfo) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_OPERATION; + if (willProcessFunctionWithExpr(pItem)) { + int32_t code = handleSQLExprItem(pCmd, pQueryInfo, i, pItem); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } else { + // sql function in selection clause, append sql function info in pSqlCmd structure sequentially + if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true, pUdfInfo) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } } - } else if (type == SQL_NODE_TABLE_COLUMN || type == SQL_NODE_VALUE) { + } else if (type == SQL_NODE_TABLE_COLUMN || type == SQL_NODE_VALUE || + (type == SQL_NODE_EXPR && pItem->pNode->tokenId == TK_ARROW)) { // use the dynamic array list to decide if the function is valid or not // select 
table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2 if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem, outerQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - } else if (type == SQL_NODE_EXPR) { - int32_t code = handleArithmeticExpr(pCmd, pQueryInfo, i, pItem); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } else { + } else { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -2265,6 +2387,9 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t const char* msg1 = "tag for normal table query is not allowed"; const char* msg2 = "invalid column name"; const char* msg3 = "tbname not allowed in outer query"; + const char* msg4 = "-> operate can only used in json type"; + const char* msg5 = "the right value of -> operation must be string"; + const char* msg6 = "select name is too long than 64, please use alias name"; int32_t startPos = (int32_t)tscNumOfExprs(pQueryInfo); int32_t tokenId = pItem->pNode->tokenId; @@ -2304,13 +2429,29 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC, getNewResColId(pCmd)); - // NOTE: the first parameter is reserved for the tag column id during join query process. 
- pExpr->base.numOfParams = 2; - tVariantAssign(&pExpr->base.param[1], &pItem->pNode->value); - } else if (tokenId == TK_ID) { + tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pItem->pNode->value); + }else if (tokenId == TK_ID || tokenId == TK_ARROW) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pItem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + SStrToken* pToken = NULL; + if (tokenId == TK_ARROW){ + tSqlExpr* left = pItem->pNode->pLeft; + assert(left != NULL && left->type == SQL_NODE_TABLE_COLUMN); + if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0){ // if select from subquery, pToken should be jtag->'location'. like (select jtag->'location' from (select jtag->'location' from jsons1);) + pToken = &pItem->pNode->exprToken; + }else{ + pToken = &left->columnName; + } + + tSqlExpr* right = pItem->pNode->pRight; + if(right == NULL || right->type != SQL_NODE_VALUE || right->tokenId != TK_STRING){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + }else { + pToken = &pItem->pNode->columnName; + } + + if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2357,6 +2498,14 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + if (tokenId == TK_ARROW && pSchema->type != TSDB_DATA_TYPE_JSON) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (pSchema->type == TSDB_DATA_TYPE_JSON && tokenId == TK_ARROW && pItem->pNode->exprToken.n >= TSDB_COL_NAME_LEN){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + addProjectQueryCol(pQueryInfo, startPos, &index, pItem, getNewResColId(pCmd)); pQueryInfo->type 
|= TSDB_QUERY_TYPE_PROJECTION_QUERY; } @@ -2438,11 +2587,20 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT if (tsKeepOriginalColumnName) { // keep the original column name tstrncpy(name, uname, TSDB_COL_NAME_LEN); } else { - int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1; - char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0}; - snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname); + if (!TSDB_FUNC_IS_SCALAR(functionId)) { + int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0}; + snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname); + + tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + } else { + int32_t index = TSDB_FUNC_SCALAR_INDEX(functionId); + int32_t size = TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1] = {0}; + snprintf(tmp, size, "%s(%s)", aScalarFunctions[index].name, uname); - tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + } } } else { // use the user-input result column name int32_t len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN); @@ -2500,6 +2658,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg13 = "parameter list required"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; const char* msg15 = "parameter is out of range [1, 1000]"; + const char* msg16 = "elapsed duration should be greater than or equal to database precision"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2590,23 +2749,22 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_DIFF: case TSDB_FUNC_DERIVATIVE: case TSDB_FUNC_CSUM: - case TSDB_FUNC_CEIL: - case TSDB_FUNC_FLOOR: - case TSDB_FUNC_ROUND: case 
TSDB_FUNC_STDDEV: - case TSDB_FUNC_LEASTSQR: { + case TSDB_FUNC_LEASTSQR: + case TSDB_FUNC_ELAPSED: { // 1. valid the number of parameters int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList); // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || - (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) || - ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) { + (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && numOfParams != 1) || + ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) || + (functionId == TSDB_FUNC_ELAPSED && numOfParams > 2)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0); - if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) { + if ((pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) || 0 == pParamElem->pNode->columnName.n) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2615,6 +2773,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } + // elapsed only can be applied to primary key + if (functionId == TSDB_FUNC_ELAPSED && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); + } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -2626,7 +2789,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // 2. 
check if sql function can be applied on this column data type SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - if (!IS_NUMERIC_TYPE(pSchema->type)) { + if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId != TSDB_FUNC_ELAPSED)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); @@ -2671,11 +2834,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else if (functionId == TSDB_FUNC_IRATE) { int64_t prec = info.precision; tscExprAddParams(&pExpr->base, (char*)&prec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); - } else if (functionId == TSDB_FUNC_DERIVATIVE) { + } else if (functionId == TSDB_FUNC_DERIVATIVE || (functionId == TSDB_FUNC_ELAPSED && 2 == numOfParams)) { char val[8] = {0}; int64_t tickPerSec = 0; - if (tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { + if ((TSDB_DATA_TYPE_NULL == pParamElem[1].pNode->value.nType) || tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -2685,23 +2848,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); } - if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { + if ((tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) && (functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); - } + } else if (tickPerSec <= 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16); + } tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); - memset(val, 0, tListLen(val)); + if (functionId == TSDB_FUNC_DERIVATIVE) { + memset(val, 
0, tListLen(val)); - if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } + if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } - int64_t v = *(int64_t*) val; - if (v != 0 && v != 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); - } + int64_t v = *(int64_t*) val; + if (v != 0 && v != 1) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); + } - tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); + tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); + } } SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); @@ -3120,8 +3287,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_SUCCESS; } - default: { + assert(!TSDB_FUNC_IS_SCALAR(functionId)); pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); if (pUdfInfo == NULL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); @@ -3251,13 +3418,15 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum const char* msg0 = "ambiguous column name"; const char* msg1 = "invalid column name"; + if (pToken->n == 0) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if (isTablenameToken(pToken)) { pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX; } else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n && strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) { pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest - } else if (pToken->n == 0) { - pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest } else { // not specify the table name, try to 
locate the table index by column name if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) { @@ -3430,7 +3599,9 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) { return TSDB_CODE_TSC_INVALID_OPERATION; } - strncpy(pCmd->payload, idStr->z, idStr->n); + SKillQueryMsg* msg = (SKillQueryMsg*)pCmd->payload; + + strncpy(msg->queryId, idStr->z, idStr->n); const char delim = ':'; char* connIdStr = strtok(idStr->z, &delim); @@ -3438,7 +3609,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) { int32_t connId = (int32_t)strtol(connIdStr, NULL, 10); if (connId <= 0) { - memset(pCmd->payload, 0, strlen(pCmd->payload)); + memset(msg, 0, sizeof(*msg)); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -3448,7 +3619,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) { int32_t queryId = (int32_t)strtol(queryIdStr, NULL, 10); if (queryId <= 0) { - memset(pCmd->payload, 0, strlen(pCmd->payload)); + memset(msg, 0, sizeof(*msg)); if (killType == TSDB_SQL_KILL_QUERY) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } else { @@ -3481,7 +3652,11 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { size_t size = tscNumOfExprs(pQueryInfo); for (int32_t k = 0; k < size; ++k) { SExprInfo* pExpr = tscExprGet(pQueryInfo, k); - int16_t functionId = aAggs[pExpr->base.functionId].stableFuncId; + + int16_t functionId = pExpr->base.functionId; + if (!TSDB_FUNC_IS_SCALAR(functionId)) { + functionId = aAggs[pExpr->base.functionId].stableFuncId; + } int32_t colIndex = pExpr->base.colInfo.colIndex; SSchema* pSrcSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, colIndex); @@ -3489,7 +3664,7 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) || (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) || (functionId >= TSDB_FUNC_RATE && functionId <= 
TSDB_FUNC_IRATE) || - (functionId == TSDB_FUNC_SAMPLE)) { + (functionId == TSDB_FUNC_SAMPLE) || (functionId == TSDB_FUNC_ELAPSED)) { if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes, &interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -3544,8 +3719,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) { } bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { - const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP are not allowed to apply to super table directly"; - const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP only support group by tbname for super table query"; + const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed are not allowed to apply to super table directly"; + const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed only support group by tbname for super table query"; const char* msg3 = "functions not support for super table query"; // filter sql function not supported by metric query yet. 
@@ -3556,6 +3731,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) continue; } + if (TSDB_FUNC_IS_SCALAR(functionId)) { + continue; + } + if ((aAggs[functionId].status & TSDB_FUNCSTATE_STABLE) == 0) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); return true; @@ -3563,7 +3742,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) } if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivLikeQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo) || - tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE) || tscGetPointInterpQuery(pQueryInfo)) { + tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE) || tscGetPointInterpQuery(pQueryInfo) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED)) { if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); return true; @@ -3631,7 +3810,9 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool int32_t scalarUdf = 0; int32_t prjNum = 0; int32_t aggNum = 0; - int32_t scalNum = 0; + int32_t scalarFuncNum = 0; + int32_t funcCompatFactor = INT_MAX; + int32_t countTbname = 0; size_t numOfExpr = tscNumOfExprs(pQueryInfo); assert(numOfExpr > 0); @@ -3663,18 +3844,28 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool ++prjNum; } - if (functionId == TSDB_FUNC_CEIL || functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) { - ++scalNum; + if (TSDB_FUNC_IS_SCALAR(functionId)) { + ++scalarFuncNum; + } + + if (functionId == TSDB_FUNC_SCALAR_EXPR) { + ++scalarFuncNum; } if (functionId == TSDB_FUNC_PRJ && (pExpr1->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->base.colInfo.flag))) { continue; } + if (TSDB_FUNC_IS_SCALAR(functionId)) { + funcCompatFactor = 1; + } else { + funcCompatFactor = functionCompatList[functionId]; + } + if (factor == INT32_MAX) { - factor = functionCompatList[functionId]; + factor = 
funcCompatFactor; } else { - if (functionCompatList[functionId] != factor) { + if (funcCompatFactor != factor) { return false; } else { if (factor == -1) { // two functions with the same -1 flag @@ -3686,21 +3877,29 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || twQuery || !groupbyTagsOrNull(pQueryInfo))) { return false; } + + if (functionId == TSDB_FUNC_COUNT && (pExpr1->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TAG(pExpr1->base.colInfo.flag))) { + ++countTbname; + } } - aggNum = (int32_t)size - prjNum - scalNum - aggUdf - scalarUdf; + aggNum = (int32_t)size - prjNum - scalarFuncNum - aggUdf - scalarUdf - countTbname; assert(aggNum >= 0); - if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalNum > 0 || scalarUdf > 0)) { + if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalarFuncNum > 0 || scalarUdf > 0)) { return false; } - if (scalarUdf > 0 && (aggNum > 0 || scalNum > 0)) { + if (scalarUdf > 0 && (aggNum > 0 || scalarFuncNum > 0)) { return false; } - if (aggNum > 0 && scalNum > 0) { + if (aggNum > 0 && scalarFuncNum > 0) { + return false; + } + + if (countTbname && (prjNum > 0 || aggNum > 0 || scalarUdf > 0 || aggUdf > 0)) { return false; } @@ -3716,6 +3915,9 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd const char* msg6 = "tags not allowed for table query"; const char* msg7 = "not support group by expression"; const char* msg8 = "normal column can only locate at the end of group by clause"; + const char* msg9 = "json tag must be use ->'key'"; + const char* msg10 = "non json column can not use ->'key'"; + const char* msg11 = "group by json->'key' is too long"; // todo : handle two tables situation STableMetaInfo* pTableMetaInfo = NULL; @@ -3751,13 +3953,18 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd size_t num = taosArrayGetSize(pList); for (int32_t i = 0; i < num; ++i) { - 
tVariantListItem * pItem = taosArrayGet(pList, i); - tVariant* pVar = &pItem->pVar; - - SStrToken token = {pVar->nLen, pVar->nType, pVar->pz}; - - if (pVar->nType != TSDB_DATA_TYPE_BINARY){ - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + CommonItem * pItem = taosArrayGet(pList, i); + SStrToken token = {0}; + if(pItem->isJsonExp){ + assert(pItem->jsonExp->tokenId == TK_ARROW); + token = pItem->jsonExp->pLeft->columnName; + }else { + token.n = pItem->pVar.nLen; + token.z = pItem->pVar.pz; + token.type = pItem->pVar.nType; + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } } SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -3780,6 +3987,13 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); } + if (pSchema->type == TSDB_DATA_TYPE_JSON && !pItem->isJsonExp){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); + } + if (pSchema->type != TSDB_DATA_TYPE_JSON && pItem->isJsonExp){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); + } + int32_t numOfCols = tscGetNumOfColumns(pTableMeta); bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols); @@ -3794,9 +4008,17 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd } SColIndex colIndex = { .colIndex = relIndex, .flag = TSDB_COL_TAG, .colId = pSchema->colId, }; - strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name)); + if(pItem->isJsonExp) { + if(pItem->jsonExp->exprToken.n >= tListLen(colIndex.name)){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); + } + tstrncpy(colIndex.name, pItem->jsonExp->exprToken.z, pItem->jsonExp->exprToken.n + 1); + }else{ + tstrncpy(colIndex.name, pSchema->name, tListLen(colIndex.name)); + } + taosArrayPush(pGroupExpr->columnInfo, &colIndex); - + index.columnIndex = relIndex; 
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema); } else { @@ -3806,7 +4028,7 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd } tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema); - + SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name)); @@ -3825,7 +4047,7 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd for(int32_t i = 0; i < num; ++i) { SColIndex* pIndex = taosArrayGet(pGroupExpr->columnInfo, i); if (TSDB_COL_IS_NORMAL_COL(pIndex->flag) && i != num - 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); } } @@ -4152,8 +4374,17 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight); } + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + if(pLeft->tokenId == TK_ARROW){ + pLeft = pLeft->pLeft; + } + if(pRight->tokenId == TK_ARROW){ + pRight = pRight->pLeft; + } + SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } @@ -4169,6 +4400,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS (*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid; (*leftNode)->tagColId = pTagSchema1->colId; + if(pExpr->pLeft->tokenId == TK_ARROW) { + tstrncpy((*leftNode)->tagJsonKeyName, pExpr->pLeft->pRight->value.pz, TSDB_MAX_JSON_KEY_LEN + 1); + } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { 
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -4187,7 +4421,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS int16_t leftIdx = index.tableIndex; index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pExpr->pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } @@ -4203,6 +4437,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS (*rightNode)->uid = pTableMetaInfo->pTableMeta->id.uid; (*rightNode)->tagColId = pTagSchema2->colId; + if(pExpr->pRight->tokenId == TK_ARROW) { + tstrncpy((*rightNode)->tagJsonKeyName, pExpr->pRight->pRight->value.pz, TSDB_MAX_JSON_KEY_LEN + 1); + } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -4250,47 +4487,80 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr); } -static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, - int32_t* type, uint64_t* uid) { - if (pExpr->type == SQL_NODE_TABLE_COLUMN) { - if (*type == NON_ARITHMEIC_EXPR) { - *type = NORMAL_ARITHMETIC; - } else if (*type == AGG_ARIGHTMEIC) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } +static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr, + SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type, uint64_t *uid, int32_t* height) { + int32_t code = TSDB_CODE_SUCCESS; + const char* msg1 = "invalid function parameters"; + const char* msg2 = "not supported functions in arithmetic expression"; + + int32_t functionId = isValidFunction(pExpr->Expr.operand.z, pExpr->Expr.operand.n); + + pExpr->functionId = functionId; + if (pExpr->Expr.paramList != 
NULL) { + size_t numChildren = taosArrayGetSize(pExpr->Expr.paramList); + int32_t* childrenTypes = calloc(numChildren, sizeof(int32_t)); + int32_t* childrenHeight = calloc(numChildren, sizeof(int32_t)); + for (int32_t i = 0; i < numChildren; ++i) { + tSqlExprItem* pParamElem = taosArrayGet(pExpr->Expr.paramList, i); + if (TSDB_FUNC_IS_SCALAR(functionId)) { + code = validateSQLExprItem(pCmd, pParamElem->pNode, pQueryInfo, pList, childrenTypes + i, uid, childrenHeight+i); + if (code != TSDB_CODE_SUCCESS) { + free(childrenTypes); + return code; + } + } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } + if (!TSDB_FUNC_IS_SCALAR(functionId) && + (pParamElem->pNode->type == SQL_NODE_EXPR || pParamElem->pNode->type == SQL_NODE_SQLFUNCTION)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } - // if column is timestamp, bool, binary, nchar, not support arithmetic, so return invalid sql - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta; - SSchema* pSchema = tscGetTableSchema(pTableMeta) + index.columnIndex; - - if ((pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) || (pSchema->type == TSDB_DATA_TYPE_BOOL) || - (pSchema->type == TSDB_DATA_TYPE_BINARY) || (pSchema->type == TSDB_DATA_TYPE_NCHAR)) { - return TSDB_CODE_TSC_INVALID_OPERATION; } + { + if (TSDB_FUNC_IS_SCALAR(functionId)) { + bool anyChildScalar = false; + bool anyChildAgg = false; + int32_t maxChildrenHeight = 0; + for (int i = 0; i < numChildren; ++i) { + assert (childrenTypes[i] != SQLEXPR_TYPE_UNASSIGNED); + anyChildScalar = anyChildScalar || (childrenTypes[i] == SQLEXPR_TYPE_SCALAR); + anyChildAgg = anyChildAgg || (childrenTypes[i] == SQLEXPR_TYPE_AGG); + if (childrenHeight[i] > maxChildrenHeight) { + maxChildrenHeight = childrenHeight[i]; + } + } - pList->ids[pList->num++] = index; - } else if 
((pExpr->tokenId == TK_FLOAT && (isnan(pExpr->value.dKey) || isinf(pExpr->value.dKey))) || - pExpr->tokenId == TK_NULL) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } else if (pExpr->type == SQL_NODE_SQLFUNCTION) { - if (*type == NON_ARITHMEIC_EXPR) { - *type = AGG_ARIGHTMEIC; - } else if (*type == NORMAL_ARITHMETIC) { - return TSDB_CODE_TSC_INVALID_OPERATION; + *height = maxChildrenHeight + 1; + + if (anyChildAgg && anyChildScalar) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + if (anyChildAgg) { + *type = SQLEXPR_TYPE_AGG; + } else { + *type = SQLEXPR_TYPE_SCALAR; + } + } else { + *type = SQLEXPR_TYPE_AGG; + } + } + free(childrenTypes); + //end if param list is not null + } else { + if (TSDB_FUNC_IS_SCALAR(functionId)) { + *type = SQLEXPR_TYPE_SCALAR; + } else { + *type = SQLEXPR_TYPE_AGG; } + } // else param list is null + if (!TSDB_FUNC_IS_SCALAR(functionId)) { int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo); - + tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL}; - + // sql function list in selection clause. 
// Append the sqlExpr into exprList of pQueryInfo structure sequentially - pExpr->functionId = isValidFunction(pExpr->Expr.operand.z, pExpr->Expr.operand.n); if (pExpr->functionId < 0) { SUdfInfo* pUdfInfo = NULL; pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pExpr->Expr.operand.z, pExpr->Expr.operand.n); @@ -4299,23 +4569,25 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer } } + // add the aggregate function to SQueryInfo exprList, which is pExpr1/global aggregate pExpr3 in SQueryAttr if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false, NULL) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } // It is invalid in case of more than one sqlExpr, such as first(ts, k) - last(ts, k) - int32_t inc = (int32_t) tscNumOfExprs(pQueryInfo) - outputIndex; + int32_t inc = (int32_t)tscNumOfExprs(pQueryInfo) - outputIndex; if (inc > 1) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } // Not supported data type in arithmetic expression uint64_t id = -1; - for(int32_t i = 0; i < inc; ++i) { + for (int32_t i = 0; i < inc; ++i) { SExprInfo* p1 = tscExprGet(pQueryInfo, i + outputIndex); int16_t t = p1->base.resType; - if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) { + if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || + t == TSDB_DATA_TYPE_TIMESTAMP) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -4331,49 +4603,101 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer *uid = id; } + return TSDB_CODE_SUCCESS; +} + +static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, + int32_t* type, uint64_t* uid, int32_t* height) { + uint64_t uidLeft = 0; + uint64_t uidRight = 0; + int32_t leftType = SQLEXPR_TYPE_UNASSIGNED; + int32_t rightType = 
SQLEXPR_TYPE_UNASSIGNED; + const char* msg1 = "arithmetic expression composed with columns from different tables"; + const char* msg2 = "arithmetic expression composed with functions/columns of different types"; + int32_t leftHeight = 0; + int32_t ret = validateSQLExprItem(pCmd, pExpr->pLeft, pQueryInfo, pList, &leftType, &uidLeft, &leftHeight); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + int32_t rightHeight = 0; + ret = validateSQLExprItem(pCmd, pExpr->pRight, pQueryInfo, pList, &rightType, &uidRight, &rightHeight); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (uidLeft != uidRight && uidLeft != 0 && uidRight != 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + *uid = uidLeft; + *height = (leftHeight > rightHeight) ? leftHeight + 1 : rightHeight+1; + { + assert(leftType != SQLEXPR_TYPE_UNASSIGNED && rightType != SQLEXPR_TYPE_UNASSIGNED); + + // return invalid operation when one child aggregate and the other child scalar or column + if ((leftType == SQLEXPR_TYPE_AGG && rightType == SQLEXPR_TYPE_SCALAR) || (rightType == SQLEXPR_TYPE_AGG && leftType == SQLEXPR_TYPE_SCALAR)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + if (leftType == SQLEXPR_TYPE_AGG || rightType == SQLEXPR_TYPE_AGG) { + *type = SQLEXPR_TYPE_AGG; + } else if (leftType == SQLEXPR_TYPE_VALUE && rightType == SQLEXPR_TYPE_VALUE) { + *type = SQLEXPR_TYPE_VALUE; + } else if (leftType == SQLEXPR_TYPE_SCALAR || rightType == SQLEXPR_TYPE_SCALAR){ + *type = SQLEXPR_TYPE_SCALAR; + } + } return TSDB_CODE_SUCCESS; } -static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) { +static int32_t validateSQLExprItem(SSqlCmd* pCmd, tSqlExpr* pExpr, + SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type, uint64_t* uid, int32_t* height) { + const char* msg1 = "invalid column name in select clause"; + const char* msg2 = "invalid data type in select clause"; + const 
char* msg3 = "invalid select clause"; + if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - - tSqlExpr* pLeft = pExpr->pLeft; - uint64_t uidLeft = 0; - uint64_t uidRight = 0; - - if (pLeft->type == SQL_NODE_EXPR) { - int32_t ret = validateArithmeticSQLExpr(pCmd, pLeft, pQueryInfo, pList, type); + if (pExpr->type == SQL_NODE_EXPR) { + int32_t ret = validateSQLExprItemArithmeticExpr(pCmd, pExpr, pQueryInfo, pList, type, uid, height); if (ret != TSDB_CODE_SUCCESS) { return ret; } - } else { - int32_t ret = validateSQLExpr(pCmd, pLeft, pQueryInfo, pList, type, &uidLeft); + } else if (pExpr->type == SQL_NODE_SQLFUNCTION) { + int32_t ret = validateSQLExprItemSQLFunc(pCmd, pExpr, pQueryInfo, pList, type, uid, height); if (ret != TSDB_CODE_SUCCESS) { return ret; } - } + } else if (pExpr->type == SQL_NODE_TABLE_COLUMN) { + SColumnIndex index = COLUMN_INDEX_INITIALIZER; - tSqlExpr* pRight = pExpr->pRight; - if (pRight->type == SQL_NODE_EXPR) { - int32_t ret = validateArithmeticSQLExpr(pCmd, pRight, pQueryInfo, pList, type); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } + + pList->ids[pList->num++] = index; + *type = SQLEXPR_TYPE_SCALAR; + } else if (pExpr->type == SQL_NODE_DATA_TYPE) { + if (pExpr->dataType.type < 0 || pExpr->dataType.bytes <= 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + *type = SQLEXPR_TYPE_VALUE; } else { - int32_t ret = validateSQLExpr(pCmd, pRight, pQueryInfo, pList, type, &uidRight); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if ((pExpr->tokenId == TK_FLOAT && (isnan(pExpr->value.dKey) || isinf(pExpr->value.dKey))) || + pExpr->tokenId == TK_NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - // the expression not from the same table, return error - if (uidLeft != uidRight && uidLeft != 0 && uidRight != 0) 
{ - return TSDB_CODE_TSC_INVALID_OPERATION; + if (pExpr->value.nType == (uint32_t)-1) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + if (pExpr->type == SQL_NODE_VALUE) { + *type = SQLEXPR_TYPE_VALUE; } } - return TSDB_CODE_SUCCESS; } @@ -4413,7 +4737,7 @@ static void exchangeExpr(tSqlExpr* pExpr) { tSqlExpr* pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; - if (pRight->tokenId == TK_ID && (pLeft->tokenId == TK_INTEGER || pLeft->tokenId == TK_FLOAT || + if ((pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW) && (pLeft->tokenId == TK_INTEGER || pLeft->tokenId == TK_FLOAT || pLeft->tokenId == TK_STRING || pLeft->tokenId == TK_BOOL)) { /* * exchange value of the left handside and the value of the right-handside @@ -4450,8 +4774,28 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr const char* msg3 = "join column must have same type"; const char* msg4 = "self join is not allowed"; const char* msg5 = "join table must be the same type(table to table, super table to super table)"; + const char* msg6 = "tag json key must be string"; + const char* msg7 = "tag json key in json must be same"; + const char* msg8 = "tag json key is too long, no more than 256 bytes"; tSqlExpr* pRight = pExpr->pRight; + if(pRight->tokenId == TK_ARROW){ + if(!IS_VAR_DATA_TYPE(pExpr->pLeft->pRight->value.nType) || pExpr->pLeft->pRight->value.nType != pExpr->pRight->pRight->value.nType){ + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + return false; + } + if(pExpr->pLeft->pRight->value.nLen > TSDB_MAX_JSON_KEY_LEN){ + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + return false; + } + if(pExpr->pLeft->pRight->value.nLen != pExpr->pRight->pRight->value.nLen + || strncmp(pExpr->pLeft->pRight->value.pz, pExpr->pRight->pRight->value.pz, pExpr->pRight->pRight->value.nLen) != 0){ + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + return false; + } + + pRight = pExpr->pRight->pLeft; + } if 
(pRight->tokenId != TK_ID) { return true; @@ -4528,13 +4872,13 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t const char* msg = "only support is [not] null"; tSqlExpr* pRight = pExpr->pRight; - if (pRight->tokenId == TK_NULL && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) { + SSchema* pSchema = tscGetTableSchema(pTableMeta); + if (pRight->tokenId == TK_NULL && pSchema[index].type != TSDB_DATA_TYPE_JSON && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) { return invalidOperationMsg(msgBuf, msg); } if (pRight->tokenId == TK_STRING) { - SSchema* pSchema = tscGetTableSchema(pTableMeta); - if (IS_VAR_DATA_TYPE(pSchema[index].type)) { + if (IS_VAR_DATA_TYPE(pSchema[index].type) || pSchema[index].type == TSDB_DATA_TYPE_JSON) { return TSDB_CODE_SUCCESS; } @@ -4572,7 +4916,7 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t } SSchema* pSchema = tscGetTableSchema(pTableMeta); - if ((!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { + if ((pLeft->tokenId != TK_ARROW) && (!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { return invalidOperationMsg(msgBuf, msg2); } } @@ -4580,6 +4924,59 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t return TSDB_CODE_SUCCESS; } +// check for match expression +static int32_t validateJsonTagExpr(tSqlExpr* pExpr, char* msgBuf) { + const char* msg1 = "not support json tag column filter"; + const char* msg2 = "tag json key is invalidate"; + const char* msg3 = "tag json key must be string"; + const char* msg4 = "in operation not support in tag json"; + + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + + if (pExpr->tokenId == TK_CONTAINS) { + if (pRight != NULL && !IS_VAR_DATA_TYPE(pRight->value.nType)) + return invalidOperationMsg(msgBuf, msg3); + + if (pRight != NULL && (pRight->value.nLen > TSDB_MAX_JSON_KEY_LEN || 
pRight->value.nLen <= 0)) + return invalidOperationMsg(msgBuf, msg2); + } else if(pExpr->tokenId == TK_IN){ + return invalidOperationMsg(msgBuf, msg4); + } else { + if (pLeft != NULL && pLeft->tokenId == TK_ID && pExpr->tokenId != TK_ISNULL && pExpr->tokenId != TK_NOTNULL) { + return invalidOperationMsg(msgBuf, msg1); + } + + if (pLeft != NULL && pLeft->tokenId == TK_ARROW) { + if (pLeft->pRight && !IS_VAR_DATA_TYPE(pLeft->pRight->value.nType)) + return invalidOperationMsg(msgBuf, msg3); + if (pLeft->pRight && (pLeft->pRight->value.nLen > TSDB_MAX_JSON_KEY_LEN || pLeft->pRight->value.nLen <= 0)) + return invalidOperationMsg(msgBuf, msg2); + } + + if (pRight->value.nType == TSDB_DATA_TYPE_BINARY){ // json value store by nchar, so need to convert from binary to nchar + if(pRight->value.nLen == INT_BYTES && *(uint32_t*)pRight->value.pz == TSDB_DATA_JSON_null){ + return TSDB_CODE_SUCCESS; + } + if(pRight->value.nLen == 0){ + pRight->value.nType = TSDB_DATA_TYPE_NCHAR; + return TSDB_CODE_SUCCESS; + } + char newData[TSDB_MAX_JSON_TAGS_LEN] = {0}; + int len = 0; + if(!taosMbsToUcs4(pRight->value.pz, pRight->value.nLen, newData, TSDB_MAX_JSON_TAGS_LEN, &len)){ + tscError("json where condition mbsToUcs4 error"); + } + pRight->value.pz = realloc(pRight->value.pz, len); + memcpy(pRight->value.pz, newData, len); + pRight->value.nLen = len; + pRight->value.nType = TSDB_DATA_TYPE_NCHAR; + } + } + + return TSDB_CODE_SUCCESS; +} + // check for match expression static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { const char* msg1 = "regular expression string should be less than %d characters"; @@ -4639,19 +5036,35 @@ int32_t handleNeOptr(tSqlExpr** rexpr, tSqlExpr* expr) { static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr, - int32_t* type, int32_t* tbIdx, int32_t parentOptr, tSqlExpr** columnExpr, tSqlExpr** tsExpr) { + int32_t* type, int32_t* tbIdx, int32_t 
parentOptr, tSqlExpr** columnExpr, + tSqlExpr** tsExpr, bool joinQuery) { const char* msg1 = "table query cannot use tags filter"; const char* msg2 = "illegal column name"; const char* msg4 = "too many join tables"; const char* msg5 = "not support ordinary column join"; + const char* msg6 = "illegal condition expression"; tSqlExpr* pLeft = (*pExpr)->pLeft; tSqlExpr* pRight = (*pExpr)->pRight; + SStrToken* colName = NULL; + if(pLeft->tokenId == TK_ARROW){ + colName = &(pLeft->pLeft->columnName); + if (pRight->tokenId == TK_NULL && (*pExpr)->tokenId == TK_EQ) { + // transform for json->'key'=null + pRight->tokenId = TK_STRING; + pRight->value.nType = TSDB_DATA_TYPE_BINARY; + pRight->value.nLen = INT_BYTES; + pRight->value.pz = calloc(INT_BYTES, 1); + *(uint32_t*)pRight->value.pz = TSDB_DATA_JSON_null; + } + }else{ + colName = &(pLeft->columnName); + } int32_t ret = TSDB_CODE_SUCCESS; SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(colName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -4746,7 +5159,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } else { rexpr = *pExpr; } - + ret = setNormalExprToCond(tsExpr, rexpr, parentOptr); if (type) { *type |= TSQL_EXPR_TS; @@ -4760,7 +5173,15 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query + // check for json tag operation -> and ? 
+ if (pSchema->type == TSDB_DATA_TYPE_JSON){ + code = validateJsonTagExpr(*pExpr, tscGetErrorMsgPayload(pCmd)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + if (pRight != NULL && (pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW)) { // join on tag columns for stable query if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -4770,17 +5191,20 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql *pExpr = NULL; if (type) { *type |= TSQL_EXPR_JOIN; - } + } } else { // do nothing // ret = setExprToCond(pCmd, &pCondExpr->pTagCond, // *pExpr, NULL, parentOptr); tSqlExpr *rexpr = NULL; - if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY && pSchema->type != TSDB_DATA_TYPE_NCHAR && pSchema->type != TSDB_DATA_TYPE_BOOL)) { + if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY + && pSchema->type != TSDB_DATA_TYPE_NCHAR + && pSchema->type != TSDB_DATA_TYPE_BOOL + && pSchema->type != TSDB_DATA_TYPE_JSON)) { handleNeOptr(&rexpr, *pExpr); *pExpr = rexpr; } - + if (type) { *type |= TSQL_EXPR_TAG; } @@ -4789,9 +5213,13 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql if (type) { *type |= TSQL_EXPR_COLUMN; } - - if (pRight->tokenId == TK_ID) { // other column cannot be served as the join column - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); + + if (pRight->tokenId == TK_ID) { + if (joinQuery) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); // other column cannot be served as the join column + } else { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } } tSqlExpr *rexpr = NULL; @@ -4809,7 +5237,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr, - int32_t* type, int32_t* tbIdx, int32_t parentOptr, 
tSqlExpr** columnExpr, tSqlExpr** tsExpr) { + int32_t* type, int32_t* tbIdx, int32_t parentOptr, tSqlExpr** columnExpr, + tSqlExpr** tsExpr, bool joinQuery) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } @@ -4841,12 +5270,12 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr int32_t rightTbIdx = 0; if (!tSqlExprIsParentOfLeaf(*pExpr)) { - ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, type ? &leftType : NULL, &leftTbIdx, (*pExpr)->tokenId, &columnLeft, &tsLeft); + ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, type ? &leftType : NULL, &leftTbIdx, (*pExpr)->tokenId, &columnLeft, &tsLeft, joinQuery); if (ret != TSDB_CODE_SUCCESS) { goto err_ret; } - ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, type ? &rightType : NULL, &rightTbIdx, (*pExpr)->tokenId, &columnRight, &tsRight); + ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, type ? &rightType : NULL, &rightTbIdx, (*pExpr)->tokenId, &columnRight, &tsRight, joinQuery); if (ret != TSDB_CODE_SUCCESS) { goto err_ret; } @@ -4901,7 +5330,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr goto err_ret; } - ret = handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, tbIdx, parentOptr, columnExpr, tsExpr); + ret = handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, tbIdx, parentOptr, columnExpr, tsExpr, joinQuery); if (ret) { goto err_ret; } @@ -4927,6 +5356,9 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo* tSqlExpr* pLeft = (*pExpr)->pLeft; SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if(pLeft->tokenId == TK_ARROW) { + pLeft = pLeft->pLeft; + } if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return; } @@ -5251,7 +5683,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE SArray* colList = 
taosArrayInit(10, sizeof(SColIndex)); ret = exprTreeFromSqlExpr(pCmd, &p, p1, pQueryInfo, colList, NULL); //if (ret == TSDB_CODE_SUCCESS) { - // ret = filterInitFromTree(p, &pQueryInfo->tagFilter, (int32_t)taosArrayGetSize(colList)); + // ret = filterInitFromTree(p, &pQueryInfo->tagFilter, (int32_t)taosArrayGetSize(colList), NULL); //} SBufferWriter bw = tbufInitWriter(NULL, false); @@ -5438,11 +5870,11 @@ _ret: -int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) { +int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql, bool joinQuery) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - + const char* msg1 = "invalid expression"; // const char* msg2 = "invalid filter expression"; @@ -5466,7 +5898,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq } #endif - if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, etype, &tbIdx, (*pExpr)->tokenId, &condExpr.pColumnCond, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { + if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, etype, &tbIdx, (*pExpr)->tokenId, &condExpr.pColumnCond, &condExpr.pTimewindow, joinQuery)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } @@ -5845,7 +6277,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg0 = "only one column allowed in orderby"; const char* msg1 = "invalid column name in orderby clause"; const char* msg2 = "too many order by columns"; - const char* msg3 = "only primary timestamp/tbname/first tag in groupby clause allowed"; + const char* msg3 = "only primary timestamp, first tag/tbname in groupby clause allowed as order column"; const char* msg4 = "only tag in groupby clause allowed in order clause"; const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column"; const char* msg6 = "only primary timestamp allowed as the second order column"; @@ -5855,6 +6287,8 @@ int32_t 
validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg10 = "not support distinct mixed with order by"; const char* msg11 = "not support order with udf"; const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg"; + const char* msg13 = "order by json tag, key is too long"; + const char* msg14 = "order by json tag, must be json->'key'"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5868,10 +6302,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq * for table query, there is only one or none order option is allowed, which is the * ts or values(top/bottom) order is supported. * - * for super table query, the order option must be less than 3. + * for super table query, the order option must be less than 3 and the second must be ts. + * + * order by has 5 situations + * 1. from stable group by tag1 order by tag1 [ts] + * 2. from stable group by tbname order by tbname [ts] + * 3. from stable/table group by column1 order by column1 + * 4. from stable/table order by ts + * 5. select stable/table top(column2,1) ... 
order by column2 */ size_t size = taosArrayGetSize(pSortOrder); - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { + if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (size > 1) { return invalidOperationMsg(pMsgBuf, msg0); } @@ -5881,24 +6322,33 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } if (size > 0 && pQueryInfo->distinct) { - return invalidOperationMsg(pMsgBuf, msg10); + return invalidOperationMsg(pMsgBuf, msg10); } - // handle the first part of order by - tVariant* pVar = taosArrayGet(pSortOrder, 0); + SStrToken columnName = {0}; + CommonItem* pItem = taosArrayGet(pSortOrder, 0); + if (pItem->isJsonExp){ + assert(pItem->jsonExp->tokenId == TK_ARROW); + columnName = pItem->jsonExp->pLeft->columnName; + }else{ + // handle the first part of order by + tVariant* pVar = &pItem->pVar; - // e.g., order by 1 asc, return directly with out further check. - if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { - return TSDB_CODE_SUCCESS; + if (pVar->nType != TSDB_DATA_TYPE_BINARY){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + columnName.n = pVar->nLen; + columnName.type = pVar->nType; + columnName.z = pVar->pz; } - SStrToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; SColumnIndex index = COLUMN_INDEX_INITIALIZER; bool udf = false; if (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) { int32_t usize = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo); - + for (int32_t i = 0; i < usize; ++i) { SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, i); if (pUdfInfo->funcType == TSDB_UDF_TYPE_SCALAR) { @@ -5917,37 +6367,54 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq bool orderByTS = false; bool orderByGroupbyCol = false; - if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { + if (index.columnIndex >= 
tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1 int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - + // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { return invalidOperationMsg(pMsgBuf, msg4); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { + if (tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, pColIndex->colId)->type == TSDB_DATA_TYPE_JSON){ + if(!pItem->isJsonExp){ + return invalidOperationMsg(pMsgBuf, msg14); + } + if(pItem->jsonExp->exprToken.n >= sizeof(pColIndex->name)){ + return invalidOperationMsg(pMsgBuf, msg13); + } + if(strncmp(pColIndex->name, pItem->jsonExp->exprToken.z, pItem->jsonExp->exprToken.n) == 0){ + orderByTags = true; + }else{ + orderByTags = false; + } + }else{ + orderByTags = true; + } + } + } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname + // it is a tag column + if (pQueryInfo->groupbyExpr.columnInfo == NULL) { + return invalidOperationMsg(pMsgBuf, msg4); + } + SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); + if (TSDB_TBNAME_COLUMN_INDEX == pColIndex->colIndex) { orderByTags = true; } - } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - orderByTags = true; - } - - if (PRIMARYKEY_TIMESTAMP_COL_INDEX == index.columnIndex) { + }else if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts orderByTS = true; - } - - SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; - if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { - SColIndex* pColIndex = taosArrayGet(columnInfo, 0); - if (PRIMARYKEY_TIMESTAMP_COL_INDEX != index.columnIndex && pColIndex->colIndex == index.columnIndex) { - orderByGroupbyCol = true; + }else{ // order by normal column + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = 
taosArrayGet(columnInfo, 0); + if (pColIndex->colIndex == index.columnIndex) { + orderByGroupbyCol = true; + } } } if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { return invalidOperationMsg(pMsgBuf, msg3); - } else { // order by top/bottom result value column is not supported in case of interval query. - assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } size_t s = taosArrayGetSize(pSortOrder); @@ -5958,10 +6425,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderType = p1->sortOrder; } else if (orderByGroupbyCol) { - tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderType = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -5982,12 +6449,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg5); } - tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); if (udf) { return invalidOperationMsg(pMsgBuf, msg11); @@ -6013,7 +6480,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } } else { - tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex 
- tscGetNumOfColumns(pTableMetaInfo->pTableMeta); pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; @@ -6031,9 +6498,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } + SStrToken cname = {0}; pItem = taosArrayGet(pSqlNode->pSortOrder, 1); - tVariant* pVar2 = &pItem->pVar; - SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; + if (pItem->isJsonExp){ + assert(pItem->jsonExp->tokenId == TK_ARROW); + cname = pItem->jsonExp->pLeft->columnName; + }else{ + tVariant* pVar = &pItem->pVar; + + cname.n = pVar->nLen; + cname.type = pVar->nType; + cname.z = pVar->pz; + } if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsgBuf, msg1); } @@ -6041,8 +6517,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg6); } else { - tVariantListItem* p1 = taosArrayGet(pSortOrder, 1); - pQueryInfo->order.order = p1->sortOrder; + pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } @@ -6072,7 +6547,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg11); } - tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; pQueryInfo->groupbyExpr.orderType = p1->sortOrder; } @@ -6082,7 +6557,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { SColIndex* pColIndex = taosArrayGet(columnInfo, 0); - if (pColIndex->colIndex == index.columnIndex) { + if (pColIndex->colIndex != index.columnIndex) { return invalidOperationMsg(pMsgBuf, msg8); } } else { @@ -6098,7 +6573,7 @@ int32_t 
validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } - tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -6109,7 +6584,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg11); } - tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else { @@ -6125,7 +6600,28 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg11); } - tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + if (tscIsProjectionQuery(pQueryInfo)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) { + found = true; + break; + } + } + + if (!found) { + int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); + tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + + SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); + pSupInfo->visible = false; + + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; + } + } + + pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } @@ -6162,6 +6658,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg23 = "only column length coulbe be modified"; const char* msg24 = "invalid binary/nchar column length"; + const char* 
msg25 = "json type error, should be string"; + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; @@ -6321,22 +6819,32 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); - if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { + if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes)) { return invalidOperationMsg(pMsg, msg14); } + SKVRowBuilder kvRowBuilder = {0}; + if (pTagsSchema->type == TSDB_DATA_TYPE_JSON) { + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { + tscError("json type error, should be string"); + return invalidOperationMsg(pMsg, msg25); + } + if (pItem->pVar.nType > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) { + tscError("json tag too long"); + return invalidOperationMsg(pMsg, msg14); + } - pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); + if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } - if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(pMsg, msg13); - } - - pAlterSQL->tagData.dataLen = pTagsSchema->bytes; + int8_t tagVal = TSDB_DATA_JSON_PLACEHOLDER; + tdAddColToKVRow(&kvRowBuilder, pTagsSchema->colId, pTagsSchema->type, &tagVal, false); - // validate the length of binary - if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) && - varDataTLen(pAlterSQL->tagData.data) > pTagsSchema->bytes) { - return invalidOperationMsg(pMsg, msg14); + code = parseJsontoTagData(pItem->pVar.pz, &kvRowBuilder, pMsg, pTagsSchema->colId); + if (code != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return code; + } } int32_t schemaLen = sizeof(STColumn) * numOfTags; @@ -6372,15 +6880,32 @@ 
int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { d += sizeof(STColumn); } - // copy the tag value to pMsg body - pItem = taosArrayGet(pVarList, 1); - tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true); - + if (pTagsSchema->type == TSDB_DATA_TYPE_JSON){ + SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); + tdDestroyKVRowBuilder(&kvRowBuilder); + if (row == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + tdSortKVRowByColIdx(row); + + kvRowCpy(pUpdateMsg->data + schemaLen, row); + free(row); + }else{ + // copy the tag value to pMsg body + if (tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true) + != TSDB_CODE_SUCCESS){ + return invalidOperationMsg(pMsg, msg13); + } + } + int32_t len = 0; - if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) { + if(pTagsSchema->type == TSDB_DATA_TYPE_JSON){ + len = kvRowLen(pUpdateMsg->data + schemaLen); + }else if (!IS_VAR_DATA_TYPE(pTagsSchema->type)) { len = tDataTypes[pTagsSchema->type].bytes; } else { len = varDataTLen(pUpdateMsg->data + schemaLen); + if(len > pTagsSchema->bytes) return invalidOperationMsg(pMsg, msg14); } pUpdateMsg->tagValLen = htonl(len); // length may be changed after dump data @@ -6586,6 +7111,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { int32_t functId = tscExprGet(pQueryInfo, i)->base.functionId; + if (TSDB_FUNC_IS_SCALAR(functId)) { + continue; + } if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6614,6 +7142,11 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu } } + if (TSDB_FUNC_IS_SCALAR(pExpr->base.functionId)) { + isProjectionFunction = true; + break; + } + // projection query on primary timestamp, the selectivity function needs to be present. 
if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { bool hasSelectivity = false; @@ -6632,9 +7165,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu int32_t f = pExpr->base.functionId; if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || - f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE || - f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || - f == TSDB_FUNC_CEIL || f == TSDB_FUNC_FLOOR || f == TSDB_FUNC_ROUND) + f == TSDB_FUNC_DIFF || f == TSDB_FUNC_SCALAR_EXPR || f == TSDB_FUNC_DERIVATIVE || + f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG) { isProjectionFunction = true; break; @@ -6787,6 +7319,10 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) { } int32_t validateColumnName(char* name) { + if (strlen(name) == 0) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name)); if (ret) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -6849,17 +7385,15 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN // todo refactor if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query - if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { - if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - } + if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { + if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - // for projection query on super table, all queries are subqueries - if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && - !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY)) { - pQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; - } + // for projection query on super table, all queries are subqueries + if 
(tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && + !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY)) { + pQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; } } @@ -7161,12 +7695,18 @@ static int32_t doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } -static bool tagColumnInGroupby(SGroupbyExpr* pGroupbyExpr, int16_t columnId) { +static bool tagColumnInGroupby(SGroupbyExpr* pGroupbyExpr, int16_t columnId, int16_t type, char* name) { for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j); - - if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag )) { - return true; + + if (type == TSDB_DATA_TYPE_JSON && name != NULL){ + if (columnId == pColIndex->colId && strncmp(pColIndex->name, name, tListLen(pColIndex->name)) == 0 && TSDB_COL_IS_TAG(pColIndex->flag )) { + return true; + } + }else{ + if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag )) { + return true; + } } } @@ -7180,7 +7720,8 @@ static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); - if (pExpr->base.functionId == TSDB_FUNC_PRJ) { + if (pExpr->base.functionId == TSDB_FUNC_PRJ || + (pExpr->base.functionId == TSDB_FUNC_SCALAR_EXPR && ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0))) { hasColumnPrj = true; } else if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ) { hasTagPrj = true; @@ -7201,7 +7742,7 @@ static bool allTagPrjInGroupby(SQueryInfo* pQueryInfo) { continue; } - if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->base.colInfo.colId)) { + if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->base.colInfo.colId, pExpr->base.resType, pExpr->base.param[0].pz)) { allInGroupby = false; break; } @@ -7252,7 +7793,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { int16_t 
functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || - functionId == TSDB_FUNC_ARITHM || functionId == TSDB_FUNC_TS_DUMMY) { + functionId == TSDB_FUNC_SCALAR_EXPR || functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -7265,10 +7806,13 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { continue; } + if (TSDB_FUNC_IS_SCALAR(functionId)) { + numOfScalar++; + continue; + } + if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { numOfSelectivity++; - } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) { - numOfScalar++; } else { numOfAggregation++; } @@ -7299,6 +7843,10 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { for (int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int16_t functionId = pExpr->base.functionId; + if (TSDB_FUNC_IS_SCALAR(functionId)) { + continue; + } + if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == 0) { continue; } @@ -7344,7 +7892,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); SSchema* tagSchema = NULL; - if (!UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); } @@ -7378,16 +7926,23 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true); - - memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName)); - tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName)); - 
tstrncpy(pExpr->base.token, s->name, sizeof(pExpr->base.aliasName)); - - pExpr->base.colInfo.flag = TSDB_COL_TAG; - // NOTE: tag column does not add to source column list SColumnList ids = createColumnList(1, 0, pColIndex->colIndex); - insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, s->name, pExpr); + insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, pColIndex->name, pExpr); + pExpr->base.colInfo.flag = TSDB_COL_TAG; + memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName)); + tstrncpy(pExpr->base.aliasName, pColIndex->name, sizeof(pExpr->base.aliasName)); + tstrncpy(pExpr->base.token, pColIndex->name, sizeof(pExpr->base.token)); + if(s->type == TSDB_DATA_TYPE_JSON){ + SStrToken t0 = {.z = pColIndex->name}; + getJsonKey(&t0); + tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), t0.z, + t0.n, TSDB_DATA_TYPE_BINARY); + pExpr->base.numOfParams++; + assert(t0.n < strlen(pColIndex->name)); + memmove(pColIndex->name, t0.z, t0.n); + pColIndex->name[t0.n] = '\0'; + } } else { // if this query is "group by" normal column, time window query is not allowed if (isTimeWindowQuery(pQueryInfo)) { @@ -7445,7 +8000,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* const char* msg3 = "group by/session/state_window not allowed on projection query"; const char* msg4 = "retrieve tags not compatible with group by or interval query"; const char* msg5 = "functions can not be mixed up"; - const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg only support group by tbname"; + const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname"; // only retrieve tags, group by is not supportted if (tscQueryTags(pQueryInfo)) { @@ -7506,8 +8061,13 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* continue; } + if (f == TSDB_FUNC_SCALAR_EXPR && + (pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) { + return 
invalidOperationMsg(msg, msg1); + } + if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || - f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG)) { + f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED)) { for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); if (j == 0) { @@ -7526,9 +8086,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* return invalidOperationMsg(msg, msg1); } - if (IS_SCALAR_FUNCTION(aAggs[f].status)) { - return invalidOperationMsg(msg, msg1); - } if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(msg, msg1); @@ -7556,7 +8113,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) { - const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table without group by tbname"; + const char* msg1 = "TWA/Diff/Derivative/Irate/elapsed are not allowed to apply to super table without group by tbname"; const char* msg2 = "group by not supported in nested interp query"; const char* msg3 = "order by not supported in nested interp query"; const char* msg4 = "first column should be timestamp for interp query"; @@ -7569,7 +8126,7 @@ int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int32_t f = pExpr->base.functionId; - if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF) { + if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED) { for (int32_t j = 0; j < upNum; ++j) { SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j); STableMetaInfo *pTableMetaInfo 
= tscGetMetaInfo(pUp, 0); @@ -7818,8 +8375,10 @@ void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) { if (pExpr->base.functionId < 0) { SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * pExpr->base.functionId - 1); name = pUdfInfo->name; - } else { + } else if (!TSDB_FUNC_IS_SCALAR(pExpr->base.functionId)) { name = aAggs[pExpr->base.functionId].name; + } else { + name = aScalarFunctions[TSDB_FUNC_SCALAR_INDEX(pExpr->base.functionId)].name; } tmpLen = @@ -7901,6 +8460,8 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { const char* msg3 = "tag value too long"; const char* msg4 = "illegal value or data overflow"; const char* msg5 = "tags number not matched"; + const char* msg6 = "create table only from super table is allowed"; + const char* msg7 = "json type error, should be string"; SSqlCmd* pCmd = &pSql->cmd; @@ -7948,6 +8509,10 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return code; } + if (!UTIL_TABLE_IS_SUPER_TABLE(pStableMetaInfo)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + size_t valSize = taosArrayGetSize(pValList); // too long tag values will return invalid sql, not be truncated automatically @@ -8021,7 +8586,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { pItem->pVar.i64 = convertTimePrecision(pItem->pVar.i64, TSDB_TIME_PRECISION_NANO, tinfo.precision); } } - ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); // check again after the convert since it may be converted from binary to nchar. 
@@ -8038,7 +8602,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal, false); findColumnIndex = true; break; @@ -8060,7 +8624,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { SSchema* pSchema = &pTagSchema[i]; tVariantListItem* pItem = taosArrayGet(pValList, i); - char tagVal[TSDB_MAX_TAGS_LEN]; + char tagVal[TSDB_MAX_TAGS_LEN] = {0}; if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { if (pItem->pVar.nLen > pSchema->bytes) { tdDestroyKVRowBuilder(&kvRowBuilder); @@ -8076,8 +8640,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { pItem->pVar.i64 = convertTimePrecision(pItem->pVar.i64, TSDB_TIME_PRECISION_NANO, tinfo.precision); } } - - ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); // check again after the convert since it may be converted from binary to nchar. 
@@ -8094,7 +8656,31 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal, false); + } + } + + // encode json tag string + if(schemaSize == 1 && pTagSchema[0].type == TSDB_DATA_TYPE_JSON){ + if (valSize != schemaSize) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + tVariantListItem* pItem = taosArrayGet(pValList, 0); + if(pItem->pVar.nType != TSDB_DATA_TYPE_BINARY){ + tscError("json type error, should be string"); + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + if(pItem->pVar.nLen > TSDB_MAX_JSON_TAGS_LEN/TSDB_NCHAR_SIZE){ + tscError("json tag too long"); + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + ret = parseJsontoTagData(pItem->pVar.pz, &kvRowBuilder, tscGetErrorMsgPayload(pCmd), pTagSchema[0].colId); + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return ret; } } @@ -8162,7 +8748,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { if (tscValidateName(pName, true, &dbIncluded1) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + SRelationInfo* pFromInfo = pInfo->pCreateTableInfo->pSelect->from; if (pFromInfo == NULL || taosArrayGetSize(pFromInfo->list) == 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); @@ -8179,7 +8765,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { char buf[TSDB_TABLE_FNAME_LEN]; SStrToken sTblToken; sTblToken.z = buf; - + int32_t code = validateTableName(srcToken.z, srcToken.n, &sTblToken, &dbIncluded2); if (code != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -8199,8 
+8785,10 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return TSDB_CODE_TSC_INVALID_OPERATION; } + int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); + if (pSqlNode->pWhere != NULL) { // query condition in stream computing - if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql, joinQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } } @@ -9092,7 +9680,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column"; const char* msg5 = "only tag query not compatible with normal column filter"; const char* msg6 = "not support stddev/percentile/interp in the outer query yet"; - const char* msg7 = "derivative/twa/irate requires timestamp column exists in subquery"; + const char* msg7 = "derivative/twa/rate/irate/diff requires timestamp column exists in subquery"; const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; @@ -9141,11 +9729,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY); + int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); + // parse the group by clause in the first place if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - + if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, timeWindowQuery, true) != TSDB_CODE_SUCCESS) { @@ -9177,7 +9767,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf SExprInfo* pExpr = tscExprGet(pQueryInfo, i); 
int32_t f = pExpr->base.functionId; - if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) { + if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || + f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } } @@ -9185,7 +9776,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf // validate the query filter condition info if (pSqlNode->pWhere != NULL) { - if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql, joinQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } } else { @@ -9230,7 +9821,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf } // parse the having clause in the first place - int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -9282,6 +9872,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf int32_t type = isSTable? 
TSDB_QUERY_TYPE_STABLE_QUERY:TSDB_QUERY_TYPE_TABLE_QUERY; TSDB_QUERY_SET_TYPE(pQueryInfo->type, type); + int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); + // parse the group by clause in the first place if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -9289,7 +9881,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf pQueryInfo->onlyHasTagCond = true; // set where info if (pSqlNode->pWhere != NULL) { - if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { + if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql, joinQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -9300,7 +9892,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf } } - int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); int32_t timeWindowQuery = (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)); @@ -9310,7 +9901,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf } if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) { - return TSDB_CODE_TSC_INVALID_OPERATION; + return TSDB_CODE_TSC_INVALID_OPERATION; } // parse the window_state @@ -9434,34 +10025,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf } int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid) { - tExprNode* pLeft = NULL; - tExprNode* pRight= NULL; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - - if (pSqlExpr->pLeft != NULL) { - int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pQueryInfo, pCols, uid); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - } - - if (pSqlExpr->pRight != NULL) { - 
int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid); - if (ret != TSDB_CODE_SUCCESS) { - tExprTreeDestroy(pLeft, NULL); - return ret; - } - } - - if (pSqlExpr->pLeft == NULL && pSqlExpr->pRight == NULL && pSqlExpr->tokenId == 0) { - *pExpr = calloc(1, sizeof(tExprNode)); - return TSDB_CODE_SUCCESS; - } - - if (pSqlExpr->pLeft == NULL) { // it is the leaf node - assert(pSqlExpr->pRight == NULL); - + if (pSqlExpr->type != SQL_NODE_EXPR && pSqlExpr->type != SQL_NODE_SQLFUNCTION) { + assert(pSqlExpr->pLeft == NULL && pSqlExpr->pRight == NULL); if (pSqlExpr->type == SQL_NODE_VALUE) { + if(pSqlExpr->value.nType == -1){ + return TSDB_CODE_TSC_INVALID_VALUE; + } int32_t ret = TSDB_CODE_SUCCESS; *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_VALUE; @@ -9474,7 +10043,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS if (colSize > 0) { SColIndex* idx = taosArrayGet(pCols, colSize - 1); - + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); // convert time by precision if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) { @@ -9483,32 +10052,9 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS } } return ret; - } else if (pSqlExpr->type == SQL_NODE_SQLFUNCTION) { - // arithmetic expression on the results of aggregation functions - *pExpr = calloc(1, sizeof(tExprNode)); - (*pExpr)->nodeType = TSQL_NODE_COL; - (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); - strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n); - - // set the input column data byte and type. 
- size_t size = taosArrayGetSize(pQueryInfo->exprList); - - for (int32_t i = 0; i < size; ++i) { - SExprInfo* p1 = taosArrayGetP(pQueryInfo->exprList, i); - - if (strcmp((*pExpr)->pSchema->name, p1->base.aliasName) == 0) { - (*pExpr)->pSchema->type = (uint8_t)p1->base.resType; - (*pExpr)->pSchema->bytes = p1->base.resBytes; - (*pExpr)->pSchema->colId = p1->base.resColId; - - if (uid != NULL) { - *uid = p1->base.uid; - } + } else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression + SColumnIndex index = COLUMN_INDEX_INITIALIZER; - break; - } - } - } else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)); if (ret != TSDB_CODE_SUCCESS) { return ret; @@ -9516,34 +10062,41 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS pQueryInfo->curTableIdx = index.tableIndex; STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta; - int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); + int32_t numOfColumns = tscGetNumOfColumns(pTableMeta); *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_COL; (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); SSchema* pSchema = NULL; - + if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { pSchema = (*pExpr)->pSchema; - strcpy(pSchema->name, TSQL_TBNAME_L); - pSchema->type = TSDB_DATA_TYPE_BINARY; - pSchema->colId = TSDB_TBNAME_COLUMN_INDEX; - pSchema->bytes = -1; + strcpy(pSchema->name, tGetTbnameColumnSchema()->name); + pSchema->type = tGetTbnameColumnSchema()->type; + pSchema->colId = tGetTbnameColumnSchema()->colId; + pSchema->bytes = tGetTbnameColumnSchema()->bytes; } else { pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); *(*pExpr)->pSchema = *pSchema; } - + if (pCols != NULL) { // record the involved columns SColIndex colIndex = {0}; 
tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name)); colIndex.colId = pSchema->colId; colIndex.colIndex = index.columnIndex; - colIndex.flag = (index.columnIndex >= numOfColumns)? 1:0; + colIndex.flag = (index.columnIndex >= numOfColumns) ? 1 : 0; taosArrayPush(pCols, &colIndex); } + return TSDB_CODE_SUCCESS; + } else if (pSqlExpr->type == SQL_NODE_DATA_TYPE) { + *pExpr = calloc(1, sizeof(tExprNode)); + (*pExpr)->nodeType = TSQL_NODE_TYPE; + (*pExpr)->pType = calloc(1, sizeof(TAOS_FIELD)); + + *(*pExpr)->pType = pSqlExpr->dataType; return TSDB_CODE_SUCCESS; } else if (pSqlExpr->tokenId == TK_SET) { @@ -9578,21 +10131,44 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS *pExpr = calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_VALUE; (*pExpr)->pVal = pVal; + return TSDB_CODE_SUCCESS; + } else if (pSqlExpr->tokenId == 0) { + *pExpr = calloc(1, sizeof(tExprNode)); + return TSDB_CODE_SUCCESS; } else { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression"); } - - } else { + } + + if (pSqlExpr->type == SQL_NODE_EXPR) { + tExprNode* pLeft = NULL; + tExprNode* pRight= NULL; + + if (pSqlExpr->pLeft != NULL) { + int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pQueryInfo, pCols, uid); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + + if (pSqlExpr->pRight != NULL) { + int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid); + if (ret != TSDB_CODE_SUCCESS) { + tExprTreeDestroy(pLeft, NULL); + return ret; + } + } + *pExpr = (tExprNode *)calloc(1, sizeof(tExprNode)); (*pExpr)->nodeType = TSQL_NODE_EXPR; - + (*pExpr)->_node.hasPK = false; (*pExpr)->_node.pLeft = pLeft; (*pExpr)->_node.pRight = pRight; - + SStrToken t = {.type = pSqlExpr->tokenId}; (*pExpr)->_node.optr = convertRelationalOperator(&t); - + assert((*pExpr)->_node.optr != 0); // check for dividing by 0 @@ -9609,13 +10185,60 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, 
tExprNode **pExpr, const tSqlExpr* pS // NOTE: binary|nchar data allows the >|< type filter if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) { if (pRight != NULL && pRight->nodeType == TSQL_NODE_VALUE) { - if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL && pLeft->pSchema->type == TSDB_DATA_TYPE_BOOL) { + if (pLeft->_node.optr == TSDB_RELATION_ARROW){ + pLeft = pLeft->_node.pLeft; + } + if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL && (pLeft->pSchema->type == TSDB_DATA_TYPE_BOOL || pLeft->pSchema->type == TSDB_DATA_TYPE_JSON)) { return TSDB_CODE_TSC_INVALID_OPERATION; } } } + + } else if (pSqlExpr->type == SQL_NODE_SQLFUNCTION) { + if (TSDB_FUNC_IS_SCALAR(pSqlExpr->functionId)) { + *pExpr = calloc(1, sizeof(tExprNode)); + (*pExpr)->nodeType = TSQL_NODE_FUNC; + (*pExpr)->_func.functionId = pSqlExpr->functionId; + SArray* paramList = pSqlExpr->Expr.paramList; + size_t paramSize = paramList ? taosArrayGetSize(paramList) : 0; + if (paramSize > 0) { + (*pExpr)->_func.numChildren = (int32_t)paramSize; + (*pExpr)->_func.pChildren = (tExprNode**)calloc(paramSize, sizeof(tExprNode*)); + } + for (int32_t i = 0; i < paramSize; ++i) { + tSqlExprItem* param = taosArrayGet(paramList, i); + tSqlExpr* paramNode = param->pNode; + int32_t ret = exprTreeFromSqlExpr(pCmd, (*pExpr)->_func.pChildren+i, paramNode, pQueryInfo, pCols, uid); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + } else { + // arithmetic expression on the results of aggregation functions + *pExpr = calloc(1, sizeof(tExprNode)); + (*pExpr)->nodeType = TSQL_NODE_COL; + (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); + strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n); + + // set the input column data byte and type. 
+ size_t size = taosArrayGetSize(pQueryInfo->exprList); + + for (int32_t i = 0; i < size; ++i) { + SExprInfo* p1 = taosArrayGetP(pQueryInfo->exprList, i); + + if (strcmp((*pExpr)->pSchema->name, p1->base.aliasName) == 0) { + (*pExpr)->pSchema->type = (uint8_t)p1->base.resType; + (*pExpr)->pSchema->bytes = p1->base.resBytes; + (*pExpr)->pSchema->colId = p1->base.resColId; + + if (uid != NULL) { + *uid = p1->base.uid; + } + break; + } + } // endfor each expr + } // end not scalar function } - return TSDB_CODE_SUCCESS; } @@ -9663,5 +10286,3 @@ void normalizeSqlNode(SSqlNode* pSqlNode, const char* dbName) { } #endif - - diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 52a918bbe22589d85fc89cbff8249065129f1618..35582973c2af7b214d3054807d31a897c7256191 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -506,7 +506,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { } } - if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) { + if (pRes->code == TSDB_CODE_SUCCESS && pCmd->command < TSDB_SQL_MAX && tscProcessMsgRsp[pCmd->command]) { rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql); } @@ -832,12 +832,16 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, return TSDB_CODE_TSC_INVALID_TABLE_NAME; } - if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { + if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId)) { tscError("0x%"PRIx64" table schema is not matched with parsed sql", id); return TSDB_CODE_TSC_INVALID_OPERATION; } - assert(pExpr->resColId < 0); + if (pExpr->resColId >= 0) { + tscError("result column id underflowed: %d", pExpr->resColId); + return TSDB_CODE_TSC_RES_TOO_MANY; + } + SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg); SColIndex* pIndex = &pSqlExpr->colInfo; @@ -902,6 +906,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SArray* queryOperator = 
createExecOperatorPlan(&query); SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; + tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); int32_t numOfTags = query.numOfTags; @@ -943,6 +948,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsCompQuery = query.tsCompQuery; pQueryMsg->simpleAgg = query.simpleAgg; pQueryMsg->pointInterpQuery = query.pointInterpQuery; + pQueryMsg->needTableSeqScan = query.needTableSeqScan; pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->stateWindow = query.stateWindow; pQueryMsg->numOfTags = htonl(numOfTags); @@ -1141,6 +1147,23 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { memcpy(pMsg, pSql->sqlstr, sqlLen); pMsg += sqlLen; + +/* + //MSG EXTEND DEMO + pQueryMsg->extend = 1; + + STLV *tlv = (STLV *)pMsg; + tlv->type = htons(TLV_TYPE_DUMMY); + tlv->len = htonl(sizeof(int16_t)); + *(int16_t *)tlv->value = htons(12345); + pMsg += sizeof(*tlv) + ntohl(tlv->len); + + tlv = (STLV *)pMsg; + tlv->len = 0; + pMsg += sizeof(*tlv); + +*/ + int32_t msgLen = (int32_t)(pMsg - pCmd->payload); tscDebug("0x%"PRIx64" msg built success, len:%d bytes", pSql->self, msgLen); @@ -1487,7 +1510,8 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) { SCreateTableSql *pCreateTableInfo = pInfo->pCreateTableInfo; if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) { int32_t numOfTables = (int32_t)taosArrayGetSize(pInfo->pCreateTableInfo->childTableInfo); - size += numOfTables * (sizeof(SCreateTableMsg) + TSDB_MAX_TAGS_LEN); + size += numOfTables * (sizeof(SCreateTableMsg) + + ((TSDB_MAX_TAGS_LEN > TSDB_MAX_JSON_TAGS_LEN)?TSDB_MAX_TAGS_LEN:TSDB_MAX_JSON_TAGS_LEN)); } else { size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); } @@ -1536,7 +1560,17 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += sizeof(SCreateTableMsg); SCreatedTableInfo* p = taosArrayGet(list, i); - strcpy(pCreate->tableName, p->fullname); + //what pCreate->tableName 
point is a fixed char array which size is 237 + //what p->fullname point is a char* + //before the time we copy p->fullname to pCreate->tableName , we need to check the length of p->fullname + if (strlen(p->fullname) > 237) { + tscError("failed to write this name, which is over 237, just save the first 237 char here"); + strncpy(pCreate->tableName, p->fullname,237); + pCreate->tableName[236]='\0';//I don't know if this line is working properly + }else{ + strcpy(pCreate->tableName, p->fullname); + } + pCreate->igExists = (p->igExist)? 1 : 0; // use dbinfo from table id without modifying current db info @@ -1599,9 +1633,6 @@ int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { } int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { - char *pMsg; - int msgLen = 0; - SSqlCmd *pCmd = &pSql->cmd; SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); @@ -1630,14 +1661,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSchema++; } - pMsg = (char *)pSchema; - pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen); - if (pAlterInfo->tagData.dataLen > 0) { - memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen); - } - pMsg += pAlterInfo->tagData.dataLen; - - msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg); + int msgLen = sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo); pCmd->payloadLen = msgLen; pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE; @@ -1839,7 +1863,9 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) { uint64_t localQueryId = pSql->self; qTableQuery(pQueryInfo->pQInfo, &localQueryId); - convertQueryResult(pRes, pQueryInfo, pSql->self, true); + bool convertJson = true; + if (pQueryInfo->isStddev == true) convertJson = false; + convertQueryResult(pRes, pQueryInfo, pSql->self, true, convertJson); code = pRes->code; if (pRes->code == TSDB_CODE_SUCCESS) { @@ -2798,7 +2824,11 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn tscAddQueryInfo(&pNew->cmd); SQueryInfo 
*pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd); - if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) { + int payLoadLen = TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen; + if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) { + payLoadLen += pSql->cmd.insertParam.tagData.dataLen; + } + if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, payLoadLen)) { tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self); tscFreeSqlObj(pNew); @@ -3076,12 +3106,16 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { pSql->rootObj->retryReason = pSql->retryReason; + SSqlObj *tmpSql = pSql->rootObj; + tscFreeSubobj(pSql->rootObj); + tfree(tmpSql->pSubs); + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); char* n = strdup(name); taosArrayPush(pNameList, &n); - code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); + code = getMultiTableMetaFromMnode(tmpSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); taosArrayDestroyEx(pNameList, freeElem); taosArrayDestroyEx(vgroupList, freeElem); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 89da3c5640c6523d4d2a816b8ae0293310c5830a..d27fa15bf13b0f7ca15e4cb92b447f8d51b842ef 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -29,6 +29,7 @@ #include "ttimer.h" #include "tscProfile.h" +static char clusterDefaultId[] = "clusterDefaultId"; static bool validImpl(const char* str, size_t maxsize) { if (str == NULL) { return false; @@ -193,7 +194,9 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass, tscBuildAndSendRequest(pSql, NULL); tsem_wait(&pSql->rspSem); - + if (0 == strlen(pSql->pTscObj->clusterId)) { + memcpy(pSql->pTscObj->clusterId, clusterDefaultId, strlen(clusterDefaultId)); + } pSql->pTscObj->pClusterInfo = (SClusterInfo 
*)tscAcquireClusterInfo(pSql->pTscObj->clusterId); if (pSql->res.code != TSDB_CODE_SUCCESS) { terrno = pSql->res.code; @@ -442,7 +445,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { // revise the length for binary and nchar fields if (f[j].type == TSDB_DATA_TYPE_BINARY) { f[j].bytes -= VARSTR_HEADER_SIZE; - } else if (f[j].type == TSDB_DATA_TYPE_NCHAR) { + } else if (f[j].type == TSDB_DATA_TYPE_NCHAR || f[j].type == TSDB_DATA_TYPE_JSON) { f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9309f70d140b6d5cebcfa9fdf572c464b05c8df6..5f8964bb34b5939ab768877cb8ae701e20443f75 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -227,6 +227,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { if (skipped) { slot = 0; stackidx = 0; + tVariantDestroy(&tag); continue; } @@ -334,6 +335,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { } if (mergeDone) { + tVariantDestroy(&tag); break; } @@ -341,6 +343,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { stackidx = 0; skipRemainValue(mainCtx->p->pTSBuf, &tag); + tVariantDestroy(&tag); } stackidx = 0; @@ -633,16 +636,21 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { // set the join condition tag column info, todo extract method if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { assert(pQueryInfo->tagCond.joinInfo.hasJoin); + pExpr->base.numOfParams = 0; // the value is 0 by default. just make sure. + // add json tag key, if there is no json tag key, just hold place. 
+ tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJsonKeyName, + strlen(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY); + pExpr->base.numOfParams++; int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value - tVariant* pVariant = &pExpr->base.param[0]; + tVariant* pVariant = &pExpr->base.param[pExpr->base.numOfParams]; pVariant->i64 = colId; pVariant->nType = TSDB_DATA_TYPE_BIGINT; pVariant->nLen = sizeof(int64_t); - pExpr->base.numOfParams = 1; + pExpr->base.numOfParams++; } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -729,8 +737,15 @@ int32_t tagValCompar(const void* p1, const void* p2) { const STidTags* t1 = (const STidTags*) varDataVal(p1); const STidTags* t2 = (const STidTags*) varDataVal(p2); - __compar_fn_t func = getComparFunc(t1->padding, 0); + if (t1->padding == TSDB_DATA_TYPE_JSON){ + bool canReturn = true; + int32_t result = jsonCompareUnit(t1->tag, t2->tag, &canReturn); + if(canReturn) return result; + __compar_fn_t func = getComparFunc(t1->tag[0], 0); + return func(t1->tag + CHAR_BYTES, t2->tag + CHAR_BYTES); + } + __compar_fn_t func = getComparFunc(t1->padding, 0); return func(t1->tag, t2->tag); } @@ -821,16 +836,21 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); + SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); // set the tags value for ts_comp function if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - SExprInfo *pExpr = tscExprGet(pQueryInfo, 0); - int16_t tagColId 
= tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); - pExpr->base.param[0].i64 = tagColId; - pExpr->base.param[0].nLen = sizeof(int64_t); - pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; - pExpr->base.numOfParams = 1; + pExpr->base.numOfParams = 0; // the value is 0 by default. just make sure. + // add json tag key, if there is no json tag key, just hold place. + tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pSupporter->tagCond.joinInfo.joinTables[0]->tagJsonKeyName, + strlen(pSupporter->tagCond.joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY); + pExpr->base.numOfParams++; + + int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); + pExpr->base.param[pExpr->base.numOfParams].i64 = tagColId; + pExpr->base.param[pExpr->base.numOfParams].nLen = sizeof(int64_t); + pExpr->base.param[pExpr->base.numOfParams].nType = TSDB_DATA_TYPE_BIGINT; + pExpr->base.numOfParams++; } // add the filter tag column @@ -2012,7 +2032,12 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter // set get tags query type TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); - tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG, getNewResColId(pCmd)); + SExprInfo* pExpr = tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG, getNewResColId(pCmd)); + if(strlen(pTagCond->joinInfo.joinTables[0]->tagJsonKeyName) > 0){ + tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pTagCond->joinInfo.joinTables[0]->tagJsonKeyName, + strlen(pTagCond->joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY); + pExpr->base.numOfParams++; + } size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); tscDebug( @@ -2025,15 +2050,6 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter SColumnIndex colIndex = 
{0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); - // set the tags value for ts_comp function - SExprInfo *pExpr = tscExprGet(pNewQueryInfo, 0); - - if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); - pExpr->base.param->i64 = tagColId; - pExpr->base.numOfParams = 1; - } - // add the filter tag column if (pSupporter->colList != NULL) { size_t s = taosArrayGetSize(pSupporter->colList); @@ -2293,7 +2309,15 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { if (row[i] == NULL) { setNull(p + offset, pExpr->base.resType, pExpr->base.resBytes); } else { - memcpy(p + offset, row[i], length[i]); + if(pExpr->base.resType == TSDB_DATA_TYPE_NCHAR){ + int32_t output = 0; + bool ret = taosMbsToUcs4(row[i], length[i], p + offset, pExpr->base.resBytes, &output); + if (!ret) { + tscError("stddev convert tag error:%d", ret); + } + }else{ + memcpy(p + offset, row[i], length[i]); + } } offset += pExpr->base.resBytes; } @@ -2419,6 +2443,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); tscInitQueryInfo(pNewQueryInfo); + pNewQueryInfo->isStddev = true; // for json tag // add the group cond pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; @@ -2482,6 +2507,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { } SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd)); + if (schema->type == TSDB_DATA_TYPE_JSON){ + p->base.numOfParams = pExpr->base.numOfParams; + tVariantAssign(&p->base.param[0], &pExpr->base.param[0]); + } p->base.resColId = pExpr->base.resColId; } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) { int32_t num = (int32_t) 
taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); @@ -3592,7 +3621,7 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { finalRowSize += pField->bytes; } - doArithmeticCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize); + doScalarExprCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize); pRes->data = pFilePage->data; tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted); @@ -3631,8 +3660,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { } } -char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) { - SArithmeticSupport *pSupport = (SArithmeticSupport *) param; +char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) { + SScalarExprSupport*pSupport = (SScalarExprSupport*) param; int32_t index = -1; SExprInfo* pExpr = NULL; @@ -3673,7 +3702,9 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) { int32_t type = pInfo->field.type; int32_t bytes = pInfo->field.bytes; - if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { + if (pQueryInfo->isStddev && type == TSDB_DATA_TYPE_JSON){ // for json tag compare in the second round of stddev + pRes->tsrow[j] = pRes->urow[i]; + }else if (!IS_VAR_DATA_TYPE(type) && type != TSDB_DATA_TYPE_JSON) { pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i]; } else { pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index edb8169f761e2b5aaba1ddfd7cda8a9008298948..bbd448e2d8b5069fae438d7adb9de14a31446d1b 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,7 +47,17 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process. 
static void *tscCheckDiskUsageTmr; void *tscRpcCache; // cache to keep rpc obj int32_t tscNumOfThreads = 1; // num of rpc threads +#ifdef _TD_POWER_ +char tscLogFileName[12] = "powerlog"; +#elif (_TD_TQ_ == true) +char tscLogFileName[12] = "tqlog"; +#elif (_TD_PRO_ == true) +char tscLogFileName[12] = "prolog"; +#elif (_TD_KH_ == true) +char tscLogFileName[12] = "khclientlog"; +#else char tscLogFileName[12] = "taoslog"; +#endif int tscLogFileNum = 10; static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently @@ -107,7 +117,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry rpcObj.pDnodeConn = rpcOpen(&rpcInit); if (rpcObj.pDnodeConn == NULL) { pthread_mutex_unlock(&rpcObjMutex); - tscError("failed to init connection to TDengine"); + tscError("failed to init connection to server"); return -1; } @@ -133,8 +143,8 @@ void tscClusterInfoDestroy(SClusterInfo *pObj) { void *tscAcquireClusterInfo(const char *clusterId) { pthread_mutex_lock(&clusterMutex); - size_t len = strlen(clusterId); + SClusterInfo *pObj = NULL; SClusterInfo **ppObj = taosHashGet(tscClusterMap, clusterId, len); if (ppObj == NULL || *ppObj == NULL) { @@ -210,10 +220,10 @@ void taos_init_imp(void) { taosInitNotes(); rpcInit(); - +#ifdef LUA_EMBEDDED scriptEnvPoolInit(); - - tscDebug("starting to initialize TAOS client ..."); +#endif + tscDebug("starting to initialize client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } @@ -276,7 +286,9 @@ void taos_cleanup(void) { } if (tscEmbedded == 0) { + #ifdef LUA_EMBEDDED scriptEnvPoolCleanup(); + #endif } int32_t id = tscObjRef; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 50b9a8fd7eea703dffe34f8ad5eb204b310f0d7e..3367c4a36acc33413fae256290ddc5926f2720e3 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -29,6 +29,7 @@ #include "tsclient.h" #include "ttimer.h" #include "ttokendef.h" +#include "cJSON.h" #ifdef HTTP_EMBEDDED #include "httpInt.h" @@ 
-81,23 +82,43 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le break; case TSDB_DATA_TYPE_FLOAT: - n = sprintf(str, "%e", GET_FLOAT_VAL(buf)); + n = sprintf(str, "%.*e", DECIMAL_DIG, GET_FLOAT_VAL(buf)); break; case TSDB_DATA_TYPE_DOUBLE: - n = sprintf(str, "%e", GET_DOUBLE_VAL(buf)); + n = sprintf(str, "%.*e", DECIMAL_DIG, GET_DOUBLE_VAL(buf)); break; case TSDB_DATA_TYPE_BINARY: + if (bufSize < 0) { + tscError("invalid buf size"); + return TSDB_CODE_TSC_INVALID_VALUE; + } + int32_t escapeSize = 0; + *str++ = '\''; + ++escapeSize; + char* data = buf; + for (int32_t i = 0; i < bufSize; ++i) { + if (data[i] == '\'' || data[i] == '"') { + *str++ = '\\'; + ++escapeSize; + } + *str++ = data[i]; + } + *str = '\''; + ++escapeSize; + n = bufSize + escapeSize; + break; case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: if (bufSize < 0) { tscError("invalid buf size"); return TSDB_CODE_TSC_INVALID_VALUE; } - *str = '"'; + *str = '\''; memcpy(str + 1, buf, bufSize); - *(str + bufSize + 1) = '"'; + *(str + bufSize + 1) = '\''; n = bufSize + 2; break; @@ -266,17 +287,11 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS && - functionId != TSDB_FUNC_ARITHM && + functionId != TSDB_FUNC_SCALAR_EXPR && functionId != TSDB_FUNC_TS_COMP && - functionId != TSDB_FUNC_DIFF && - functionId != TSDB_FUNC_DERIVATIVE && - functionId != TSDB_FUNC_MAVG && - functionId != TSDB_FUNC_CSUM && functionId != TSDB_FUNC_TS_DUMMY && functionId != TSDB_FUNC_TID_TAG && - functionId != TSDB_FUNC_CEIL && - functionId != TSDB_FUNC_FLOOR && - functionId != TSDB_FUNC_ROUND) { + !TSDB_FUNC_IS_SCALAR(functionId)) { return false; } } @@ -313,8 +328,8 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { } if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG && - f != TSDB_FUNC_TS && f != TSDB_FUNC_ARITHM && f != TSDB_FUNC_DIFF && - f != 
TSDB_FUNC_DERIVATIVE) { + f != TSDB_FUNC_TS && f != TSDB_FUNC_SCALAR_EXPR && f != TSDB_FUNC_DIFF && + f != TSDB_FUNC_DERIVATIVE && !TSDB_FUNC_IS_SCALAR(f)) { return false; } } @@ -379,6 +394,10 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { return true; } +bool tscNeedTableSeqScan(SQueryInfo* pQueryInfo) { + return pQueryInfo->stableQuery && (tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_TWA) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED)); +} + bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) { size_t size = tscNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { @@ -395,7 +414,6 @@ bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) { return false; } - bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) { if (tscIsProjectionQuery(pQueryInfo)) { return false; @@ -528,7 +546,7 @@ bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo) { } int32_t functionId = pExpr->base.functionId; - if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP) { + if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_ELAPSED) { return true; } } @@ -660,6 +678,10 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) { continue; } + if (TSDB_FUNC_IS_SCALAR(functionId)) { + continue; + } + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -714,34 +736,33 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } -static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bool convertNchar) { +static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bool convertNchar, bool convertJson) { // generated the user-defined column result if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) { - if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) { + if (pInfo->pExpr->base.param[0].nType == TSDB_DATA_TYPE_NULL) { setNullN(pRes->urow[i], 
pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows); } else { if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) { - assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes); + assert(pInfo->pExpr->base.param[0].nLen <= pInfo->field.bytes); for (int32_t k = 0; k < pRes->numOfRows; ++k) { char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; - memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen); - varDataSetLen(p, pInfo->pExpr->base.param[1].nLen); + memcpy(varDataVal(p), pInfo->pExpr->base.param[0].pz, pInfo->pExpr->base.param[0].nLen); + varDataSetLen(p, pInfo->pExpr->base.param[0].nLen); } } else { for (int32_t k = 0; k < pRes->numOfRows; ++k) { char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes; - memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes); + memcpy(p, &pInfo->pExpr->base.param[0].i64, pInfo->field.bytes); } } } - } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { + } else if (convertNchar && (pInfo->field.type == TSDB_DATA_TYPE_NCHAR)) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); - if(buffer == NULL) - return ; + if (buffer == NULL) return; pRes->buffer[i] = buffer; // string terminated char for binary data memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); @@ -765,8 +786,63 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo p += pInfo->field.bytes; } - memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + }else if (pInfo->field.type == TSDB_DATA_TYPE_JSON) { + if (convertJson){ + // convert unicode to native code in a temporary buffer extra one byte for terminated symbol + char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + if (buffer == NULL) return; + pRes->buffer[i] = buffer; + 
// string terminated char for binary data + memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); + + char* p = pRes->urow[i]; + for (int32_t k = 0; k < pRes->numOfRows; ++k) { + char* dst = pRes->buffer[i] + k * pInfo->field.bytes; + char type = *p; + char* realData = p + CHAR_BYTES; + if (type == TSDB_DATA_TYPE_JSON && isNull(realData, TSDB_DATA_TYPE_JSON)) { + memcpy(dst, realData, varDataTLen(realData)); + } else if (type == TSDB_DATA_TYPE_BINARY) { + assert(*(uint32_t*)varDataVal(realData) == TSDB_DATA_JSON_null); // json null value + assert(varDataLen(realData) == INT_BYTES); + sprintf(varDataVal(dst), "%s", "null"); + varDataSetLen(dst, strlen(varDataVal(dst))); + }else if (type == TSDB_DATA_TYPE_JSON) { + int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(dst)); + varDataSetLen(dst, length); + if (length == 0) { + tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); + } + }else if (type == TSDB_DATA_TYPE_NCHAR) { // value -> "value" + *(char*)varDataVal(dst) = '\"'; + int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), POINTER_SHIFT(varDataVal(dst), CHAR_BYTES)); + *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"'; + varDataSetLen(dst, length + CHAR_BYTES*2); + if (length == 0) { + tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); + } + }else if (type == TSDB_DATA_TYPE_DOUBLE) { + double jsonVd = *(double*)(realData); + sprintf(varDataVal(dst), "%.9lf", jsonVd); + varDataSetLen(dst, strlen(varDataVal(dst))); + }else if (type == TSDB_DATA_TYPE_BIGINT) { + int64_t jsonVd = *(int64_t*)(realData); + sprintf(varDataVal(dst), "%" PRId64, jsonVd); + varDataSetLen(dst, strlen(varDataVal(dst))); + }else if (type == TSDB_DATA_TYPE_BOOL) { + sprintf(varDataVal(dst), "%s", (*((char *)realData) == 1) ? 
"true" : "false"); + varDataSetLen(dst, strlen(varDataVal(dst))); + }else { + assert(0); + } + + p += pInfo->field.bytes; + } + memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + }else{ + // if convertJson is false, json data as raw data used for stddev for the second round + } } if (convertNchar) { @@ -774,6 +850,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo } } +void tscJson2String(char *src, char* dst){ + +} + void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted) { assert(pRes->numOfCols > 0); if (pRes->numOfRows == 0) { @@ -787,11 +867,11 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted) { pRes->length[i] = pInfo->field.bytes; offset += pInfo->field.bytes; - setResRawPtrImpl(pRes, pInfo, i, converted ? false : true); + setResRawPtrImpl(pRes, pInfo, i, converted ? false : true, true); } } -void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar) { +void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar, bool convertJson) { assert(pRes->numOfCols > 0); for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { @@ -802,7 +882,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc pRes->urow[i] = pColData->pData; pRes->length[i] = pInfo->field.bytes; - setResRawPtrImpl(pRes, pInfo, i, convertNchar); + setResRawPtrImpl(pRes, pInfo, i, convertNchar, convertJson); /* // generated the user-defined column result if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.ColName.flag)) { @@ -858,17 +938,6 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc } } -static SColumnInfo* extractColumnInfoFromResult(SArray* pTableCols) { - int32_t numOfCols = (int32_t) taosArrayGetSize(pTableCols); - SColumnInfo* pColInfo = calloc(numOfCols, sizeof(SColumnInfo)); - for(int32_t i = 0; i < 
numOfCols; ++i) { - SColumn* pCol = taosArrayGetP(pTableCols, i); - pColInfo[i] = pCol->info;//[index].type; - } - - return pColInfo; -} - typedef struct SDummyInputInfo { SSDataBlock *block; STableQueryInfo *pTableQueryInfo; @@ -1258,14 +1327,14 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUp return pOperator; } -void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar) { +void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar, bool convertJson) { // set the correct result SSDataBlock* p = pQueryInfo->pQInfo->runtimeEnv.outputBuf; pRes->numOfRows = (p != NULL)? p->info.rows: 0; if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { tscCreateResPointerInfo(pRes, pQueryInfo); - tscSetResRawPtrRv(pRes, pQueryInfo, p, convertNchar); + tscSetResRawPtrRv(pRes, pQueryInfo, p, convertNchar, convertJson); } tscDebug("0x%"PRIx64" retrieve result in pRes, numOfRows:%d", objId, pRes->numOfRows); @@ -1299,8 +1368,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue // handle the following query process if (px->pQInfo == NULL) { - SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList); - STableMeta* pTableMeta = tscGetMetaInfo(px, 0)->pTableMeta; SSchema* pSchema = tscGetTableSchema(pTableMeta); @@ -1415,7 +1482,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue px->pQInfo->runtimeEnv.udfIsCopy = true; px->pQInfo->runtimeEnv.pUdfInfo = pUdfInfo; - tfree(pColumnInfo); tfree(schema); // set the pRuntimeEnv for pSourceOperator @@ -1424,7 +1490,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue uint64_t qId = pSql->self; qTableQuery(px->pQInfo, &qId); - convertQueryResult(pOutput, px, pSql->self, false); + convertQueryResult(pOutput, px, pSql->self, false, false); } static void tscDestroyResPointerInfo(SSqlRes* pRes) { @@ -2000,18 +2066,11 @@ 
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI } } else { for (int32_t i = 0; i < numOfRows; ++i) { - char* payload = (blkKeyTuple + i)->payloadAddr; - if (isNeedConvertRow(payload)) { - convertSMemRow(pDataBlock, payload, pTableDataBlock); - TDRowTLenT rowTLen = memRowTLen(pDataBlock); - pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); - pBlock->dataLen += rowTLen; - } else { - TDRowTLenT rowTLen = memRowTLen(payload); - memcpy(pDataBlock, payload, rowTLen); - pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); - pBlock->dataLen += rowTLen; - } + char* payload = (blkKeyTuple + i)->payloadAddr; + TDRowTLenT rowTLen = memRowTLen(payload); + memcpy(pDataBlock, payload, rowTLen); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; } } @@ -3075,12 +3134,12 @@ void tscIncStreamExecutionCount(void* pStream) { ps->num += 1; } -bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams) { +bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId) { if (pTableMetaInfo->pTableMeta == NULL) { return false; } - if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) { + if (colId == TSDB_TBNAME_COLUMN_INDEX || colId <= TSDB_UD_COLUMN_INDEX) { return true; } @@ -3462,6 +3521,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { pQueryInfo->sessionWindow = pSrc->sessionWindow; pQueryInfo->pTableMetaInfo = NULL; pQueryInfo->multigroupResult = pSrc->multigroupResult; + pQueryInfo->stateWindow = pSrc->stateWindow; pQueryInfo->bufLen = pSrc->bufLen; pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery; @@ -4410,6 +4470,8 @@ bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) { char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; } +int32_t tscGetErrorMsgLength(SSqlCmd* pCmd) { return (int32_t)strlen(pCmd->payload); } + /** * If current vnode query does not return results anymore 
(pRes->numOfRows == 0), try the next vnode if exists, * while multi-vnode super table projection query and the result does not reach the limitation. @@ -4852,7 +4914,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI strncpy(pse->aliasName, pSource->base.aliasName, tListLen(pse->aliasName)); strncpy(pse->token, pSource->base.token, tListLen(pse->token)); - if (pSource->base.functionId != TSDB_FUNC_ARITHM) { // this should be switched to projection query + if (pSource->base.functionId != TSDB_FUNC_SCALAR_EXPR) { // this should be switched to projection query pse->numOfParams = 0; // no params for projection query pse->functionId = TSDB_FUNC_PRJ; pse->colInfo.colId = pSource->base.resColId; @@ -4866,7 +4928,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI } } - pse->colInfo.flag = TSDB_COL_NORMAL; + pse->colInfo.flag = pSource->base.colInfo.flag; //TSDB_COL_NORMAL; pse->resType = pSource->base.resType; pse->resBytes = pSource->base.resBytes; strncpy(pse->colInfo.name, pSource->base.aliasName, tListLen(pse->colInfo.name)); @@ -4896,16 +4958,17 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI pse->colInfo.colId = pSource->base.colInfo.colId; pse->colType = pSource->base.colType; pse->colBytes = pSource->base.colBytes; - pse->resBytes = sizeof(double); - pse->resType = TSDB_DATA_TYPE_DOUBLE; pse->functionId = pSource->base.functionId; pse->numOfParams = pSource->base.numOfParams; for (int32_t j = 0; j < pSource->base.numOfParams; ++j) { tVariantAssign(&pse->param[j], &pSource->base.param[j]); - buildArithmeticExprFromMsg(px, NULL); + buildScalarExprFromMsg(px, NULL); } + + pse->resBytes = px->pExpr->resultBytes; + pse->resType = px->pExpr->resultType; } } @@ -5057,6 +5120,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo); 
pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); + pQueryAttr->needTableSeqScan = tscNeedTableSeqScan(pQueryInfo); pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); pQueryAttr->distinct = pQueryInfo->distinct; pQueryAttr->sw = pQueryInfo->sessionWindow; @@ -5097,9 +5161,9 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt SExprInfo* pExpr = tscExprGet(pQueryInfo, i); tscExprAssign(&pQueryAttr->pExpr1[i], pExpr); - if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_ARITHM) { + if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_SCALAR_EXPR) { for (int32_t j = 0; j < pQueryAttr->pExpr1[i].base.numOfParams; ++j) { - buildArithmeticExprFromMsg(&pQueryAttr->pExpr1[i], NULL); + buildScalarExprFromMsg(&pQueryAttr->pExpr1[i], NULL); } } } @@ -5371,3 +5435,152 @@ char* cloneCurrentDBName(SSqlObj* pSql) { return p; } +int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, int16_t startColId){ + // set json NULL data + uint8_t nullTypeVal[CHAR_BYTES + VARSTR_HEADER_SIZE + INT_BYTES] = {0}; + uint32_t jsonNULL = TSDB_DATA_JSON_NULL; + int jsonIndex = startColId + 1; + char nullTypeKey[VARSTR_HEADER_SIZE + INT_BYTES] = {0}; + varDataSetLen(nullTypeKey, INT_BYTES); + nullTypeVal[0] = TSDB_DATA_TYPE_JSON; + varDataSetLen(nullTypeVal + CHAR_BYTES, INT_BYTES); + *(uint32_t*)(varDataVal(nullTypeKey)) = jsonNULL; + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, nullTypeKey, false); // add json null type + if (strtrim(json) == 0 || strcasecmp(json, "null") == 0){ + *(uint32_t*)(varDataVal(nullTypeVal + CHAR_BYTES)) = jsonNULL; + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, nullTypeVal, true); // add json null value + return TSDB_CODE_SUCCESS; + } + int32_t jsonNotNull = TSDB_DATA_JSON_NOT_NULL; + *(uint32_t*)(varDataVal(nullTypeVal + CHAR_BYTES)) = jsonNotNull; + 
tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, nullTypeVal, true); // add json type + + // set json real data + cJSON *root = cJSON_Parse(json); + if (root == NULL){ + tscError("json parse error"); + return tscSQLSyntaxErrMsg(errMsg, "json parse error", NULL); + } + + int size = cJSON_GetArraySize(root); + if(!cJSON_IsObject(root)){ + tscError("json error invalide value"); + return tscSQLSyntaxErrMsg(errMsg, "json error invalide value", NULL); + } + + int retCode = 0; + SHashObj* keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false); + for(int i = 0; i < size; i++) { + cJSON* item = cJSON_GetArrayItem(root, i); + if (!item) { + tscError("json inner error:%d", i); + retCode = tscSQLSyntaxErrMsg(errMsg, "json inner error", NULL); + goto end; + } + + char *jsonKey = item->string; + if(!isValidateTag(jsonKey)){ + tscError("json key not validate"); + retCode = tscSQLSyntaxErrMsg(errMsg, "json key not validate", NULL); + goto end; + } + if(strlen(jsonKey) > TSDB_MAX_JSON_KEY_LEN){ + tscError("json key too long error"); + retCode = tscSQLSyntaxErrMsg(errMsg, "json key too long, more than 256", NULL); + goto end; + } + if(strlen(jsonKey) == 0 || taosHashGet(keyHash, jsonKey, strlen(jsonKey)) != NULL){ + continue; + } + + // json key encode by binary + char tagKey[TSDB_MAX_JSON_KEY_LEN + VARSTR_HEADER_SIZE] = {0}; + strncpy(varDataVal(tagKey), jsonKey, strlen(jsonKey)); + int32_t outLen = (int32_t)strlen(jsonKey); + taosHashPut(keyHash, jsonKey, outLen, &outLen, CHAR_BYTES); // add key to hash to remove dumplicate, value is useless + + varDataSetLen(tagKey, outLen); + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, tagKey, false); // add json key + + if(item->type == cJSON_String){ // add json value format: type|data + char *jsonValue = item->valuestring; + outLen = 0; + char tagVal[TSDB_MAX_JSON_TAGS_LEN] = {0}; + *tagVal = jsonType2DbType(0, item->type); // type + char* tagData = 
POINTER_SHIFT(tagVal,CHAR_BYTES); + if (!taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData), + TSDB_MAX_JSON_TAGS_LEN - CHAR_BYTES - VARSTR_HEADER_SIZE, &outLen)) { + tscError("json string error:%s|%s", strerror(errno), jsonValue); + retCode = tscSQLSyntaxErrMsg(errMsg, "serizelize json error", NULL); + goto end; + } + + varDataSetLen(tagData, outLen); + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, tagVal, true); + }else if(item->type == cJSON_Number){ + char tagVal[LONG_BYTES + CHAR_BYTES] = {0}; + *tagVal = jsonType2DbType(item->valuedouble, item->type); // type + char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES); + if(*tagVal == TSDB_DATA_TYPE_DOUBLE) *((double *)tagData) = item->valuedouble; + else if(*tagVal == TSDB_DATA_TYPE_BIGINT) *((int64_t *)tagData) = item->valueint; + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_BIGINT, tagVal, true); + }else if(item->type == cJSON_True || item->type == cJSON_False){ + char tagVal[CHAR_BYTES + CHAR_BYTES] = {0}; + *tagVal = jsonType2DbType((double)(item->valueint), item->type); // type + char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES); + *tagData = (char)(item->valueint); + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_BOOL, tagVal, true); + }else if(item->type == cJSON_NULL){ + char tagVal[CHAR_BYTES + VARSTR_HEADER_SIZE + INT_BYTES] = {0}; + *tagVal = jsonType2DbType(0, item->type); // type + int32_t* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES); + varDataSetLen(tagData, INT_BYTES); + *(uint32_t*)(varDataVal(tagData)) = TSDB_DATA_JSON_null; + tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_BINARY, tagVal, true); + } + else{ + retCode = tscSQLSyntaxErrMsg(errMsg, "invalidate json value", NULL); + goto end; + } + } + + if(taosHashGetSize(keyHash) == 0){ // set json NULL true + *(uint32_t*)(varDataVal(nullTypeVal + CHAR_BYTES)) = jsonNULL; + memcpy(POINTER_SHIFT(kvRowBuilder->buf, kvRowBuilder->pColIdx[2].offset), nullTypeVal, CHAR_BYTES + 
VARSTR_HEADER_SIZE + INT_BYTES); + } + +end: + taosHashCleanup(keyHash); + cJSON_Delete(root); + return retCode; +} + +int8_t jsonType2DbType(double data, int jsonType){ + switch(jsonType){ + case cJSON_Number: + if (data - (int64_t)data == 0) return TSDB_DATA_TYPE_BIGINT; else return TSDB_DATA_TYPE_DOUBLE; + case cJSON_String: + return TSDB_DATA_TYPE_NCHAR; + case cJSON_NULL: + return TSDB_DATA_TYPE_BINARY; + case cJSON_True: + case cJSON_False: + return TSDB_DATA_TYPE_BOOL; + } + return TSDB_DATA_TYPE_NULL; +} + +// get key from json->'key' +void getJsonKey(SStrToken *t0){ + while(true){ + t0->n = tGetToken(t0->z, &t0->type); + if (t0->type == TK_STRING){ + t0->z++; + t0->n -= 2; + break; + }else if (t0->type == TK_ILLEGAL){ + assert(0); + } + t0->z += t0->n; + } +} diff --git a/src/common/inc/tarithoperator.h b/src/common/inc/tarithoperator.h index 27e8871e2f7b32bf7772dfa0cbc9c3164a312f2d..38c9feed466e5d326c21acbd0f0775ddee61562a 100644 --- a/src/common/inc/tarithoperator.h +++ b/src/common/inc/tarithoperator.h @@ -25,6 +25,7 @@ typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t l _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr); + #ifdef __cplusplus } #endif diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 3a5b49e9eee004f8a93121653781c23b5fd347bf..cb001becd45271989586b0d45d4f91c36bbba5aa 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -448,13 +448,14 @@ typedef struct { #define kvRowSetNCols(r, n) kvRowNCols(r) = (n) #define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE) #define kvRowValues(r) POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * kvRowNCols(r)) +#define kvRowKeys(r) POINTER_SHIFT(r, *(uint16_t *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(int16_t))) #define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r)) #define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset) #define kvRowColIdxAt(r, i) 
(kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) #define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) -#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) +#define kvRowTKey(r) (*(TKEY *)(kvRowKeys(r))) #define kvRowKey(r) tdGetKey(kvRowTKey(r)) #define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) @@ -546,7 +547,7 @@ void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder); void tdResetKVRowBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); -static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { +static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value, bool isJumpJsonVType) { if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); @@ -559,9 +560,14 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, pBuilder->nCols++; - int tlen = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type]; + char* jumpType = (char*)value; + if(isJumpJsonVType) jumpType += CHAR_BYTES; + int tlen = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(jumpType) : TYPE_BYTES[type]; + if(isJumpJsonVType) tlen += CHAR_BYTES; // add type size + if (tlen > pBuilder->alloc - pBuilder->size) { while (tlen > pBuilder->alloc - pBuilder->size) { + assert(pBuilder->alloc > 0); pBuilder->alloc *= 2; } void* buf = realloc(pBuilder->buf, pBuilder->alloc); @@ -608,22 +614,17 @@ typedef void *SMemRow; #define SMEM_ROW_DATA 0x0U // SDataRow #define SMEM_ROW_KV 0x01U // SKVRow -#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag -#define KVRatioKV (0.2f) // all bool -#define KVRatioPredict (0.4f) -#define KVRatioData (0.75f) // all bigint #define KVRatioConvert (0.9f) #define memRowType(r) ((*(uint8_t *)(r)) & 0x01) #define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory -#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit #define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) #define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) -#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT) +#define isUtilizeKVRow(k, d) ((k) < ((d)*KVRatioConvert)) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ @@ -652,7 +653,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) { #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version #define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v)) -#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r))) +#define memRowKeys(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowKeys(memRowKvBody(r))) #define memRowTKey(r) (isDataRow(r) ? 
dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r))) #define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r))) diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index bfeb3a6dfeee22f793c82748611c28ec537e8825..311f65073367e2b2a2c5e436b4655cf12ef4c02a 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -41,6 +41,55 @@ struct SSchema; #define QUERY_COND_REL_PREFIX_MATCH_LEN 6 #define QUERY_COND_REL_PREFIX_NMATCH_LEN 7 +#define TSDB_FUNC_FLAG_SCALAR 0x4000 +#define TSDB_FUNC_IS_SCALAR(id) ((((id) > 0)) && (((id) & TSDB_FUNC_FLAG_SCALAR) != 0)) +#define TSDB_FUNC_SCALAR_INDEX(id) ((id) & ~TSDB_FUNC_FLAG_SCALAR) + +/////////////////////////////////////////// +// SCALAR FUNCTIONS +#define TSDB_FUNC_SCALAR_POW (TSDB_FUNC_FLAG_SCALAR | 0x0000) +#define TSDB_FUNC_SCALAR_LOG (TSDB_FUNC_FLAG_SCALAR | 0x0001) +#define TSDB_FUNC_SCALAR_ABS (TSDB_FUNC_FLAG_SCALAR | 0x0002) +#define TSDB_FUNC_SCALAR_ACOS (TSDB_FUNC_FLAG_SCALAR | 0x0003) +#define TSDB_FUNC_SCALAR_ASIN (TSDB_FUNC_FLAG_SCALAR | 0x0004) +#define TSDB_FUNC_SCALAR_ATAN (TSDB_FUNC_FLAG_SCALAR | 0x0005) +#define TSDB_FUNC_SCALAR_COS (TSDB_FUNC_FLAG_SCALAR | 0x0006) +#define TSDB_FUNC_SCALAR_SIN (TSDB_FUNC_FLAG_SCALAR | 0x0007) +#define TSDB_FUNC_SCALAR_TAN (TSDB_FUNC_FLAG_SCALAR | 0x0008) +#define TSDB_FUNC_SCALAR_SQRT (TSDB_FUNC_FLAG_SCALAR | 0x0009) +#define TSDB_FUNC_SCALAR_CEIL (TSDB_FUNC_FLAG_SCALAR | 0x000A) +#define TSDB_FUNC_SCALAR_FLOOR (TSDB_FUNC_FLAG_SCALAR | 0x000B) +#define TSDB_FUNC_SCALAR_ROUND (TSDB_FUNC_FLAG_SCALAR | 0x000C) +#define TSDB_FUNC_SCALAR_CONCAT (TSDB_FUNC_FLAG_SCALAR | 0x000D) +#define TSDB_FUNC_SCALAR_LENGTH (TSDB_FUNC_FLAG_SCALAR | 0x000E) +#define TSDB_FUNC_SCALAR_CONCAT_WS (TSDB_FUNC_FLAG_SCALAR | 0x000F) +#define TSDB_FUNC_SCALAR_CHAR_LENGTH (TSDB_FUNC_FLAG_SCALAR | 0x0010) +#define TSDB_FUNC_SCALAR_CAST (TSDB_FUNC_FLAG_SCALAR | 0x0011) +#define TSDB_FUNC_SCALAR_MAX_NUM 18 + +#define TSDB_FUNC_SCALAR_NAME_MAX_LEN 16 
+ +typedef struct { + int16_t type; + int16_t bytes; + int16_t numOfRows; + char* data; +} tExprOperandInfo; + +typedef void (*_expr_scalar_function_t)(int16_t functionId, tExprOperandInfo* pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order); + +_expr_scalar_function_t getExprScalarFunction(uint16_t scalar); + +typedef struct tScalarFunctionInfo{ + int16_t functionId; // scalar function id & ~TSDB_FUNC_FLAG_SCALAR == index + char name[TSDB_FUNC_SCALAR_NAME_MAX_LEN]; + _expr_scalar_function_t scalarFunc; +} tScalarFunctionInfo; + +/* global scalar sql functions array */ +extern struct tScalarFunctionInfo aScalarFunctions[TSDB_FUNC_SCALAR_MAX_NUM]; + + typedef bool (*__result_filter_fn_t)(const void *, void *); typedef void (*__do_filter_suppl_fn_t)(void *, void *); @@ -49,6 +98,8 @@ enum { TSQL_NODE_EXPR = 0x1, TSQL_NODE_COL = 0x2, TSQL_NODE_VALUE = 0x4, + TSQL_NODE_FUNC = 0x8, + TSQL_NODE_TYPE = 0x10 }; /** @@ -74,8 +125,19 @@ typedef struct tExprNode { } _node; struct SSchema *pSchema; + tVariant *pVal; + + struct { + int16_t functionId; + int32_t numChildren; + struct tExprNode **pChildren; + } _func; + + TAOS_FIELD *pType; }; + int16_t resultType; + int16_t resultBytes; } tExprNode; typedef struct SExprTraverseSupp { @@ -86,6 +148,8 @@ typedef struct SExprTraverseSupp { void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)); +int32_t exprTreeValidateTree(char* msgbuf, tExprNode *pExpr); + void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); tExprNode* exprTreeFromBinary(const void* data, size_t size); tExprNode* exprdup(tExprNode* pTree); @@ -94,8 +158,8 @@ void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param); -void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, - char *(*cb)(void *, const char*, int32_t)); +void exprTreeNodeTraverse(tExprNode *pExpr, int32_t numOfRows, 
tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)); void buildFilterSetFromBinary(void **q, const char *buf, int32_t len); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 317d48ea5987935c5d53af6ad578834071643f26..cde5eab48783351d4bd8c00be9008d52b5bf6561 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -42,6 +42,7 @@ extern int8_t tsArbOnline; extern int64_t tsArbOnlineTimestamp; extern int32_t tsDnodeId; extern int64_t tsDnodeStartTime; +extern int8_t tsDnodeNopLoop; // common extern int tsRpcTimer; @@ -237,6 +238,7 @@ extern int8_t tsDeadLockKillQuery; // schemaless extern char tsDefaultJSONStrType[]; extern char tsSmlChildTableName[]; +extern char tsSmlTagNullName[]; typedef struct { diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h index 03b17bdc463d6b5d0f812eafa723c886964d35fc..23fd601090bc24606e22b7bc99f7fbf52bd4ac34 100644 --- a/src/common/inc/tvariant.h +++ b/src/common/inc/tvariant.h @@ -25,7 +25,7 @@ extern "C" { // variant, each number/string/field_id has a corresponding struct during parsing sql typedef struct tVariant { - uint32_t nType; + int32_t nType; // change uint to int, because in tVariantCreate() pVar->nType = -1; // -1 means error type int32_t nLen; // only used for string, for number, it is useless union { int64_t i64; @@ -39,7 +39,9 @@ typedef struct tVariant { bool tVariantIsValid(tVariant *pVar); -void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape); +void tVariantCreate(tVariant *pVar, SStrToken *token); + +void tVariantCreateExt(tVariant *pVar, SStrToken *token, int32_t optrType, bool needRmquoteEscape); void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32_t type); diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index 000ef79fcf9b5ee9e52dae65b99f719cec6a8059..31c7e32773965c866f069d04910cbbc59c187762 100644 --- 
a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -19,6 +19,7 @@ #include "tutil.h" #include "tarithoperator.h" #include "tcompare.h" +#include "texpr.h" //GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index cc2bb8803badc2aae2e80200691be0439bac3afe..63a19c5c1b74028b8536982b0b2445c06de121b4 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -28,6 +28,129 @@ #include "texpr.h" #include "tarithoperator.h" +static int32_t exprValidateMathNode(tExprNode *pExpr); +static int32_t exprValidateStringConcatNode(tExprNode *pExpr); +static int32_t exprValidateStringConcatWsNode(tExprNode *pExpr); +static int32_t exprValidateStringLengthNode(tExprNode *pExpr); +static int32_t exprValidateCastNode(char* msgbuf, tExprNode *pExpr); + +static int32_t exprInvalidOperationMsg(char *msgbuf, const char *msg) { + const char* msgFormat = "invalid operation: %s"; + + sprintf(msgbuf, msgFormat, msg); + + return TSDB_CODE_TSC_INVALID_OPERATION; +} + + + +int32_t exprTreeValidateFunctionNode(char* msgbuf, tExprNode *pExpr) { + int32_t code = TSDB_CODE_SUCCESS; + //TODO: check childs for every function + switch (pExpr->_func.functionId) { + case TSDB_FUNC_SCALAR_POW: + case TSDB_FUNC_SCALAR_LOG: + case TSDB_FUNC_SCALAR_ABS: + case TSDB_FUNC_SCALAR_ACOS: + case TSDB_FUNC_SCALAR_ASIN: + case TSDB_FUNC_SCALAR_ATAN: + case TSDB_FUNC_SCALAR_COS: + case TSDB_FUNC_SCALAR_SIN: + case TSDB_FUNC_SCALAR_TAN: + case TSDB_FUNC_SCALAR_SQRT: + case TSDB_FUNC_SCALAR_CEIL: + case TSDB_FUNC_SCALAR_FLOOR: + case TSDB_FUNC_SCALAR_ROUND: { + return exprValidateMathNode(pExpr); + } + case TSDB_FUNC_SCALAR_CONCAT: { + return exprValidateStringConcatNode(pExpr); + } + case TSDB_FUNC_SCALAR_LENGTH: + case TSDB_FUNC_SCALAR_CHAR_LENGTH: { + return exprValidateStringLengthNode(pExpr); + } + case TSDB_FUNC_SCALAR_CAST: { + return exprValidateCastNode(msgbuf, pExpr); + } + case 
TSDB_FUNC_SCALAR_CONCAT_WS: { + return exprValidateStringConcatWsNode(pExpr); + } + + default: + break; + } + return code; +} + +int32_t exprTreeValidateExprNode(tExprNode *pExpr) { + if (pExpr->_node.optr == TSDB_BINARY_OP_ADD || pExpr->_node.optr == TSDB_BINARY_OP_SUBTRACT || + pExpr->_node.optr == TSDB_BINARY_OP_MULTIPLY || pExpr->_node.optr == TSDB_BINARY_OP_DIVIDE || + pExpr->_node.optr == TSDB_BINARY_OP_REMAINDER) { + int16_t leftType = pExpr->_node.pLeft->resultType; + int16_t rightType = pExpr->_node.pRight->resultType; + if (!IS_NUMERIC_TYPE(leftType) || !IS_NUMERIC_TYPE(rightType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + pExpr->resultType = TSDB_DATA_TYPE_DOUBLE; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_SUCCESS; + } +} + +int32_t exprTreeValidateTree(char* msgbuf, tExprNode *pExpr) { + int32_t code = TSDB_CODE_SUCCESS; + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + if (pExpr->nodeType == TSQL_NODE_VALUE) { + pExpr->resultType = pExpr->pVal->nType; + if (!IS_VAR_DATA_TYPE(pExpr->pVal->nType)) { + pExpr->resultBytes = tDataTypes[pExpr->pVal->nType].bytes; + } else { + pExpr->resultBytes = (int16_t)(pExpr->pVal->nLen + VARSTR_HEADER_SIZE); + } + } else if (pExpr->nodeType == TSQL_NODE_COL) { + pExpr->resultType = pExpr->pSchema->type; + if (pExpr->pSchema->colId != TSDB_TBNAME_COLUMN_INDEX) { + pExpr->resultBytes = pExpr->pSchema->bytes; + } else { + pExpr->resultBytes = tGetTbnameColumnSchema()->bytes; + } + } else if (pExpr->nodeType == TSQL_NODE_EXPR) { + code = exprTreeValidateTree(msgbuf, pExpr->_node.pLeft); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + code = exprTreeValidateTree(msgbuf, pExpr->_node.pRight); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + code = exprTreeValidateExprNode(pExpr); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } else if (pExpr->nodeType == TSQL_NODE_FUNC) { + for (int32_t i = 0; i < 
pExpr->_func.numChildren; ++i) { + code = exprTreeValidateTree(msgbuf, pExpr->_func.pChildren[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + code = exprTreeValidateFunctionNode(msgbuf, pExpr); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } else if (pExpr->nodeType == TSQL_NODE_TYPE) { + pExpr->resultType = pExpr->pType->type; + pExpr->resultBytes = pExpr->pType->bytes; + } + + return TSDB_CODE_SUCCESS; +} + static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) { if (pLeft->nodeType == TSQL_NODE_COL) { // if left node is the primary column,return true @@ -41,7 +164,7 @@ static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, co } } -static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOfRows) { +static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOfRows, int16_t colSize) { switch(type) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_UTINYINT:{ @@ -102,6 +225,13 @@ static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOf } return; } + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR:{ + for(int32_t i = 0; i < numOfRows; ++i) { + memcpy(dest + i * colSize, src + (numOfRows - i - 1) * colSize, colSize); + } + return; + } default: assert(0); } } @@ -117,11 +247,16 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) { doExprTreeDestroy(&pNode, fp); } else if (pNode->nodeType == TSQL_NODE_VALUE) { tVariantDestroy(pNode->pVal); + tfree(pNode->pVal); } else if (pNode->nodeType == TSQL_NODE_COL) { tfree(pNode->pSchema); + } else if (pNode->nodeType == TSQL_NODE_FUNC) { + doExprTreeDestroy(&pNode, fp); + } else if (pNode->nodeType == TSQL_NODE_TYPE) { + tfree(pNode->pType); } - free(pNode); + tfree(pNode); } static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { @@ -138,12 +273,19 @@ static void doExprTreeDestroy(tExprNode **pExpr, void 
(*fp)(void *)) { } } else if ((*pExpr)->nodeType == TSQL_NODE_VALUE) { tVariantDestroy((*pExpr)->pVal); - free((*pExpr)->pVal); + tfree((*pExpr)->pVal); } else if ((*pExpr)->nodeType == TSQL_NODE_COL) { free((*pExpr)->pSchema); + } else if ((*pExpr)->nodeType == TSQL_NODE_FUNC) { + for (int i = 0; i < (*pExpr)->_func.numChildren; ++i) { + doExprTreeDestroy((*pExpr)->_func.pChildren + i, fp); + } + free((*pExpr)->_func.pChildren); + } else if ((*pExpr)->nodeType == TSQL_NODE_TYPE) { + tfree((*pExpr)->pType); } - free(*pExpr); + tfree(*pExpr); *pExpr = NULL; } @@ -174,111 +316,184 @@ bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp return param->nodeFilterFn(pItem, pExpr->_node.info); } -void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, - char *(*getSourceDataBlock)(void *, const char*, int32_t)) { - if (pExprs == NULL) { - return; - } +void exprTreeExprNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)); - tExprNode *pLeft = pExprs->_node.pLeft; - tExprNode *pRight = pExprs->_node.pRight; +void exprTreeFunctionNodeTraverse(tExprNode *pExprs, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)); +void exprTreeInternalNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)); - /* the left output has result from the left child syntax tree */ - char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows); - if (pLeft->nodeType == TSQL_NODE_EXPR) { - arithmeticTreeTraverse(pLeft, numOfRows, pLeftOutput, param, order, getSourceDataBlock); +void exprTreeNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char 
*(*getSourceDataBlock)(void*, const char*, int32_t)) { + char* pOutput = output->data; + if (pExpr->nodeType == TSQL_NODE_FUNC || pExpr->nodeType == TSQL_NODE_EXPR) { + exprTreeInternalNodeTraverse(pExpr, numOfRows, output, param, order, getSourceDataBlock); + } else if (pExpr->nodeType == TSQL_NODE_COL) { + char *pInputData = getSourceDataBlock(param, pExpr->pSchema->name, pExpr->pSchema->colId); + if (order == TSDB_ORDER_DESC) { + reverseCopy(pOutput, pInputData, pExpr->pSchema->type, numOfRows, pExpr->pSchema->bytes); + } else { + memcpy(pOutput, pInputData, pExpr->pSchema->bytes*numOfRows); + } + assert(pExpr->resultType == pExpr->pSchema->type && pExpr->pSchema->bytes == pExpr->resultBytes); + output->numOfRows = numOfRows; + } else if (pExpr->nodeType == TSQL_NODE_VALUE) { + tVariantDump(pExpr->pVal, pOutput, pExpr->resultType, true); + output->numOfRows = 1; } +} - /* the right output has result from the right child syntax tree */ - char *pRightOutput = malloc(sizeof(int64_t) * numOfRows); - char *pdata = malloc(sizeof(int64_t) * numOfRows); +void exprTreeInternalNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)) { + if (pExpr->nodeType == TSQL_NODE_FUNC) { + exprTreeFunctionNodeTraverse(pExpr, numOfRows, output, param, order, getSourceDataBlock); + } else if (pExpr->nodeType == TSQL_NODE_EXPR){ + exprTreeExprNodeTraverse(pExpr, numOfRows, output, param, order, getSourceDataBlock); + } +} - if (pRight->nodeType == TSQL_NODE_EXPR) { - arithmeticTreeTraverse(pRight, numOfRows, pRightOutput, param, order, getSourceDataBlock); +void exprTreeFunctionNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)) { + int32_t numChildren = pExpr->_func.numChildren; + if (numChildren == 0) { + _expr_scalar_function_t scalarFn = 
getExprScalarFunction(pExpr->_func.functionId); + output->type = pExpr->resultType; + output->bytes = pExpr->resultBytes; + output->numOfRows = numOfRows; + scalarFn(pExpr->_func.functionId, NULL, 0, output, order); + return; } - if (pLeft->nodeType == TSQL_NODE_EXPR) { - if (pRight->nodeType == TSQL_NODE_EXPR) { - /* - * exprLeft + exprRight - * the type of returned value of one expression is always double float precious - */ - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); - OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC); + char** pChildrenOutput = calloc(numChildren, sizeof(char*)); + tExprOperandInfo* pChildrenResults = calloc(numChildren, sizeof(tExprOperandInfo)); - } else if (pRight->nodeType == TSQL_NODE_COL) { // exprLeft + columnRight - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); + tExprOperandInfo* pInputs = calloc(numChildren, sizeof(tExprOperandInfo)); + for (int i = 0; i < numChildren; ++i) { + tExprNode *pChild = pExpr->_func.pChildren[i]; + pInputs[i].type = pChild->resultType; + pInputs[i].bytes = pChild->resultBytes; + } - // set input buffer - char *pInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId); + for (int i = 0; i < numChildren; ++i) { + tExprNode *pChild = pExpr->_func.pChildren[i]; + if (pChild->nodeType == TSQL_NODE_EXPR || pChild->nodeType == TSQL_NODE_FUNC) { + pChildrenOutput[i] = malloc(pChild->resultBytes * numOfRows); + pChildrenResults[i].data = pChildrenOutput[i]; + exprTreeInternalNodeTraverse(pChild, numOfRows, pChildrenResults+i, param, order, getSourceDataBlock); + pInputs[i].data = pChildrenOutput[i]; + pInputs[i].numOfRows = pChildrenResults[i].numOfRows; + } else if (pChild->nodeType == TSQL_NODE_COL) { + assert(pChild->resultType == pChild->pSchema->type && pChild->resultBytes == pChild->pSchema->bytes); + char *pInputData = 
getSourceDataBlock(param, pChild->pSchema->name, pChild->pSchema->colId); if (order == TSDB_ORDER_DESC) { - reverseCopy(pdata, pInputData, pRight->pSchema->type, numOfRows); - OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pdata, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC); + pChildrenOutput[i] = malloc(pChild->pSchema->bytes * numOfRows); + reverseCopy(pChildrenOutput[i], pInputData, pChild->pSchema->type, numOfRows, pChild->pSchema->bytes); + pInputs[i].data = pChildrenOutput[i]; } else { - OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pInputData, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC); + pInputs[i].data = pInputData; } - - } else if (pRight->nodeType == TSQL_NODE_VALUE) { // exprLeft + 12 - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); - OperatorFn(pLeftOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC); + pInputs[i].numOfRows = (int16_t)numOfRows; + } else if (pChild->nodeType == TSQL_NODE_VALUE) { + pChildrenOutput[i] = malloc(pChild->resultBytes); + tVariantDump(pChild->pVal, pChildrenOutput[i], pChild->resultType, true); + pInputs[i].data = pChildrenOutput[i]; + pInputs[i].numOfRows = 1; } - } else if (pLeft->nodeType == TSQL_NODE_COL) { - // column data specified on left-hand-side - char *pLeftInputData = getSourceDataBlock(param, pLeft->pSchema->name, pLeft->pSchema->colId); - if (pRight->nodeType == TSQL_NODE_EXPR) { // columnLeft + expr2 - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); + } - if (order == TSDB_ORDER_DESC) { - reverseCopy(pdata, pLeftInputData, pLeft->pSchema->type, numOfRows); - OperatorFn(pdata, numOfRows, pLeft->pSchema->type, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC); - } else { - OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC); - } 
+ _expr_scalar_function_t scalarFn = getExprScalarFunction(pExpr->_func.functionId); + output->type = pExpr->resultType; + output->bytes = pExpr->resultBytes; + output->numOfRows = (int16_t)numOfRows; + scalarFn(pExpr->_func.functionId, pInputs, numChildren, output, TSDB_ORDER_ASC); - } else if (pRight->nodeType == TSQL_NODE_COL) { // columnLeft + columnRight - // column data specified on right-hand-side - char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId); - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); + tfree(pChildrenResults); + for (int i = 0; i < numChildren; ++i) { + tfree(pChildrenOutput[i]); + } + tfree(pInputs); + tfree(pChildrenOutput); +} - // both columns are descending order, do not reverse the source data - OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, pRightInputData, numOfRows, pRight->pSchema->type, pOutput, order); - } else if (pRight->nodeType == TSQL_NODE_VALUE) { // columnLeft + 12 - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); +void exprTreeExprNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, const char*, int32_t)) { - if (order == TSDB_ORDER_DESC) { - reverseCopy(pdata, pLeftInputData, pLeft->pSchema->type, numOfRows); - OperatorFn(pdata, numOfRows, pLeft->pSchema->type, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC); - } else { - OperatorFn(pLeftInputData, numOfRows, pLeft->pSchema->type, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC); - } + tExprNode *pLeft = pExpr->_node.pLeft; + tExprNode *pRight = pExpr->_node.pRight; + char *ltmp = NULL, *rtmp = NULL; + char *leftIn = NULL, *rightIn = NULL; + int32_t leftNum = 0, rightNum = 0; + int32_t leftType = 0, rightType = 0; + int32_t fnOrder = TSDB_ORDER_ASC; + + if (pLeft->nodeType == TSQL_NODE_EXPR || pLeft->nodeType == 
TSQL_NODE_FUNC) { + ltmp = (char*)malloc(sizeof(int64_t) * numOfRows); + tExprOperandInfo left; + left.data = ltmp; + exprTreeInternalNodeTraverse(pLeft, numOfRows, &left, param, order, getSourceDataBlock); + + leftIn = ltmp; + leftType = left.type; + leftNum = left.numOfRows; + } else if (pLeft->nodeType == TSQL_NODE_COL) { + char *pInputData = getSourceDataBlock(param, pLeft->pSchema->name, pLeft->pSchema->colId); + if (order == TSDB_ORDER_DESC && (pRight->nodeType != TSQL_NODE_COL)) { + ltmp = malloc(sizeof(int64_t) * numOfRows); + reverseCopy(ltmp, pInputData, pLeft->pSchema->type, numOfRows, pLeft->pSchema->bytes); + leftIn = ltmp; + } else { + leftIn = pInputData; + fnOrder = order; } - } else { - // column data specified on left-hand-side - if (pRight->nodeType == TSQL_NODE_EXPR) { // 12 + expr2 - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); - OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pRightOutput, numOfRows, TSDB_DATA_TYPE_DOUBLE, pOutput, TSDB_ORDER_ASC); - } else if (pRight->nodeType == TSQL_NODE_COL) { // 12 + columnRight - // column data specified on right-hand-side - char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId); - _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); - - if (order == TSDB_ORDER_DESC) { - reverseCopy(pdata, pRightInputData, pRight->pSchema->type, numOfRows); - OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pdata, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC); - } else { - OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, pRightInputData, numOfRows, pRight->pSchema->type, pOutput, TSDB_ORDER_ASC); - } + leftType = pLeft->pSchema->type; + leftNum = numOfRows; + } else { + assert(pLeft->nodeType == TSQL_NODE_VALUE); + leftIn = (char *)&pLeft->pVal->i64; + leftType = pLeft->pVal->nType; + leftNum = 1; + } - } else if (pRight->nodeType == TSQL_NODE_VALUE) { // 12 + 12 - 
_arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExprs->_node.optr); - OperatorFn(&pLeft->pVal->i64, 1, pLeft->pVal->nType, &pRight->pVal->i64, 1, pRight->pVal->nType, pOutput, TSDB_ORDER_ASC); + if (pRight->nodeType == TSQL_NODE_EXPR || pRight->nodeType == TSQL_NODE_FUNC) { + rtmp = (char*)malloc(sizeof(int64_t) * numOfRows); + tExprOperandInfo right; + right.data = rtmp; + exprTreeInternalNodeTraverse(pRight, numOfRows, &right, param, order, getSourceDataBlock); + + rightIn = rtmp; + rightType = right.type; + rightNum = right.numOfRows; + } else if (pRight->nodeType == TSQL_NODE_COL) { + char *pInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->pSchema->colId); + if (order == TSDB_ORDER_DESC && (pLeft->nodeType != TSQL_NODE_COL)) { + rtmp = malloc(sizeof(int64_t) * numOfRows); + reverseCopy(rtmp, pInputData, pRight->pSchema->type, numOfRows, pRight->pSchema->bytes); + rightIn = rtmp; + } else { + rightIn = pInputData; + fnOrder = order; } + + rightType = pRight->pSchema->type; + rightNum = numOfRows; + } else { + assert(pRight->nodeType == TSQL_NODE_VALUE); + rightIn = (char *)&pRight->pVal->i64; + rightType = pRight->pVal->nType; + rightNum = 1; } - tfree(pdata); - tfree(pLeftOutput); - tfree(pRightOutput); + _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExpr->_node.optr); + OperatorFn(leftIn, leftNum, leftType, rightIn, rightNum, rightType, output->data, fnOrder); + + output->numOfRows = MAX(leftNum, rightNum); + output->type = TSDB_DATA_TYPE_DOUBLE; + output->bytes = tDataTypes[output->type].bytes; + + tfree(ltmp); + tfree(rtmp); } static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) { @@ -288,7 +503,7 @@ static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) { tVariant* pVal = expr->pVal; tbufWriteUint32(bw, pVal->nType); - if (pVal->nType == TSDB_DATA_TYPE_BINARY) { + if (pVal->nType == TSDB_DATA_TYPE_BINARY || pVal->nType == TSDB_DATA_TYPE_NCHAR) { tbufWriteInt32(bw, 
pVal->nLen); tbufWrite(bw, pVal->pz, pVal->nLen); } else { @@ -307,7 +522,15 @@ static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) { tbufWriteUint8(bw, expr->_node.hasPK); exprTreeToBinaryImpl(bw, expr->_node.pLeft); exprTreeToBinaryImpl(bw, expr->_node.pRight); + } else if (expr->nodeType == TSQL_NODE_FUNC) { + tbufWriteInt16(bw, expr->_func.functionId); + tbufWriteInt32(bw, expr->_func.numChildren); + for (int i = 0; i < expr->_func.numChildren; ++i) { + exprTreeToBinaryImpl(bw, expr->_func.pChildren[i]); + } } + tbufWriteInt16(bw, expr->resultType); + tbufWriteInt16(bw, expr->resultBytes); } void exprTreeToBinary(SBufferWriter* bw, tExprNode* expr) { @@ -349,7 +572,7 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) { pExpr->pVal = pVal; pVal->nType = tbufReadUint32(br); - if (pVal->nType == TSDB_DATA_TYPE_BINARY) { + if (pVal->nType == TSDB_DATA_TYPE_BINARY || pVal->nType == TSDB_DATA_TYPE_NCHAR) { tbufReadToBuffer(br, &pVal->nLen, sizeof(pVal->nLen)); pVal->pz = calloc(1, pVal->nLen + 1); tbufReadToBuffer(br, pVal->pz, pVal->nLen); @@ -372,8 +595,16 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) { pExpr->_node.pLeft = exprTreeFromBinaryImpl(br); pExpr->_node.pRight = exprTreeFromBinaryImpl(br); assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL); + } else if (pExpr->nodeType == TSQL_NODE_FUNC) { + pExpr->_func.functionId = tbufReadInt16(br); + pExpr->_func.numChildren = tbufReadInt32(br); + pExpr->_func.pChildren = (tExprNode**)calloc(pExpr->_func.numChildren, sizeof(tExprNode*)); + for (int i = 0; i < pExpr->_func.numChildren; ++i) { + pExpr->_func.pChildren[i] = exprTreeFromBinaryImpl(br); + } } - + pExpr->resultType = tbufReadInt16(br); + pExpr->resultBytes = tbufReadInt16(br); CLEANUP_EXECUTE_TO(anchor, false); return pExpr; } @@ -620,9 +851,854 @@ tExprNode* exprdup(tExprNode* pNode) { } else if (pNode->nodeType == TSQL_NODE_COL) { pCloned->pSchema = calloc(1, sizeof(SSchema)); *pCloned->pSchema 
= *pNode->pSchema; - } - + } else if (pNode->nodeType == TSQL_NODE_FUNC) { + pCloned->_func.functionId = pNode->_func.functionId; + pCloned->_func.numChildren = pNode->_func.numChildren; + pCloned->_func.pChildren = calloc(pNode->_func.numChildren, sizeof(tExprNode*)); + for (int i = 0; i < pNode->_func.numChildren; ++i) { + pCloned->_func.pChildren[i] = exprdup(pNode->_func.pChildren[i]); + } + } else if (pNode->nodeType == TSQL_NODE_TYPE) { + pCloned->pType = calloc(1, sizeof(TAOS_FIELD)); + *pCloned->pType = *pNode->pType; + } + pCloned->nodeType = pNode->nodeType; + pCloned->resultType = pNode->resultType; + pCloned->resultBytes = pNode->resultBytes; return pCloned; } +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// scalar functions +int32_t exprValidateStringConcatNode(tExprNode *pExpr) { + if (pExpr->_func.numChildren < 2 || pExpr->_func.numChildren > 8) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + int16_t prevResultType = TSDB_DATA_TYPE_NULL; + int16_t resultType = TSDB_DATA_TYPE_NULL; + bool resultTypeDeduced = false; + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (child->nodeType != TSQL_NODE_VALUE) { + resultType = child->resultType; + if (!IS_VAR_DATA_TYPE(resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if (!resultTypeDeduced) { + resultTypeDeduced = true; + } else { + if (resultType != prevResultType) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + prevResultType = child->resultType; + } else { + if (!IS_VAR_DATA_TYPE(child->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + } + + if (resultTypeDeduced) { + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (child->nodeType == TSQL_NODE_VALUE) { + if (!IS_VAR_DATA_TYPE(child->pVal->nType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + char* payload = 
malloc(child->pVal->nLen * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); + tVariantDump(child->pVal, payload, resultType, true); + int16_t resultBytes = varDataTLen(payload); + free(payload); + child->resultType = resultType; + child->resultBytes = (int16_t)(resultBytes); + } + } + } else { + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + assert(child->nodeType == TSQL_NODE_VALUE) ; + resultType = child->resultType; + for (int j = i+1; j < pExpr->_func.numChildren; ++j) { + if (pExpr->_func.pChildren[j]->resultType != resultType) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + } + } + + pExpr->resultType = resultType; + int16_t resultBytes = 0; + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (resultBytes <= resultBytes + child->resultBytes - VARSTR_HEADER_SIZE) { + resultBytes += child->resultBytes - VARSTR_HEADER_SIZE; + } else { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + pExpr->resultBytes = resultBytes + VARSTR_HEADER_SIZE; + return TSDB_CODE_SUCCESS; +} + +int32_t exprValidateStringConcatWsNode(tExprNode *pExpr) { + if (pExpr->_func.numChildren < 3 || pExpr->_func.numChildren > 9) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + int16_t prevResultType = TSDB_DATA_TYPE_NULL; + int16_t resultType = TSDB_DATA_TYPE_NULL; + bool resultTypeDeduced = false; + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (child->nodeType != TSQL_NODE_VALUE) { + resultType = child->resultType; + if (!IS_VAR_DATA_TYPE(resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if (!resultTypeDeduced) { + resultTypeDeduced = true; + } else { + if (resultType != prevResultType) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + prevResultType = child->resultType; + } else { + if (!IS_VAR_DATA_TYPE(child->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + } + + if 
(resultTypeDeduced) { + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (child->nodeType == TSQL_NODE_VALUE) { + if (!IS_VAR_DATA_TYPE(child->pVal->nType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + char* payload = malloc(child->pVal->nLen * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); + tVariantDump(child->pVal, payload, resultType, true); + int16_t resultBytes = varDataTLen(payload); + free(payload); + child->resultType = resultType; + child->resultBytes = (int16_t)(resultBytes); + } + } + } else { + for (int32_t i = 0; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + assert(child->nodeType == TSQL_NODE_VALUE) ; + resultType = child->resultType; + for (int j = i+1; j < pExpr->_func.numChildren; ++j) { + if (pExpr->_func.pChildren[j]->resultType != resultType) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + } + } + + pExpr->resultType = resultType; + int16_t resultBytes = 0; + for (int32_t i = 1; i < pExpr->_func.numChildren; ++i) { + tExprNode *child = pExpr->_func.pChildren[i]; + if (resultBytes <= resultBytes + child->resultBytes - VARSTR_HEADER_SIZE) { + resultBytes += child->resultBytes - VARSTR_HEADER_SIZE; + } else { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + } + tExprNode* wsNode = pExpr->_func.pChildren[0]; + int16_t wsResultBytes = wsNode->resultBytes - VARSTR_HEADER_SIZE; + resultBytes += wsResultBytes * (pExpr->_func.numChildren - 2); + pExpr->resultBytes = resultBytes + VARSTR_HEADER_SIZE; + return TSDB_CODE_SUCCESS; +} + + +int32_t exprValidateStringLengthNode(tExprNode *pExpr) { + if (pExpr->_func.numChildren != 1) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + tExprNode* child1 = pExpr->_func.pChildren[0]; + + if (child1->nodeType == TSQL_NODE_VALUE) { + child1->resultType = (int16_t)child1->pVal->nType; + child1->resultBytes = (int16_t)(child1->pVal->nLen + VARSTR_HEADER_SIZE); + } + + if (!IS_VAR_DATA_TYPE(child1->resultType)) { + 
return TSDB_CODE_TSC_INVALID_OPERATION; + } + + pExpr->resultType = TSDB_DATA_TYPE_INT; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes; + + return TSDB_CODE_SUCCESS; +} + +int32_t exprValidateCastNode(char* msgbuf, tExprNode *pExpr) { + const char* msg1 = "invalid param num for cast function"; + const char* msg2 = "the second param should be a valid type name for cast function"; + const char* msg3 = "target type is not supported for cast function"; + const char* msg4 = "not supported type convertion for cast function"; + + if (pExpr->_func.numChildren != 2) { + return exprInvalidOperationMsg(msgbuf, msg1); + } + + tExprNode* child0 = pExpr->_func.pChildren[0]; + tExprNode* child1 = pExpr->_func.pChildren[1]; + + if (child1->nodeType != TSQL_NODE_TYPE) { + return exprInvalidOperationMsg(msgbuf, msg2); + } + + if (child1->resultType != TSDB_DATA_TYPE_BIGINT && child1->resultType != TSDB_DATA_TYPE_UBIGINT + && child1->resultType != TSDB_DATA_TYPE_TIMESTAMP && child1->resultType != TSDB_DATA_TYPE_BINARY + && child1->resultType != TSDB_DATA_TYPE_NCHAR) { + return exprInvalidOperationMsg(msgbuf, msg3); + } + + if ((child0->resultType == TSDB_DATA_TYPE_BINARY && child1->resultType == TSDB_DATA_TYPE_TIMESTAMP) + || (child0->resultType == TSDB_DATA_TYPE_TIMESTAMP && (child1->resultType == TSDB_DATA_TYPE_BINARY || child1->resultType == TSDB_DATA_TYPE_NCHAR)) + || (child0->resultType == TSDB_DATA_TYPE_NCHAR && (child1->resultType == TSDB_DATA_TYPE_BINARY || child1->resultType == TSDB_DATA_TYPE_TIMESTAMP))) { + return exprInvalidOperationMsg(msgbuf, msg4); + } + + pExpr->resultType = child1->resultType; + pExpr->resultBytes = child1->resultBytes; + + doExprTreeDestroy(&pExpr->_func.pChildren[1], NULL); + pExpr->_func.numChildren = 1; + + return TSDB_CODE_SUCCESS; +} + + +int32_t exprValidateMathNode(tExprNode *pExpr) { + switch (pExpr->_func.functionId) { + case TSDB_FUNC_SCALAR_POW: + case TSDB_FUNC_SCALAR_LOG: { + if (pExpr->_func.numChildren != 2) { + return 
TSDB_CODE_TSC_INVALID_OPERATION; + } + tExprNode *child1 = pExpr->_func.pChildren[0]; + tExprNode *child2 = pExpr->_func.pChildren[1]; + if (!IS_NUMERIC_TYPE(child1->resultType) || !IS_NUMERIC_TYPE(child2->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + pExpr->resultType = TSDB_DATA_TYPE_DOUBLE; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + + return TSDB_CODE_SUCCESS; + } + case TSDB_FUNC_SCALAR_ABS: { + if (pExpr->_func.numChildren != 1) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + tExprNode *child1 = pExpr->_func.pChildren[0]; + if (!IS_NUMERIC_TYPE(child1->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if (IS_SIGNED_NUMERIC_TYPE(child1->resultType) || IS_UNSIGNED_NUMERIC_TYPE(child1->resultType)) { + pExpr->resultType = TSDB_DATA_TYPE_UBIGINT; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_UBIGINT].bytes; + } else if (IS_FLOAT_TYPE(child1->resultType)) { + pExpr->resultType = TSDB_DATA_TYPE_DOUBLE; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + } + break; + } + case TSDB_FUNC_SCALAR_SQRT: + case TSDB_FUNC_SCALAR_ASIN: + case TSDB_FUNC_SCALAR_ACOS: + case TSDB_FUNC_SCALAR_ATAN: + case TSDB_FUNC_SCALAR_SIN: + case TSDB_FUNC_SCALAR_COS: + case TSDB_FUNC_SCALAR_TAN: { + if (pExpr->_func.numChildren != 1) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + tExprNode *child1 = pExpr->_func.pChildren[0]; + if (!IS_NUMERIC_TYPE(child1->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + pExpr->resultType = TSDB_DATA_TYPE_DOUBLE; + pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + break; + } + + case TSDB_FUNC_SCALAR_CEIL: + case TSDB_FUNC_SCALAR_FLOOR: + case TSDB_FUNC_SCALAR_ROUND: { + if (pExpr->_func.numChildren != 1) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + tExprNode* child = pExpr->_func.pChildren[0]; + if (!IS_NUMERIC_TYPE(child->resultType)) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + pExpr->resultType = child->resultType; + pExpr->resultBytes = 
child->resultBytes; + break; + } + default: { + assert(false); + break; + } + } + return TSDB_CODE_SUCCESS; +} + +void vectorConcat(int16_t functionId, tExprOperandInfo* pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order) { + assert(functionId == TSDB_FUNC_SCALAR_CONCAT && numInputs >=2 && order == TSDB_ORDER_ASC); + for (int i = 0; i < numInputs; ++i) { + assert(pInputs[i].numOfRows == 1 || pInputs[i].numOfRows == pOutput->numOfRows); + } + + char* outputData = NULL; + char** inputData = calloc(numInputs, sizeof(char*)); + for (int i = 0; i < pOutput->numOfRows; ++i) { + for (int j = 0; j < numInputs; ++j) { + if (pInputs[j].numOfRows == 1) { + inputData[j] = pInputs[j].data; + } else { + inputData[j] = pInputs[j].data + i * pInputs[j].bytes; + } + } + + outputData = pOutput->data + i * pOutput->bytes; + + bool hasNullInputs = false; + for (int j = 0; j < numInputs; ++j) { + if (isNull(inputData[j], pInputs[j].type)) { + hasNullInputs = true; + setNull(outputData, pOutput->type, pOutput->bytes); + } + } + + if (!hasNullInputs) { + int16_t dataLen = 0; + for (int j = 0; j < numInputs; ++j) { + memcpy(((char*)varDataVal(outputData))+dataLen, varDataVal(inputData[j]), varDataLen(inputData[j])); + dataLen += varDataLen(inputData[j]); + } + varDataSetLen(outputData, dataLen); + } + } + + free(inputData); +} + +void vectorConcatWs(int16_t functionId, tExprOperandInfo* pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order) { + assert(functionId == TSDB_FUNC_SCALAR_CONCAT_WS && numInputs >=3 && order == TSDB_ORDER_ASC); + for (int i = 0; i < numInputs; ++i) { + assert(pInputs[i].numOfRows == 1 || pInputs[i].numOfRows == pOutput->numOfRows); + } + + char* outputData = NULL; + char** inputData = calloc(numInputs, sizeof(char*)); + for (int i = 0; i < pOutput->numOfRows; ++i) { + for (int j = 0; j < numInputs; ++j) { + if (pInputs[j].numOfRows == 1) { + inputData[j] = pInputs[j].data; + } else { + inputData[j] = pInputs[j].data + i * 
pInputs[j].bytes; + } + } + + outputData = pOutput->data + i * pOutput->bytes; + + if (isNull(inputData[0], pInputs[0].type)) { + setNull(outputData, pOutput->type, pOutput->bytes); + continue; + } + + int16_t dataLen = 0; + for (int j = 1; j < numInputs; ++j) { + if (isNull(inputData[j], pInputs[j].type)) { + continue; + } + memcpy(((char*)varDataVal(outputData))+dataLen, varDataVal(inputData[j]), varDataLen(inputData[j])); + dataLen += varDataLen(inputData[j]); + if (j < numInputs - 1) { + memcpy(((char*)varDataVal(outputData))+dataLen, varDataVal(inputData[0]), varDataLen(inputData[0])); + dataLen += varDataLen(inputData[0]); + } + } + varDataSetLen(outputData, dataLen); + } + + + free(inputData); +} + +void vectorLength(int16_t functionId, tExprOperandInfo *pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order) { + assert(functionId == TSDB_FUNC_SCALAR_LENGTH && numInputs == 1 && order == TSDB_ORDER_ASC); + assert(IS_VAR_DATA_TYPE(pInputs[0].type)); + + char* data0 = NULL; + char* outputData = NULL; + for (int32_t i = 0; i < pOutput->numOfRows; ++i) { + if (pInputs[0].numOfRows == 1) { + data0 = pInputs[0].data; + } else { + data0 = pInputs[0].data + i * pInputs[0].bytes; + } + + outputData = pOutput->data + i * pOutput->bytes; + if (isNull(data0, pInputs[0].type)) { + setNull(outputData, pOutput->type, pOutput->bytes); + } else { + int16_t result = varDataLen(data0); + SET_TYPED_DATA(outputData, pOutput->type, result); + } + } +} + +void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t OutputType, int16_t outputBytes, char *output) { + switch (OutputType) { + case TSDB_DATA_TYPE_BIGINT: + if (inputType == TSDB_DATA_TYPE_BINARY) { + char *tmp = malloc(varDataLen(input) + 1); + memcpy(tmp, varDataVal(input), varDataLen(input)); + tmp[varDataLen(input)] = 0; + *(int64_t *)output = strtoll(tmp, NULL, 10); + free(tmp); + } else if (inputType == TSDB_DATA_TYPE_NCHAR) { + char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE 
+ 1); + int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData); + newColData[len] = 0; + *(int64_t *)output = strtoll(newColData, NULL, 10); + tfree(newColData); + } else { + GET_TYPED_DATA(*(int64_t *)output, int64_t, inputType, input); + } + break; + case TSDB_DATA_TYPE_UBIGINT: + if (inputType == TSDB_DATA_TYPE_BINARY) { + char *tmp = malloc(varDataLen(input) + 1); + memcpy(tmp, varDataVal(input), varDataLen(input)); + tmp[varDataLen(input)] = 0; + *(uint64_t *)output = strtoull(tmp, NULL, 10); + free(tmp); + } else if (inputType == TSDB_DATA_TYPE_NCHAR) { + char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1); + int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData); + newColData[len] = 0; + *(int64_t *)output = strtoull(newColData, NULL, 10); + tfree(newColData); + } else { + GET_TYPED_DATA(*(uint64_t *)output, uint64_t, inputType, input); + } + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (inputType == TSDB_DATA_TYPE_BINARY || inputType == TSDB_DATA_TYPE_NCHAR) { + assert(0); + } else { + GET_TYPED_DATA(*(int64_t *)output, int64_t, inputType, input); + } + break; + case TSDB_DATA_TYPE_BINARY: + if (inputType == TSDB_DATA_TYPE_BOOL) { + int32_t len = sprintf(varDataVal(output), "%.*s", (int32_t)(outputBytes - VARSTR_HEADER_SIZE), *(int8_t*)input ? "true" : "false"); + varDataSetLen(output, len); + } else if (inputType == TSDB_DATA_TYPE_BINARY) { + char *tmp = malloc(varDataLen(input) + 1); + memcpy(tmp, varDataVal(input), varDataLen(input)); + tmp[varDataLen(input)] = 0; + int32_t len = sprintf(varDataVal(output), "%.*s", (int32_t)(outputBytes - VARSTR_HEADER_SIZE), tmp); + varDataSetLen(output, len); + free(tmp); + } else if (inputType == TSDB_DATA_TYPE_TIMESTAMP || inputType == TSDB_DATA_TYPE_NCHAR) { + assert(0); + } else { + char tmp[400] = {0}; + NUM_TO_STRING(inputType, input, sizeof(tmp), tmp); + int32_t len = (int32_t)strlen(tmp); + len = (outputBytes - VARSTR_HEADER_SIZE) > len ? 
len : (outputBytes - VARSTR_HEADER_SIZE); + memcpy(varDataVal(output), tmp, len); + varDataSetLen(output, len); + } + break; + case TSDB_DATA_TYPE_NCHAR: { + int32_t ncharSize = (outputBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + if (inputType == TSDB_DATA_TYPE_BOOL) { + char tmp[8] = {0}; + int32_t len = sprintf(tmp, "%.*s", ncharSize, *(int8_t*)input ? "true" : "false"); + taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + varDataSetLen(output, len); + } else if (inputType == TSDB_DATA_TYPE_BINARY) { + int32_t len = ncharSize > varDataLen(input) ? varDataLen(input) : ncharSize; + taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + varDataSetLen(output, len); + } else if (inputType == TSDB_DATA_TYPE_TIMESTAMP) { + assert(0); + } else if (inputType == TSDB_DATA_TYPE_NCHAR) { + int32_t len = (inputBytes > outputBytes) ? outputBytes : inputBytes; + memcpy(output, input, len); + varDataSetLen(output, len - VARSTR_HEADER_SIZE); + } else { + char tmp[400] = {0}; + NUM_TO_STRING(inputType, input, sizeof(tmp), tmp); + int32_t len = (int32_t)(ncharSize > strlen(tmp) ? 
strlen(tmp) : ncharSize); + taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + varDataSetLen(output, len); + } + break; + } + default: + assert(0); + break; + } +} + +void vectorCharLength(int16_t functionId, tExprOperandInfo *pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order) { + assert(functionId == TSDB_FUNC_SCALAR_CHAR_LENGTH && numInputs == 1 && order == TSDB_ORDER_ASC); + assert(IS_VAR_DATA_TYPE(pInputs[0].type)); + + char* data0 = NULL; + char* outputData = NULL; + for (int32_t i = 0; i < pOutput->numOfRows; ++i) { + if (pInputs[0].numOfRows == 1) { + data0 = pInputs[0].data; + } else { + data0 = pInputs[0].data + i * pInputs[0].bytes; + } + + outputData = pOutput->data + i * pOutput->bytes; + if (isNull(data0, pInputs[0].type)) { + setNull(outputData, pOutput->type, pOutput->bytes); + } else { + int16_t result = varDataLen(data0); + if (pInputs[0].type == TSDB_DATA_TYPE_BINARY) { + SET_TYPED_DATA(outputData, pOutput->type, result); + } else if (pInputs[0].type == TSDB_DATA_TYPE_NCHAR) { + SET_TYPED_DATA(outputData, pOutput->type, result/TSDB_NCHAR_SIZE); + } + } + } +} + +void vectorMathFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order) { + for (int i = 0; i < numInputs; ++i) { + assert(pInputs[i].numOfRows == 1 || pInputs[i].numOfRows == pOutput->numOfRows); + } + + char* outputData = NULL; + char** inputData = calloc(numInputs, sizeof(char*)); + for (int i = 0; i < pOutput->numOfRows; ++i) { + for (int j = 0; j < numInputs; ++j) { + if (pInputs[j].numOfRows == 1) { + inputData[j] = pInputs[j].data; + } else { + inputData[j] = pInputs[j].data + i * pInputs[j].bytes; + } + } + + outputData = pOutput->data + i * pOutput->bytes; + + bool hasNullInputs = false; + for (int j = 0; j < numInputs; ++j) { + if (isNull(inputData[j], pInputs[j].type)) { + hasNullInputs = true; + setNull(outputData, pOutput->type, pOutput->bytes); + } + } + + if 
(!hasNullInputs) { + switch (functionId) { + case TSDB_FUNC_SCALAR_LOG: { + assert(numInputs == 2); + double base = 0; + GET_TYPED_DATA(base, double, pInputs[1].type, inputData[1]); + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = log(v1) / log(base); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + + case TSDB_FUNC_SCALAR_POW:{ + assert(numInputs == 2); + double base = 0; + GET_TYPED_DATA(base, double, pInputs[1].type, inputData[1]); + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = pow(v1, base); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + + case TSDB_FUNC_SCALAR_ABS: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + if (IS_SIGNED_NUMERIC_TYPE(pInputs[0].type)) { + int64_t v1 = 0; + GET_TYPED_DATA(v1, int64_t, pInputs[0].type, inputData[0]); + uint64_t result = (uint64_t)(llabs(v1)); + SET_TYPED_DATA(outputData, pOutput->type, result); + } else if (IS_UNSIGNED_NUMERIC_TYPE(pInputs[0].type)) { + uint64_t v1 = 0; + GET_TYPED_DATA(v1, uint64_t, pInputs[0].type, inputData[0]); + SET_TYPED_DATA(outputData, pOutput->type, v1); + } else if (IS_FLOAT_TYPE(pInputs[0].type)) { + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = fabs(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + } + break; + } + case TSDB_FUNC_SCALAR_SQRT: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = sqrt(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + + break; + } + case TSDB_FUNC_SCALAR_ASIN: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = asin(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + case 
TSDB_FUNC_SCALAR_ACOS: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = acos(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + case TSDB_FUNC_SCALAR_ATAN: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = atan(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + case TSDB_FUNC_SCALAR_SIN: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = sin(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + case TSDB_FUNC_SCALAR_COS: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = cos(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + case TSDB_FUNC_SCALAR_TAN:{ + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + + double v1 = 0; + GET_TYPED_DATA(v1, double, pInputs[0].type, inputData[0]); + double result = tan(v1); + SET_TYPED_DATA(outputData, pOutput->type, result); + break; + } + + case TSDB_FUNC_SCALAR_CEIL: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + if (IS_UNSIGNED_NUMERIC_TYPE(pInputs[0].type) || IS_SIGNED_NUMERIC_TYPE(pInputs[0].type)) { + memcpy(outputData, inputData[0], pInputs[0].bytes); + } else { + if (pInputs[0].type == TSDB_DATA_TYPE_FLOAT) { + float v = 0; + GET_TYPED_DATA(v, float, pInputs[0].type, inputData[0]); + float result = ceilf(v); + SET_TYPED_DATA(outputData, pOutput->type, result); + } else if (pInputs[0].type == TSDB_DATA_TYPE_DOUBLE) { + double v = 0; + GET_TYPED_DATA(v, double, pInputs[0].type, inputData[0]); + double result = ceil(v); + 
SET_TYPED_DATA(outputData, pOutput->type, result); + } + } + break; + } + case TSDB_FUNC_SCALAR_FLOOR: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + if (IS_UNSIGNED_NUMERIC_TYPE(pInputs[0].type) || IS_SIGNED_NUMERIC_TYPE(pInputs[0].type)) { + memcpy(outputData, inputData[0], pInputs[0].bytes); + } else { + if (pInputs[0].type == TSDB_DATA_TYPE_FLOAT) { + float v = 0; + GET_TYPED_DATA(v, float, pInputs[0].type, inputData[0]); + float result = floorf(v); + SET_TYPED_DATA(outputData, pOutput->type, result); + } else if (pInputs[0].type == TSDB_DATA_TYPE_DOUBLE) { + double v = 0; + GET_TYPED_DATA(v, double, pInputs[0].type, inputData[0]); + double result = floor(v); + SET_TYPED_DATA(outputData, pOutput->type, result); + } + } + break; + } + + case TSDB_FUNC_SCALAR_ROUND: { + assert(numInputs == 1); + assert(IS_NUMERIC_TYPE(pInputs[0].type)); + if (IS_UNSIGNED_NUMERIC_TYPE(pInputs[0].type) || IS_SIGNED_NUMERIC_TYPE(pInputs[0].type)) { + memcpy(outputData, inputData[0], pInputs[0].bytes); + } else { + if (pInputs[0].type == TSDB_DATA_TYPE_FLOAT) { + float v = 0; + GET_TYPED_DATA(v, float, pInputs[0].type, inputData[0]); + float result = roundf(v); + SET_TYPED_DATA(outputData, pOutput->type, result); + } else if (pInputs[0].type == TSDB_DATA_TYPE_DOUBLE) { + double v = 0; + GET_TYPED_DATA(v, double, pInputs[0].type, inputData[0]); + double result = round(v); + SET_TYPED_DATA(outputData, pOutput->type, result); + } + } + break; + } + case TSDB_FUNC_SCALAR_CAST: { + castConvert(pInputs[0].type, pInputs[0].bytes, inputData[0], pOutput->type, pOutput->bytes, outputData); + break; + } + default: { + assert(false); + break; + } + } // end switch function(id) + } // end can produce value, all child has value + } // end for each row + free(inputData); +} + +_expr_scalar_function_t getExprScalarFunction(uint16_t funcId) { + assert(TSDB_FUNC_IS_SCALAR(funcId)); + int16_t scalaIdx = TSDB_FUNC_SCALAR_INDEX(funcId); + assert(scalaIdx>=0 && scalaIdx <= 
TSDB_FUNC_SCALAR_MAX_NUM); + return aScalarFunctions[scalaIdx].scalarFunc; +} + +tScalarFunctionInfo aScalarFunctions[] = { + { + TSDB_FUNC_SCALAR_POW, + "pow", + vectorMathFunc + }, + { + TSDB_FUNC_SCALAR_LOG, + "log", + vectorMathFunc + }, + { + TSDB_FUNC_SCALAR_ABS, + "abs", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_ACOS, + "acos", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_ASIN, + "asin", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_ATAN, + "atan", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_COS, + "cos", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_SIN, + "sin", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_TAN, + "tan", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_SQRT, + "sqrt", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_CEIL, + "ceil", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_FLOOR, + "floor", + vectorMathFunc, + }, + { + TSDB_FUNC_SCALAR_ROUND, + "round", + vectorMathFunc + }, + { + TSDB_FUNC_SCALAR_CONCAT, + "concat", + vectorConcat + }, + { + TSDB_FUNC_SCALAR_LENGTH, + "length", + vectorLength + }, + { + TSDB_FUNC_SCALAR_CONCAT_WS, + "concat_ws", + vectorConcatWs + }, + { + TSDB_FUNC_SCALAR_CHAR_LENGTH, + "char_length", + vectorCharLength + }, + { + TSDB_FUNC_SCALAR_CAST, + "cast", + vectorMathFunc + }, +}; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 62baaadbac2596bc66bf5955262a3d5ff35fcfc1..8627a3153cdac2b06cd3cf15dddefad32c39c58d 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -47,6 +47,7 @@ int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME; char tsEmail[TSDB_FQDN_LEN] = {0}; int32_t tsDnodeId = 0; int64_t tsDnodeStartTime = 0; +int8_t tsDnodeNopLoop = 0; // common int32_t tsRpcTimer = 300; @@ -291,7 +292,11 @@ int8_t tsDeadLockKillQuery = 0; // default JSON string type char tsDefaultJSONStrType[7] = "nchar"; -char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. 
If set to empty system will generate table name using MD5 hash. +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. + //If set to empty system will generate table name using MD5 hash. +char tsSmlTagNullName[TSDB_COL_NAME_LEN] = "_tag_null"; //for line protocol if tag is omitted, add a tag with NULL value + //to make sure inserted records belongs to the same measurement + //default name is _tag_null and can be user configurable int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -618,6 +623,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "dnodeNopLoop"; + cfg.ptr = &tsDnodeNopLoop; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "balance"; cfg.ptr = &tsEnableBalance; cfg.valType = TAOS_CFG_VTYPE_INT8; @@ -1328,7 +1343,7 @@ static void doInitGlobalConfig(void) { cfg.option = "httpDbNameMandatory"; cfg.ptr = &tsHttpDbNameMandatory; cfg.valType = TAOS_CFG_VTYPE_INT8; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 0; cfg.maxValue = 1; cfg.ptrLength = 0; @@ -1701,6 +1716,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // name for a NULL value tag added for Line Protocol when tag fields are omitted + cfg.option = "smlTagNullName"; + cfg.ptr = tsSmlTagNullName; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(tsSmlTagNullName); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // flush vnode wal file if walSize > walFlushSize and walSize > 
cache*0.5*blocks cfg.option = "walFlushSize"; cfg.ptr = &tsdbWalFlushSize; diff --git a/src/common/src/tname.c b/src/common/src/tname.c index c0951cba700fbea2d992da147620cf65bd1f75b9..5d7e8ce54219a1d9d36a7ac21997bb18712a286b 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -4,6 +4,7 @@ #include "tname.h" #include "ttoken.h" #include "tvariant.h" +#include "tglobal.h" #define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS) #define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS) @@ -251,6 +252,9 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen int32_t rowLen = 0; for (int32_t i = 0; i < numOfCols; ++i) { + if (pSchema[i].type == TSDB_DATA_TYPE_JSON && numOfCols != 1){ + return false; + } // 1. valid types if (!isValidDataType(pSchema[i].type)) { return false; @@ -301,8 +305,12 @@ bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTag if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) { return false; } + int32_t maxTagLen = TSDB_MAX_TAGS_LEN; + if (numOfTags == 1 && pSchema[numOfCols].type == TSDB_DATA_TYPE_JSON){ + maxTagLen = TSDB_MAX_JSON_TAGS_LEN; + } - if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) { + if (!doValidateSchema(&pSchema[numOfCols], numOfTags, maxTagLen)) { return false; } diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 08bfc2e9aa6f0b9337d484c725f2737cbbacaac0..81bc9c7275b07cf41dc1305e4db807e1b2b839a0 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -18,7 +18,7 @@ #include "ttokendef.h" #include "tscompression.h" -const int32_t TYPE_BYTES[15] = { +const int32_t TYPE_BYTES[16] = { -1, // TSDB_DATA_TYPE_NULL sizeof(int8_t), // TSDB_DATA_TYPE_BOOL sizeof(int8_t), // TSDB_DATA_TYPE_TINYINT @@ -34,6 +34,7 @@ const int32_t TYPE_BYTES[15] = { sizeof(uint16_t), // TSDB_DATA_TYPE_USMALLINT sizeof(uint32_t), // TSDB_DATA_TYPE_UINT sizeof(uint64_t), // 
TSDB_DATA_TYPE_UBIGINT + sizeof(int8_t), // TSDB_DATA_TYPE_JSON }; #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ @@ -367,8 +368,8 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i *maxIndex = 0; } -tDataTypeDescriptor tDataTypes[15] = { - {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL, NULL}, +tDataTypeDescriptor tDataTypes[16] = { + {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL, NULL}, {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", false, true, tsCompressBool, tsDecompressBool, getStatics_bool}, {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", INT8_MIN, INT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_i8}, {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", INT16_MIN, INT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_i16}, @@ -376,13 +377,14 @@ tDataTypeDescriptor tDataTypes[15] = { {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", INT64_MIN, INT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_i64}, {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f}, {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d}, - {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", 0, 0, tsCompressString, tsDecompressString, getStatics_bin}, + {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", 0, 0, tsCompressString, tsDecompressString, getStatics_bin}, {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64}, - {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, + {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, {TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_u8}, {TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, 
UINT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_u16}, {TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt, getStatics_u32}, {TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_u64}, + {TSDB_DATA_TYPE_JSON,4, TSDB_MAX_JSON_TAGS_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, }; char tTokenTypeSwitcher[13] = { @@ -428,7 +430,7 @@ FORCE_INLINE void* getDataMax(int32_t type) { bool isValidDataType(int32_t type) { - return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT; + return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_JSON; } void setVardataNull(void* val, int32_t type) { @@ -438,6 +440,9 @@ void setVardataNull(void* val, int32_t type) { } else if (type == TSDB_DATA_TYPE_NCHAR) { varDataSetLen(val, sizeof(int32_t)); *(uint32_t*) varDataVal(val) = TSDB_DATA_NCHAR_NULL; + } else if (type == TSDB_DATA_TYPE_JSON) { + varDataSetLen(val, sizeof(int32_t)); + *(uint32_t*) varDataVal(val) = TSDB_DATA_JSON_NULL; } else { assert(0); } @@ -505,6 +510,7 @@ void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) { break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_JSON: for (int32_t i = 0; i < numOfElems; ++i) { setVardataNull(POINTER_SHIFT(val, i * bytes), type); } diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index f22e3da28b331d455f5f4d73251c37072e1f69fc..33b93cfde482e67667e463b13a95be33db785462 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -16,6 +16,7 @@ #include "hash.h" #include "taos.h" +#include "taoserror.h" #include "taosdef.h" #include "ttoken.h" #include "ttokendef.h" @@ -30,7 +31,11 @@ assert(0); \ } while (0) -void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape) { +void tVariantCreate(tVariant *pVar, SStrToken *token) { + tVariantCreateExt(pVar, token, TK_ID, 
true); +} + +void tVariantCreateExt(tVariant *pVar, SStrToken *token, int32_t optrType, bool needRmquoteEscape) { int32_t ret = 0; int32_t type = token->type; @@ -54,7 +59,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape) { case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_INT:{ ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true); - if (ret != 0) { + if (ret != TSDB_CODE_SUCCESS) { SStrToken t = {0}; tGetToken(token->z, &t.type); if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN @@ -64,7 +69,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape) { // data overflow, try unsigned parse the input number ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, false); - if (ret != 0) { + if (ret != TSDB_CODE_SUCCESS) { pVar->nType = -1; // -1 means error type return; } @@ -85,15 +90,29 @@ void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape) { break; } case TSDB_DATA_TYPE_TIMESTAMP: { - pVar->i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO); - break; - } - + if (optrType == TK_NOW) { + pVar->i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO); + } else if (optrType == TK_PLUS || optrType == TK_MINUS) { + char unit = 0; + ret = parseAbsoluteDuration(token->z, token->n, &pVar->i64, &unit, TSDB_TIME_PRECISION_NANO); + if (ret != TSDB_CODE_SUCCESS) { + pVar->nType = -1; // -1 means error type + return; + } + if (optrType == TK_PLUS) { + pVar->i64 += taosGetTimestamp(TSDB_TIME_PRECISION_NANO); + } else { + pVar->i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO) - pVar->i64; + } + } + break; + } + default: { // nType == 0 means the null value type = TSDB_DATA_TYPE_NULL; } } - + pVar->nType = type; } @@ -158,34 +177,40 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32 pVar->dKey = GET_FLOAT_VAL(pz); break; } - case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw 
binary bits length + case TSDB_DATA_TYPE_NCHAR:{ // here we get the nchar length from raw binary bits length size_t lenInwchar = len / TSDB_NCHAR_SIZE; pVar->wpz = calloc(1, (lenInwchar + 1) * TSDB_NCHAR_SIZE); memcpy(pVar->wpz, pz, lenInwchar * TSDB_NCHAR_SIZE); pVar->nLen = (int32_t)len; - + + break; + } + case TSDB_DATA_TYPE_JSON:{ + pVar->pz = calloc(len + 2, sizeof(char)); + memcpy(pVar->pz, pz, len); + pVar->nLen = (int32_t)len; break; } - case TSDB_DATA_TYPE_BINARY: { // todo refactor, extract a method + case TSDB_DATA_TYPE_BINARY:{ pVar->pz = calloc(len + 1, sizeof(char)); memcpy(pVar->pz, pz, len); pVar->nLen = (int32_t)len; break; } - + default: pVar->i64 = GET_INT32_VAL(pz); pVar->nLen = tDataTypes[TSDB_DATA_TYPE_INT].bytes; } - + pVar->nType = type; } void tVariantDestroy(tVariant *pVar) { if (pVar == NULL) return; - - if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) { + + if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR || pVar->nType == TSDB_DATA_TYPE_JSON) { tfree(pVar->pz); pVar->nLen = 0; } @@ -210,11 +235,41 @@ bool tVariantIsValid(tVariant *pVar) { return isValidDataType(pVar->nType); } +bool tVariantTypeMatch(tVariant *pVar, int8_t dbType){ + switch (dbType) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + if(pVar->nType != TSDB_DATA_TYPE_BINARY && pVar->nType != TSDB_DATA_TYPE_NCHAR){ + return false; + } + break; + } + + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE:{ + if(pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR){ + return false; + } + break; + } + } + return true; +} + void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { if (pSrc == NULL || pDst 
== NULL) return; - + pDst->nType = pSrc->nType; - if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) { + if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR || pSrc->nType == TSDB_DATA_TYPE_JSON) { int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE; char* p = realloc(pDst->pz, len); assert(p); @@ -249,7 +304,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { } } - if (pDst->nType != TSDB_DATA_TYPE_POINTER_ARRAY && pDst->nType != TSDB_DATA_TYPE_VALUE_ARRAY) { + if (pDst->nType != TSDB_DATA_TYPE_POINTER_ARRAY && pDst->nType != TSDB_DATA_TYPE_VALUE_ARRAY && isValidDataType(pDst->nType)) { // if pDst->nType=-1, core dump. eg: where intcolumn=999999999999999999999999999 pDst->nLen = tDataTypes[pDst->nType].bytes; } } @@ -267,7 +322,7 @@ int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) { return 1; } - if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR) { + if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR || p1->nType == TSDB_DATA_TYPE_JSON) { if (p1->nLen == p2->nLen) { return memcmp(p1->pz, p2->pz, p1->nLen); } else { @@ -296,14 +351,14 @@ int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) { int32_t tVariantToString(tVariant *pVar, char *dst) { if (pVar == NULL || dst == NULL) return 0; - + switch (pVar->nType) { case TSDB_DATA_TYPE_BINARY: { int32_t len = sprintf(dst, "\'%s\'", pVar->pz); assert(len <= pVar->nLen + sizeof("\'") * 2); // two more chars return len; } - + case TSDB_DATA_TYPE_NCHAR: { dst[0] = '\''; taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1); @@ -312,7 +367,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { dst[len + 1] = 0; return len + 1; } - + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_SMALLINT: @@ -321,7 +376,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_UINT: return 
sprintf(dst, "%d", (int32_t)pVar->i64); - + case TSDB_DATA_TYPE_BIGINT: return sprintf(dst, "%" PRId64, pVar->i64); case TSDB_DATA_TYPE_UBIGINT: @@ -329,7 +384,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: return sprintf(dst, "%.9lf", pVar->dKey); - + default: return 0; } @@ -367,21 +422,21 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { if (*pDest == pVariant->pz) { pBuf = calloc(1, INITIAL_ALLOC_SIZE); } - + if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { size_t newSize = pVariant->nLen * TSDB_NCHAR_SIZE; if (pBuf != NULL) { if (newSize >= INITIAL_ALLOC_SIZE) { pBuf = realloc(pBuf, newSize + 1); } - + taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf); free(pVariant->wpz); pBuf[newSize] = 0; } else { taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest); } - + } else { if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType)) { sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64); @@ -393,18 +448,18 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { setNull(pBuf == NULL ? 
*pDest : pBuf, TSDB_DATA_TYPE_BINARY, 0); } } - + if (pBuf != NULL) { *pDest = pBuf; } - + *pDestSize = (int32_t)strlen(*pDest); return 0; } static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { char tmpBuf[40] = {0}; - + char * pDst = tmpBuf; int32_t nLen = 0; @@ -742,7 +797,7 @@ int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc if (converted) { *converted = true; } - + if (value > FLT_MAX || value < -FLT_MAX) { SET_EXT_INFO(converted, value, -FLT_MAX, FLT_MAX, extInfo); return -1; @@ -753,8 +808,8 @@ int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc if (converted) { *converted = true; } - - if (pVariant->i64 > FLT_MAX || pVariant->i64 < -FLT_MAX) { + + if (pVariant->i64 > FLT_MAX || pVariant->i64 < -FLT_MAX) { SET_EXT_INFO(converted, pVariant->i64, -FLT_MAX, FLT_MAX, extInfo); return -1; } @@ -764,12 +819,12 @@ int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc if (converted) { *converted = true; } - - if (pVariant->dKey > FLT_MAX || pVariant->dKey < -FLT_MAX) { + + if (pVariant->dKey > FLT_MAX || pVariant->dKey < -FLT_MAX) { SET_EXT_INFO(converted, pVariant->dKey, -FLT_MAX, FLT_MAX, extInfo); return -1; } - + SET_FLOAT_VAL(payload, pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_NULL) { *((uint32_t *)payload) = TSDB_DATA_FLOAT_NULL; @@ -814,8 +869,8 @@ int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc break; } - - case TSDB_DATA_TYPE_BINARY: { + + case TSDB_DATA_TYPE_BINARY:{ if (!includeLengthPrefix) { if (pVariant->nType == TSDB_DATA_TYPE_NULL) { *(uint8_t*) payload = TSDB_DATA_BINARY_NULL; @@ -852,7 +907,7 @@ int32_t tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc } break; } - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR:{ int32_t newlen = 0; if (!includeLengthPrefix) { if (pVariant->nType == TSDB_DATA_TYPE_NULL) { @@ -885,11 +940,21 @@ int32_t 
tVariantDumpEx(tVariant *pVariant, char *payload, int16_t type, bool inc assert(p == varDataVal(payload)); } } - + + break; + } + case TSDB_DATA_TYPE_JSON:{ + if (pVariant->nType == TSDB_DATA_TYPE_BINARY){ + *((int8_t *)payload) = TSDB_DATA_JSON_PLACEHOLDER; + } else if (pVariant->nType == TSDB_DATA_TYPE_JSON){ // select * from stable, set tag type to json,from setTagValue/tag_project_function + memcpy(payload, pVariant->pz, pVariant->nLen); + }else { + return -1; + } break; } } - + return 0; } @@ -904,13 +969,13 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) { if (pVariant == NULL || pVariant->nType == 0) { // value is not set return 0; } - + switch (type) { case TSDB_DATA_TYPE_BOOL: { // bool if (convertToBool(pVariant, &pVariant->i64) < 0) { return -1; } - + pVariant->nType = type; break; } @@ -931,7 +996,7 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) { free(pVariant->pz); return -1; } - + free(pVariant->pz); pVariant->dKey = v; } else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { @@ -941,14 +1006,14 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) { free(pVariant->pz); return -1; } - + free(pVariant->pz); pVariant->dKey = v; } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { double tmp = (double) pVariant->i64; pVariant->dKey = tmp; } - + pVariant->nType = TSDB_DATA_TYPE_DOUBLE; break; } @@ -969,6 +1034,6 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) { break; } } - + return 0; } diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs deleted file mode 100644 index f9a5890eedb8714616cb4d624f9036ffdeef35fb..0000000000000000000000000000000000000000 --- a/src/connector/C#/TDengineDriver.cs +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. 
- * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case 
TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = 
"taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - //get precision in restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - - //schemaless API - [DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision); - } -} diff --git a/src/connector/C#/csharpTaos.sln 
b/src/connector/C#/csharpTaos.sln new file mode 100644 index 0000000000000000000000000000000000000000..b18ca230011c1314fb354feeb61166374c822d3d --- /dev/null +++ b/src/connector/C#/csharpTaos.sln @@ -0,0 +1,72 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.30114.105 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{A1FB5B66-E32F-4789-9BE9-042E5BD21087}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TDengineDriver", "src\TDengineDriver\TDengineDriver.csproj", "{5BED7402-0A65-4ED9-A491-C56BFB518045}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{CB8E6458-31E1-4351-B704-1B918E998654}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "XUnitTest", "src\test\XUnitTest\XUnitTest.csproj", "{64C0A478-2591-4459-9F8F-A70F37976A41}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Cases", "src\test\Cases\Cases.csproj", "{19A69D26-66BF-4227-97BE-9B087BC76B2F}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|Any CPU.Build.0 = Debug|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x64.ActiveCfg = Debug|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x64.Build.0 = Debug|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x86.ActiveCfg = Debug|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x86.Build.0 = Debug|Any CPU + 
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|Any CPU.ActiveCfg = Release|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|Any CPU.Build.0 = Release|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x64.ActiveCfg = Release|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x64.Build.0 = Release|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x86.ActiveCfg = Release|Any CPU + {5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x86.Build.0 = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|Any CPU.Build.0 = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x64.ActiveCfg = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x64.Build.0 = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x86.ActiveCfg = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x86.Build.0 = Debug|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|Any CPU.ActiveCfg = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|Any CPU.Build.0 = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x64.ActiveCfg = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x64.Build.0 = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.ActiveCfg = Release|Any CPU + {64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.Build.0 = Release|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.ActiveCfg = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.Build.0 = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.ActiveCfg = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.Build.0 = Debug|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.ActiveCfg = Release|Any CPU + 
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.Build.0 = Release|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.ActiveCfg = Release|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.Build.0 = Release|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.ActiveCfg = Release|Any CPU + {19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {5BED7402-0A65-4ED9-A491-C56BFB518045} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087} + {CB8E6458-31E1-4351-B704-1B918E998654} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087} + {64C0A478-2591-4459-9F8F-A70F37976A41} = {CB8E6458-31E1-4351-B704-1B918E998654} + {19A69D26-66BF-4227-97BE-9B087BC76B2F} = {CB8E6458-31E1-4351-B704-1B918E998654} + EndGlobalSection +EndGlobal diff --git a/src/connector/C#/src/TDengineDriver/TDengineDriver.cs b/src/connector/C#/src/TDengineDriver/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..1bfb8eae0729f9d5d68734209cb4cc5ef36d8c6a --- /dev/null +++ b/src/connector/C#/src/TDengineDriver/TDengineDriver.cs @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + public enum TDengineDataType + { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT = 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT = 14 // 8 bytes + } + + public enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + enum TaosField + { + STRUCT_SIZE = 68, + NAME_LENGTH = 65, + TYPE_OFFSET = 65, + BYTES_OFFSET = 66, + + } + public class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOL"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "TINYINT"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; 
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Ansi)] + public struct TAOS_BIND + { + // column type + public int buffer_type; + // one column value + public IntPtr buffer; + // unused + public Int32 buffer_length; + // actual value length in buffer + public IntPtr length; + // indicates the column value is null or not + public IntPtr is_null; + // unused + public int is_unsigned; + // unused + public IntPtr error; + public Int64 u; + public uint allocated; + } + + + [StructLayout(LayoutKind.Sequential)] + public struct TAOS_MULTI_BIND + { + // column type + public int buffer_type; + + // array, one or more lines column value + public IntPtr buffer; + + //length of element in TAOS_MULTI_BIND.buffer (for binary and nchar it is the longest element's length) + public ulong buffer_length; + + //array, actual data length for each value + public IntPtr length; + + //array, indicates each column value is null or not + public IntPtr is_null; + + // line number, or the values number in buffer + public int num; + } + + + public class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + 
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + // const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * (int)TaosField.STRUCT_SIZE; + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + (int)TaosField.TYPE_OFFSET); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + (int)TaosField.BYTES_OFFSET); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr 
FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + + //get precision of restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); + + + + //stmt APIs: + /// + /// init a TAOS_STMT object for later use. + /// + /// a valid taos connection + /// + /// Not NULL returned for success, NULL for failure. And it should be freed with taos_stmt_close. + /// + [DllImport("taos", EntryPoint = "taos_stmt_init", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr StmtInit(IntPtr taos); + + /// + /// prepare a sql statement,'sql' should be a valid INSERT/SELECT statement. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// sql string,used to bind parameters with + /// no used + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_prepare", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtPrepare(IntPtr stmt, string sql); + + /// + /// For INSERT only. Used to bind table name as a parmeter for the input stmt object. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// table name you want to bind + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_set_tbname", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtSetTbname(IntPtr stmt, string name); + + /// + /// For INSERT only. + /// Set a table name for binding table name as parameter. Only used for binding all tables + /// in one stable, user application must call 'loadTableInfo' API to load all table + /// meta before calling this API. If the table meta is not cached locally, it will return error. 
+ /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// table name which is belong to an stable + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_set_sub_tbname", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtSetSubTbname(IntPtr stmt, string name); + + /// + /// For INSERT only. + /// set a table name for binding table name as parameter and tag values for all tag parameters. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// use to set table name + /// + /// is an array contains all tag values,each item in the array represents a tag column's value. + /// the item number and sequence should keep consistence with that in stable tag definition. + /// + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_set_tbname_tags", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtSetTbnameTags(IntPtr stmt, string name, TAOS_BIND[] tags); + + /// + /// For both INSERT and SELECT. + /// bind a whole line data. + /// The usage of structure TAOS_BIND is the same with MYSQL_BIND in MySQL. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// + /// points to an array contains the whole line data. + /// the item number and sequence should keep consistence with columns in sql statement. + /// + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_bind_param", CallingConvention = CallingConvention.Cdecl, SetLastError = true)] + static extern public int StmtBindParam(IntPtr stmt, TAOS_BIND[] bind); + + /// + /// bind a single column's data, INTERNAL used and for INSERT only. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// points to a column's data which could be the one or more lines. 
+ /// the column's index in prepared sql statement, it starts from 0. + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_bind_single_param_batch", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtBindSingleParamBatch(IntPtr stmt, ref TAOS_MULTI_BIND bind, int colIdx); + + /// + /// for INSERT only + /// bind one or multiple lines data. The parameter 'bind' + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// + /// points to an array contains one or more lines data.Each item in array represents a column's value(s), + /// the item number and sequence should keep consistence with columns in sql statement. + /// + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_bind_param_batch", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtBindParamBatch(IntPtr stmt, [In, Out] TAOS_MULTI_BIND[] bind); + + /// + /// For INSERT only. + /// add all current bound parameters to batch process. Must be called after each call to + /// StmtBindParam/StmtBindSingleParamBatch, or all columns binds for one or more lines + /// with StmtBindSingleParamBatch. User application can call any bind parameter + /// API again to bind more data lines after calling to this API. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_add_batch", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtAddBatch(IntPtr stmt); + + /// + /// actually execute the INSERT/SELECT sql statement. + /// User application can continue to bind new data after calling to this API. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. 
+ /// + [DllImport("taos", EntryPoint = "taos_stmt_execute", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtExecute(IntPtr stmt); + + /// + /// For SELECT only,getting the query result. User application should free it with API 'FreeResult' at the end. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// Not NULL for success, NULL for failure. + [DllImport("taos", EntryPoint = "taos_stmt_use_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr StmtUseResult(IntPtr stmt); + + /// + /// close STMT object and free resources. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// 0 for success, non-zero for failure. + [DllImport("taos", EntryPoint = "taos_stmt_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int StmtClose(IntPtr stmt); + + [DllImport("taos", EntryPoint = "taos_load_table_info", CallingConvention = CallingConvention.Cdecl)] + /// + /// user application must call this API to load all tables meta, + /// + /// taos connection + /// tablelist + /// + static extern private int LoadTableInfoDll(IntPtr taos, string tableList); + + /// + /// user application call this API to load all tables meta,this method call the native + /// method LoadTableInfoDll. + /// this method must be called before StmtSetSubTbname(IntPtr stmt, string name); + /// + /// taos connection + /// tables need to load meta info are form in an array + /// + static public int LoadTableInfo(IntPtr taos, string[] tableList) + { + string listStr = string.Join(",", tableList); + return LoadTableInfoDll(taos, listStr); + } + + /// + /// get detail error message when got failure for any stmt API call. If not failure, the result + /// returned in this API is unknown. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. 
+ /// piont the error message + [DllImport("taos", EntryPoint = "taos_stmt_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr StmtErrPtr(IntPtr stmt); + + /// + /// get detail error message when got failure for any stmt API call. If not failure, the result + /// returned in this API is unknown. + /// + /// could be the value returned by 'StmtInit', that may be a valid object or NULL. + /// error string + static public string StmtErrorStr(IntPtr stmt) + { + IntPtr stmtErrPrt = StmtErrPtr(stmt); + return Marshal.PtrToStringAnsi(stmtErrPrt); + } + } +} diff --git a/src/connector/C#/src/TDengineDriver/TDengineDriver.csproj b/src/connector/C#/src/TDengineDriver/TDengineDriver.csproj new file mode 100644 index 0000000000000000000000000000000000000000..f208d303c9811fa05807ef8f72685b8ebb536a37 --- /dev/null +++ b/src/connector/C#/src/TDengineDriver/TDengineDriver.csproj @@ -0,0 +1,7 @@ + + + + net5.0 + + + diff --git a/src/connector/C#/src/TDengineDriver/TaosBind.cs b/src/connector/C#/src/TDengineDriver/TaosBind.cs new file mode 100644 index 0000000000000000000000000000000000000000..694dcd900bccedc913ce9d1956650f97957965da --- /dev/null +++ b/src/connector/C#/src/TDengineDriver/TaosBind.cs @@ -0,0 +1,332 @@ +using System; +using System.Runtime.InteropServices; + + +namespace TDengineDriver +{ + /// + /// this class used to get an instance of struct of TAO_BIND or TAOS_MULTI_BIND + /// And the instance is corresponding with TDengine data type. For example, calling + /// "bindBinary" will return a TAOS_BIND object that is corresponding with TDengine's + /// binary type. 
+ /// + public class TaosBind + { + public static TAOS_BIND BindBool(bool val) + { + TAOS_BIND bind = new TAOS_BIND(); + byte[] boolByteArr = BitConverter.GetBytes(val); + int boolByteArrSize = Marshal.SizeOf(boolByteArr[0]) * boolByteArr.Length; + IntPtr bo = Marshal.AllocHGlobal(1); + Marshal.Copy(boolByteArr, 0, bo, boolByteArr.Length); + + int length = sizeof(Boolean); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BOOL; + bind.buffer = bo; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + public static TAOS_BIND BindTinyInt(sbyte val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] tinyIntByteArr = BitConverter.GetBytes(val); + int tinyIntByteArrSize = Marshal.SizeOf(tinyIntByteArr[0]) * tinyIntByteArr.Length; + IntPtr uManageTinyInt = Marshal.AllocHGlobal(tinyIntByteArrSize); + Marshal.Copy(tinyIntByteArr, 0, uManageTinyInt, tinyIntByteArr.Length); + + int length = sizeof(sbyte); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_TINYINT; + bind.buffer = uManageTinyInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + return bind; + + } + + public static TAOS_BIND BindSmallInt(short val) + { + + TAOS_BIND bind = new TAOS_BIND(); + IntPtr uManageSmallInt = Marshal.AllocHGlobal(sizeof(short)); + Marshal.WriteInt16(uManageSmallInt, val); + + int length = sizeof(short); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_SMALLINT; + bind.buffer = uManageSmallInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindInt(int val) + { + TAOS_BIND bind = new TAOS_BIND(); + IntPtr 
uManageInt = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(uManageInt, val); + + int length = sizeof(int); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_INT; + bind.buffer = uManageInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindBigInt(long val) + { + + TAOS_BIND bind = new TAOS_BIND(); + IntPtr uManageBigInt = Marshal.AllocHGlobal(sizeof(long)); + Marshal.WriteInt64(uManageBigInt, val); + + int length = sizeof(long); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BIGINT; + bind.buffer = uManageBigInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindUTinyInt(byte val) + { + TAOS_BIND bind = new TAOS_BIND(); + + IntPtr uManageTinyInt = Marshal.AllocHGlobal(sizeof(byte)); + Marshal.WriteByte(uManageTinyInt, val); + + int length = sizeof(byte); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UTINYINT; + bind.buffer = uManageTinyInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindUSmallInt(UInt16 val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] uSmallIntByteArr = BitConverter.GetBytes(val); + int usmallSize = Marshal.SizeOf(uSmallIntByteArr[0]) * uSmallIntByteArr.Length; + IntPtr uManageUnsignSmallInt = Marshal.AllocHGlobal(usmallSize); + Marshal.Copy(uSmallIntByteArr, 0, uManageUnsignSmallInt, uSmallIntByteArr.Length); + + int length = sizeof(UInt16); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = 
(int)TDengineDataType.TSDB_DATA_TYPE_USMALLINT; + bind.buffer = uManageUnsignSmallInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindUInt(uint val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] uManageIntByteArr = BitConverter.GetBytes(val); + int usmallSize = Marshal.SizeOf(uManageIntByteArr[0]) * uManageIntByteArr.Length; + IntPtr uManageInt = Marshal.AllocHGlobal(usmallSize); + Marshal.Copy(uManageIntByteArr, 0, uManageInt, uManageIntByteArr.Length); + + int length = sizeof(uint); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UINT; + bind.buffer = uManageInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindUBigInt(ulong val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] uManageBigIntByteArr = BitConverter.GetBytes(val); + int usmallSize = Marshal.SizeOf(uManageBigIntByteArr[0]) * uManageBigIntByteArr.Length; + IntPtr uManageBigInt = Marshal.AllocHGlobal(usmallSize); + Marshal.Copy(uManageBigIntByteArr, 0, uManageBigInt, uManageBigIntByteArr.Length); + + int length = sizeof(ulong); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UBIGINT; + bind.buffer = uManageBigInt; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindFloat(float val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] floatByteArr = BitConverter.GetBytes(val); + int floatByteArrSize = Marshal.SizeOf(floatByteArr[0]) * floatByteArr.Length; + IntPtr uManageFloat = Marshal.AllocHGlobal(floatByteArrSize); + Marshal.Copy(floatByteArr, 0, uManageFloat, floatByteArr.Length); + + int length = sizeof(float); + IntPtr 
lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_FLOAT; + bind.buffer = uManageFloat; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindDouble(Double val) + { + TAOS_BIND bind = new TAOS_BIND(); + + byte[] doubleByteArr = BitConverter.GetBytes(val); + int doubleByteArrSize = Marshal.SizeOf(doubleByteArr[0]) * doubleByteArr.Length; + IntPtr uManageDouble = Marshal.AllocHGlobal(doubleByteArrSize); + Marshal.Copy(doubleByteArr, 0, uManageDouble, doubleByteArr.Length); + + int length = sizeof(Double); + IntPtr lengPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_DOUBLE; + bind.buffer = uManageDouble; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindBinary(String val) + { + + TAOS_BIND bind = new TAOS_BIND(); + IntPtr umanageBinary = Marshal.StringToHGlobalAnsi(val); + + int leng = val.Length; + IntPtr lenPtr = Marshal.AllocHGlobal(sizeof(ulong)); + Marshal.WriteInt64(lenPtr, leng); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BINARY; + bind.buffer = umanageBinary; + bind.buffer_length = leng; + bind.length = lenPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + public static TAOS_BIND BindNchar(String val) + { + TAOS_BIND bind = new TAOS_BIND(); + IntPtr umanageNchar = (IntPtr)Marshal.StringToHGlobalAnsi(val); + + int leng = val.Length; + IntPtr lenPtr = Marshal.AllocHGlobal(sizeof(ulong)); + Marshal.WriteInt64(lenPtr, leng); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_NCHAR; + bind.buffer = umanageNchar; + bind.buffer_length = leng; + bind.length = lenPtr; + bind.is_null = IntPtr.Zero; + + return bind; + } + + public static TAOS_BIND BindNil() + { + TAOS_BIND bind = new 
TAOS_BIND(); + + int isNull = 1;//IntPtr.Size; + IntPtr lenPtr = Marshal.AllocHGlobal(sizeof(int)); + Marshal.WriteInt32(lenPtr, isNull); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_NULL; + bind.is_null = lenPtr; + return bind; + } + + public static TAOS_BIND BindTimestamp(long ts) + { + + TAOS_BIND bind = new TAOS_BIND(); + IntPtr uManageTs = Marshal.AllocHGlobal(sizeof(long)); + Marshal.WriteInt64(uManageTs, ts); + + int length = sizeof(long); + IntPtr lengPtr = Marshal.AllocHGlobal(4); + Marshal.WriteInt32(lengPtr, length); + + bind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP; + bind.buffer = uManageTs; + bind.buffer_length = length; + bind.length = lengPtr; + bind.is_null = IntPtr.Zero; + + return bind; + + } + + public static void FreeTaosBind(TAOS_BIND[] binds) + { + foreach (TAOS_BIND bind in binds) + { + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + if (bind.is_null != IntPtr.Zero) + { + // Console.WriteLine(bind.is_null); + Marshal.FreeHGlobal(bind.is_null); + } + + } + } + } + +} \ No newline at end of file diff --git a/src/connector/C#/src/TDengineDriver/TaosMultiBind.cs b/src/connector/C#/src/TDengineDriver/TaosMultiBind.cs new file mode 100644 index 0000000000000000000000000000000000000000..e01558caeb0905826c77fe97ee6d7147ff8b923e --- /dev/null +++ b/src/connector/C#/src/TDengineDriver/TaosMultiBind.cs @@ -0,0 +1,616 @@ +using System; +using System.Text; +using System.Runtime.InteropServices; + + +namespace TDengineDriver +{ + public class TaosMultiBind + { + public static TAOS_MULTI_BIND MultiBindBool(bool?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + //the size of buffer array element + int typeSize = sizeof(bool); + //size of int + int intSize = sizeof(int); + int byteSize = sizeof(byte); + + //TAOS_MULTI_BIND.buffer + IntPtr unmanagedBoolArr = Marshal.AllocHGlobal(elementCount * typeSize); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = 
Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(unmanagedBoolArr, typeSize * i, Convert.ToByte(arr[i] ?? false)); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 1 : 0)); + } + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BOOL; + multiBind.buffer = unmanagedBoolArr; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindTinyInt(sbyte?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + //the size of buffer array element + int typeSize = sizeof(byte); + int byteSize = sizeof(byte); + //size of int + int intSize = sizeof(int); + + //TAOS_MULTI_BIND.buffer + IntPtr unmanagedTintIntArr = Marshal.AllocHGlobal(elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(intSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + Byte[] toByteArr = BitConverter.GetBytes(arr[i] ?? sbyte.MinValue); + + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(unmanagedTintIntArr, typeSize * i, toByteArr[0]); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_TINYINT; + multiBind.buffer = unmanagedTintIntArr; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindSmallInt(short?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + //the size of buffer array element + int typeSize = sizeof(short); + //size of int + int intSize = sizeof(int); + int byteSize = sizeof(byte); + + //TAOS_MULTI_BIND.buffer + IntPtr unmanagedSmallIntArr = Marshal.AllocHGlobal(elementCount * typeSize); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteInt16(unmanagedSmallIntArr, typeSize * i, arr[i] ?? short.MinValue); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + + } + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_SMALLINT; + multiBind.buffer = unmanagedSmallIntArr; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindInt(int?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(int); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + + //TAOS_MULTI_BIND.buffer + IntPtr intBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteInt32(intBuff, typeSize * i, arr[i] ?? int.MinValue); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + + } + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_INT; + multiBind.buffer = intBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindBigint(long?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(long); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + + //TAOS_MULTI_BIND.buffer + IntPtr intBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteInt64(intBuff, typeSize * i, arr[i] ?? long.MinValue); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + + + } + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BIGINT; + multiBind.buffer = intBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindFloat(float?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(float); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + float[] arrTmp = new float[elementCount]; + + //TAOS_MULTI_BIND.buffer + IntPtr floatBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + arrTmp[i] = arr[i] ?? float.MinValue; + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + //set TAOS_MULTI_BIND.buffer + Marshal.Copy(arrTmp, 0, floatBuff, elementCount); + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_FLOAT; + multiBind.buffer = floatBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindDouble(double?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(double); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + double[] arrTmp = new double[elementCount]; + + //TAOS_MULTI_BIND.buffer + IntPtr doubleBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + arrTmp[i] = arr[i] ?? double.MinValue; + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + //set TAOS_MULTI_BIND.buffer + Marshal.Copy(arrTmp, 0, doubleBuff, elementCount); + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_DOUBLE; + multiBind.buffer = doubleBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindUTinyInt(byte?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(byte); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + + //TAOS_MULTI_BIND.buffer + IntPtr uTinyIntBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(uTinyIntBuff, typeSize * i, arr[i] ?? byte.MaxValue); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UTINYINT; + multiBind.buffer = uTinyIntBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindUSmallInt(ushort?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(ushort); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + + //TAOS_MULTI_BIND.buffer + IntPtr uSmallIntBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + byte[] byteArr = BitConverter.GetBytes(arr[i] ?? ushort.MaxValue); + for (int j = 0; j < byteArr.Length; j++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(uSmallIntBuff, typeSize * i + j * byteSize, byteArr[j]); + } + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_USMALLINT; + multiBind.buffer = uSmallIntBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindUInt(uint?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(uint); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + + //TAOS_MULTI_BIND.buffer + IntPtr uIntBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + byte[] byteArr = BitConverter.GetBytes(arr[i] ?? uint.MaxValue); + for (int j = 0; j < byteArr.Length; j++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(uIntBuff, typeSize * i + j * byteSize, byteArr[j]); + } + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UINT; + multiBind.buffer = uIntBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindUBigInt(ulong?[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(ulong); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //used to replace null + + //TAOS_MULTI_BIND.buffer + IntPtr uBigIntBuff = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + + for (int i = 0; i < elementCount; i++) + { + byte[] byteArr = BitConverter.GetBytes(arr[i] ?? ulong.MaxValue); + for (int j = 0; j < byteArr.Length; j++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteByte(uBigIntBuff, typeSize * i + j * byteSize, byteArr[j]); + } + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(arr[i].Equals(null) ? 
1 : 0)); + } + + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_UBIGINT; + multiBind.buffer = uBigIntBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + public static TAOS_MULTI_BIND MultiBindBinary(string[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = MaxElementLength(arr); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + StringBuilder arrStrBuilder = new StringBuilder(); ; + + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + int itemLength = 0; + // if element if not null and element length is less then typeSize + // fill the memory with default char.Since arr element memory need align. + if (!String.IsNullOrEmpty(arr[i]) && typeSize <= arr[i].Length) + { + itemLength = arr[i].Length; + arrStrBuilder.Append(arr[i]); + } + else if (!String.IsNullOrEmpty(arr[i]) && typeSize > arr[i].Length) + { + itemLength = arr[i].Length; + arrStrBuilder.Append(arr[i]); + arrStrBuilder.Append(AlignCharArr(typeSize - arr[i].Length)); + } + else + { + // if is null value,fill the memory with default values. + itemLength = 0; + arrStrBuilder.Append(AlignCharArr(typeSize)); + } + + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, itemLength); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(String.IsNullOrEmpty(arr[i]) ? 
1 : 0)); + } + //set TAOS_MULTI_BIND.buffer + IntPtr uBinaryBuff = (IntPtr)Marshal.StringToHGlobalAnsi(arrStrBuilder.ToString()); + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BINARY; + multiBind.buffer = uBinaryBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + + public static TAOS_MULTI_BIND MultiBindNchar(string[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = MaxElementLength(arr); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + StringBuilder arrStrBuilder = new StringBuilder(); ; + + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + int itemLength = 0; + // if element if not null and element length is less then typeSize + // fill the memory with default char.Since arr element memory need align. + if (!String.IsNullOrEmpty(arr[i]) && typeSize <= arr[i].Length) + { + itemLength = arr[i].Length; + arrStrBuilder.Append(arr[i]); + } + else if (!String.IsNullOrEmpty(arr[i]) && typeSize > arr[i].Length) + { + itemLength = arr[i].Length; + arrStrBuilder.Append(arr[i]); + arrStrBuilder.Append(AlignCharArr(typeSize - arr[i].Length)); + } + else + { + // if is null value,fill the memory with default values. + itemLength = 0; + arrStrBuilder.Append(AlignCharArr(typeSize)); + } + + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, itemLength); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(String.IsNullOrEmpty(arr[i]) ? 
1 : 0)); + } + //set TAOS_MULTI_BIND.buffer + IntPtr uNcharBuff = (IntPtr)Marshal.StringToHGlobalAnsi(arrStrBuilder.ToString()); + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_NCHAR; + multiBind.buffer = uNcharBuff; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + + public static TAOS_MULTI_BIND MultiBindTimestamp(long[] arr) + { + TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND(); + int elementCount = arr.Length; + int typeSize = sizeof(long); + int intSize = sizeof(int); + int byteSize = sizeof(byte); + //TAOS_MULTI_BIND.buffer + IntPtr unmanagedTsArr = Marshal.AllocHGlobal(typeSize * elementCount); + //TAOS_MULTI_BIND.length + IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount); + //TAOS_MULTI_BIND.is_null + IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount); + + for (int i = 0; i < elementCount; i++) + { + //set TAOS_MULTI_BIND.buffer + Marshal.WriteInt64(unmanagedTsArr, typeSize * i, arr[i]); + //set TAOS_MULTI_BIND.length + Marshal.WriteInt32(lengthArr, intSize * i, typeSize); + //set TAOS_MULTI_BIND.is_null + Marshal.WriteByte(nullArr, byteSize * i, 0); + } + + //config TAOS_MULTI_BIND + multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP; + multiBind.buffer = unmanagedTsArr; + multiBind.buffer_length = (ulong)typeSize; + multiBind.length = lengthArr; + multiBind.is_null = nullArr; + multiBind.num = elementCount; + + return multiBind; + } + + public static void FreeTaosBind(TAOS_MULTI_BIND[] mBinds) + { + foreach (TAOS_MULTI_BIND bind in mBinds) + { + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + Marshal.FreeHGlobal(bind.is_null); + } + } + + private static char[] AlignCharArr(int offSet) + { + char[] alignChar = new char[offSet]; + for (int i = 0; i < offSet; i++) + { + alignChar[i] = char.MinValue; + } + return alignChar; + } + 
+ private static int MaxElementLength(String[] strArr) + { + int max = 0; + for (int i = 0; i < strArr.Length; i++) + { + if (!String.IsNullOrEmpty(strArr[i]) && max < strArr[i].Length) + { + max = strArr[i].Length; + } + } + return max; + } + } + +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/Cases.csproj b/src/connector/C#/src/test/Cases/Cases.csproj new file mode 100644 index 0000000000000000000000000000000000000000..f2ae6938fb4b8c58d9bb657e5fb504814068e92e --- /dev/null +++ b/src/connector/C#/src/test/Cases/Cases.csproj @@ -0,0 +1,12 @@ + + + + + + + + Exe + net5.0 + + + diff --git a/src/connector/C#/src/test/Cases/DataSource.cs b/src/connector/C#/src/test/Cases/DataSource.cs new file mode 100644 index 0000000000000000000000000000000000000000..e422c70bf1d4b45a752984e3290fa8751d8ff41c --- /dev/null +++ b/src/connector/C#/src/test/Cases/DataSource.cs @@ -0,0 +1,103 @@ +using System; +using Test.UtilsTools; +using TDengineDriver; + +namespace Test.UtilsTools.DataSource +{ + public class DataSource + { + public static long[] tsArr = new long[5] { 1637064040000, 1637064041000, 1637064042000, 1637064043000, 1637064044000 }; + public static bool?[] boolArr = new bool?[5] { true, false, null, true, true }; + public static sbyte?[] tinyIntArr = new sbyte?[5] { -127, 0, null, 8, 127 }; + public static short?[] shortArr = new short?[5] { short.MinValue + 1, -200, null, 100, short.MaxValue }; + public static int?[] intArr = new int?[5] { -200, -100, null, 0, 300 }; + public static long?[] longArr = new long?[5] { long.MinValue + 1, -2000, null, 1000, long.MaxValue }; + public static float?[] floatArr = new float?[5] { float.MinValue + 1, -12.1F, null, 0F, float.MaxValue }; + public static double?[] doubleArr = new double?[5] { double.MinValue + 1, -19.112D, null, 0D, double.MaxValue }; + public static byte?[] uTinyIntArr = new byte?[5] { byte.MinValue, 12, null, 89, byte.MaxValue - 1 }; + public static ushort?[] uShortArr = new ushort?[5] { 
ushort.MinValue, 200, null, 400, ushort.MaxValue - 1 }; + public static uint?[] uIntArr = new uint?[5] { uint.MinValue, 100, null, 2, uint.MaxValue - 1 }; + public static ulong?[] uLongArr = new ulong?[5] { ulong.MinValue, 2000, null, 1000, long.MaxValue - 1 }; + public static string[] binaryArr = new string[5] { "1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", String.Empty, null, "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM", "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890~!@#$%^&*()_+=-`[]{}:,./<>?" }; + public static string[] ncharArr = new string[5] { "1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", null, "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM", "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", string.Empty }; + + + public static TAOS_BIND[] getTags() + { + TAOS_BIND[] binds = new TAOS_BIND[13]; + binds[0] = TaosBind.BindBool(true); + binds[1] = TaosBind.BindTinyInt(-2); + binds[2] = TaosBind.BindSmallInt(short.MaxValue); + binds[3] = TaosBind.BindInt(int.MaxValue); + binds[4] = TaosBind.BindBigInt(Int64.MaxValue); + binds[5] = TaosBind.BindUTinyInt(byte.MaxValue - 1); + binds[6] = TaosBind.BindUSmallInt(UInt16.MaxValue - 1); + binds[7] = TaosBind.BindUInt(uint.MinValue + 1); + binds[8] = TaosBind.BindUBigInt(UInt64.MinValue + 1); + binds[9] = TaosBind.BindFloat(11.11F); + binds[10] = TaosBind.BindDouble(22.22D); + binds[11] = TaosBind.BindBinary("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + binds[12] = TaosBind.BindNchar("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + return binds; + } + public static TAOS_BIND[] getNtableRow() + { + TAOS_BIND[] binds = new TAOS_BIND[15]; + binds[0] = TaosBind.BindTimestamp(1637064040000); + binds[1] = TaosBind.BindTinyInt(-2); + binds[2] = TaosBind.BindSmallInt(short.MaxValue); + binds[3] = TaosBind.BindInt(int.MaxValue); + binds[4] = 
TaosBind.BindBigInt(Int64.MaxValue); + binds[5] = TaosBind.BindUTinyInt(byte.MaxValue - 1); + binds[6] = TaosBind.BindUSmallInt(UInt16.MaxValue - 1); + binds[7] = TaosBind.BindUInt(uint.MinValue + 1); + binds[8] = TaosBind.BindUBigInt(UInt64.MinValue + 1); + binds[9] = TaosBind.BindFloat(11.11F); + binds[10] = TaosBind.BindDouble(22.22D); + binds[11] = TaosBind.BindBinary("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + binds[12] = TaosBind.BindNchar("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + binds[13] = TaosBind.BindBool(true); + binds[14] = TaosBind.BindNil(); + return binds; + } + + public static TAOS_MULTI_BIND[] GetMultiBindArr() + { + TAOS_MULTI_BIND[] mBinds = new TAOS_MULTI_BIND[14]; + mBinds[0] = TaosMultiBind.MultiBindTimestamp(tsArr); + mBinds[1] = TaosMultiBind.MultiBindBool(boolArr); + mBinds[2] = TaosMultiBind.MultiBindTinyInt(tinyIntArr); + mBinds[3] = TaosMultiBind.MultiBindSmallInt(shortArr); + mBinds[4] = TaosMultiBind.MultiBindInt(intArr); + mBinds[5] = TaosMultiBind.MultiBindBigint(longArr); + mBinds[6] = TaosMultiBind.MultiBindFloat(floatArr); + mBinds[7] = TaosMultiBind.MultiBindDouble(doubleArr); + mBinds[8] = TaosMultiBind.MultiBindUTinyInt(uTinyIntArr); + mBinds[9] = TaosMultiBind.MultiBindUSmallInt(uShortArr); + mBinds[10] = TaosMultiBind.MultiBindUInt(uIntArr); + mBinds[11] = TaosMultiBind.MultiBindUBigInt(uLongArr); + mBinds[12] = TaosMultiBind.MultiBindBinary(binaryArr); + mBinds[13] = TaosMultiBind.MultiBindNchar(ncharArr); + return mBinds; + } + public static TAOS_BIND[] GetQueryCondition() + { + TAOS_BIND[] queryCondition = new TAOS_BIND[2]; + queryCondition[0] = TaosBind.BindTinyInt(0); + queryCondition[1] = TaosBind.BindInt(1000); + return queryCondition; + + } + public static void FreeTaosBind(TAOS_BIND[] binds) + { + TaosBind.FreeTaosBind(binds); + } + + public static void FreeTaosMBind(TAOS_MULTI_BIND[] mbinds) + { + 
TaosMultiBind.FreeTaosBind(mbinds); + } + + + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/Program.cs b/src/connector/C#/src/test/Cases/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..3d3b765b5bfb80c61b85d974bcc240d7a234d75d --- /dev/null +++ b/src/connector/C#/src/test/Cases/Program.cs @@ -0,0 +1,59 @@ +using System; +using Test.UtilsTools; +using Cases; + +namespace Cases.EntryPoint +{ + class Program + { + + static void Main(string[] args) + { + IntPtr conn = IntPtr.Zero; + IntPtr stmt = IntPtr.Zero; + IntPtr res = IntPtr.Zero; + + conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0); + UtilsTools.ExecuteQuery(conn, "drop database if exists csharp"); + UtilsTools.ExecuteQuery(conn, "create database if not exists csharp keep 3650"); + UtilsTools.ExecuteQuery(conn, "use csharp"); + + Console.WriteLine("====================StableColumnByColumn==================="); + StableColumnByColumn columnByColumn = new StableColumnByColumn(); + columnByColumn.Test(conn, "stablecolumnbycolumn"); + Console.WriteLine("====================StmtStableQuery==================="); + StmtStableQuery stmtStableQuery = new StmtStableQuery(); + stmtStableQuery.Test(conn, "stablecolumnbycolumn"); + + Console.WriteLine("====================StableMutipleLine==================="); + StableMutipleLine mutipleLine = new StableMutipleLine(); + mutipleLine.Test(conn, "stablemutipleline"); + + //================================================================================ + + Console.WriteLine("====================NtableSingleLine==================="); + NtableSingleLine ntableSingleLine = new NtableSingleLine(); + ntableSingleLine.Test(conn, "stablesingleline"); + + Console.WriteLine("====================NtableMutipleLine==================="); + NtableMutipleLine ntableMutipleLine = new NtableMutipleLine(); + ntableMutipleLine.Test(conn, "ntablemutipleline"); + 
Console.WriteLine("====================StmtNtableQuery==================="); + StmtNtableQuery stmtNtableQuery = new StmtNtableQuery(); + stmtNtableQuery.Test(conn, "ntablemutipleline"); + + Console.WriteLine("====================NtableColumnByColumn==================="); + NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn(); + ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn"); + + Console.WriteLine("====================fetchfeilds==================="); + FetchFields fetchFields = new FetchFields(); + fetchFields.Test(conn, "fetchfeilds"); + + UtilsTools.ExecuteQuery(conn, "drop database if exists csharp"); + UtilsTools.CloseConnection(conn); + UtilsTools.ExitProgram(); + + } + } +} diff --git a/src/connector/C#/src/test/Cases/StmtNormalTable.cs b/src/connector/C#/src/test/Cases/StmtNormalTable.cs new file mode 100644 index 0000000000000000000000000000000000000000..a918f6bada153bc64d0c31d10597526503d696f8 --- /dev/null +++ b/src/connector/C#/src/test/Cases/StmtNormalTable.cs @@ -0,0 +1,89 @@ +using System; +using Test.UtilsTools; +using TDengineDriver; +using Test.UtilsTools.DataSource; + +namespace Cases +{ + public class NtableSingleLine + { + + public void Test(IntPtr conn, string tableName) + { + String createTb = "create table " + tableName + "(ts timestamp,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200),bo bool,nullVal int);"; + String insertSql = "insert into ? 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + TAOS_BIND[] valuesRow = DataSource.getNtableRow(); + UtilsTools.ExecuteQuery(conn, createTb); + + IntPtr stmt = StmtUtilTools.StmtInit(conn); + StmtUtilTools.StmtPrepare(stmt, insertSql); + StmtUtilTools.SetTableName(stmt, tableName); + StmtUtilTools.BindParam(stmt, valuesRow); + StmtUtilTools.AddBatch(stmt); + StmtUtilTools.StmtExecute(stmt); + StmtUtilTools.StmtClose(stmt); + DataSource.FreeTaosBind(valuesRow); + } + } + + public class NtableMutipleLine + { + TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr(); + public void Test(IntPtr conn, string tableName) + { + String createTb = "create table " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200));"; + String insertSql = "insert into ? values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + UtilsTools.ExecuteQuery(conn, createTb); + String[] loadList = { tableName }; + IntPtr stmt = StmtUtilTools.StmtInit(conn); + StmtUtilTools.loadTableInfo(conn, loadList); + StmtUtilTools.StmtPrepare(stmt, insertSql); + StmtUtilTools.SetTableName(stmt, tableName); + StmtUtilTools.BindParamBatch(stmt, mbind); + StmtUtilTools.AddBatch(stmt); + StmtUtilTools.StmtExecute(stmt); + StmtUtilTools.StmtClose(stmt); + DataSource.FreeTaosMBind(mbind); + } + } + public class NtableColumnByColumn + { + DataSource data = new DataSource(); + TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr(); + public void Test(IntPtr conn, string tableName) + { + String createTb = "create table " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200));"; + String insertSql = "insert into ? 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + + UtilsTools.ExecuteQuery(conn, createTb); + IntPtr stmt = StmtUtilTools.StmtInit(conn); + + StmtUtilTools.StmtPrepare(stmt, insertSql); + + StmtUtilTools.SetTableName(stmt, tableName); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[0], 0); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[1], 1); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[2], 2); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[3], 3); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[4], 4); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[5], 5); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[6], 6); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[7], 7); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[8], 8); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[9], 9); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[10], 10); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[11], 11); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[12], 12); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[13], 13); + + StmtUtilTools.AddBatch(stmt); + StmtUtilTools.StmtExecute(stmt); + StmtUtilTools.StmtClose(stmt); + + DataSource.FreeTaosMBind(mbind); + + } + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/StmtQuery.cs b/src/connector/C#/src/test/Cases/StmtQuery.cs new file mode 100644 index 0000000000000000000000000000000000000000..7909376ad8d2166acc51e9f683bfbbac1dcd652e --- /dev/null +++ b/src/connector/C#/src/test/Cases/StmtQuery.cs @@ -0,0 +1,49 @@ +using System; +using Test.UtilsTools; +using TDengineDriver; +using Test.UtilsTools.DataSource; + +namespace Cases +{ + public class StmtStableQuery + { + public void Test(IntPtr conn, string tableName) + { + string selectSql = "SELECT * FROM " + tableName + " WHERE v1 > ? 
AND v4 < ?"; + TAOS_BIND[] queryCondition = DataSource.GetQueryCondition(); + + IntPtr stmt = StmtUtilTools.StmtInit(conn); + StmtUtilTools.StmtPrepare(stmt, selectSql); + + StmtUtilTools.BindParam(stmt, queryCondition); + StmtUtilTools.StmtExecute(stmt); + IntPtr res = StmtUtilTools.StmtUseResult(stmt); + UtilsTools.DisplayRes(res); + + StmtUtilTools.StmtClose(stmt); + DataSource.FreeTaosBind(queryCondition); + + } + } + + public class StmtNtableQuery + { + public void Test(IntPtr conn, string tableName) + { + string selectSql = "SELECT * FROM " + tableName + " WHERE v1 > ? AND v4 < ?"; + TAOS_BIND[] queryCondition = DataSource.GetQueryCondition(); + + IntPtr stmt = StmtUtilTools.StmtInit(conn); + StmtUtilTools.StmtPrepare(stmt, selectSql); + + StmtUtilTools.BindParam(stmt, queryCondition); + StmtUtilTools.StmtExecute(stmt); + IntPtr res = StmtUtilTools.StmtUseResult(stmt); + UtilsTools.DisplayRes(res); + + StmtUtilTools.StmtClose(stmt); + DataSource.FreeTaosBind(queryCondition); + + } + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/StmtStable.cs b/src/connector/C#/src/test/Cases/StmtStable.cs new file mode 100644 index 0000000000000000000000000000000000000000..f6024909d04b2a239f0b49ba5bba65eba3d2a718 --- /dev/null +++ b/src/connector/C#/src/test/Cases/StmtStable.cs @@ -0,0 +1,76 @@ +using System; +using Test.UtilsTools; +using TDengineDriver; +using Test.UtilsTools.DataSource; + +namespace Cases +{ + + public class StableMutipleLine + { + TAOS_BIND[] tags = DataSource.getTags(); + TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr(); + public void Test(IntPtr conn, string tableName) + { + String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int 
unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));"; + String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + UtilsTools.ExecuteQuery(conn, createTb); + String[] loadList = { tableName }; + IntPtr stmt = StmtUtilTools.StmtInit(conn); + StmtUtilTools.loadTableInfo(conn, loadList); + + StmtUtilTools.StmtPrepare(stmt, insertSql); + StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags); + StmtUtilTools.BindParamBatch(stmt, mbind); + StmtUtilTools.AddBatch(stmt); + StmtUtilTools.StmtExecute(stmt); + + StmtUtilTools.StmtClose(stmt); + DataSource.FreeTaosBind(tags); + DataSource.FreeTaosMBind(mbind); + } + } + public class StableColumnByColumn + { + DataSource data = new DataSource(); + + TAOS_BIND[] tags = DataSource.getTags(); + TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr(); + public void Test(IntPtr conn, string tableName) + { + String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));"; + String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + + UtilsTools.ExecuteQuery(conn, createTb); + IntPtr stmt = StmtUtilTools.StmtInit(conn); + + StmtUtilTools.StmtPrepare(stmt, insertSql); + + StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[0], 0); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[1], 1); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[2], 2); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[3], 3); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[4], 4); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[5], 5); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[6], 6); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[7], 7); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[8], 8); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[9], 9); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[10], 10); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[11], 11); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[12], 12); + StmtUtilTools.BindSingleParamBatch(stmt, mbind[13], 13); + StmtUtilTools.AddBatch(stmt); + StmtUtilTools.StmtExecute(stmt); + StmtUtilTools.StmtClose(stmt); + + DataSource.FreeTaosBind(tags); + DataSource.FreeTaosMBind(mbind); + + } + } + +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/StmtUtil.cs b/src/connector/C#/src/test/Cases/StmtUtil.cs new file mode 100644 index 0000000000000000000000000000000000000000..8f68157e095fd3aa3e47bb5aa19dc08d42be4628 --- /dev/null +++ b/src/connector/C#/src/test/Cases/StmtUtil.cs @@ -0,0 +1,198 @@ +using System; +using TDengineDriver; +using System.Runtime.InteropServices; + +namespace Test.UtilsTools +{ + public class StmtUtilTools + { + public static IntPtr StmtInit(IntPtr conn) + { + IntPtr stmt = TDengine.StmtInit(conn); + if (stmt == IntPtr.Zero) + { + Console.WriteLine("Init stmt failed"); + UtilsTools.CloseConnection(conn); + UtilsTools.ExitProgram(); + } + else + { + Console.WriteLine("Init stmt success"); + } + return 
stmt; + } + + public static void StmtPrepare(IntPtr stmt, string sql) + { + int res = TDengine.StmtPrepare(stmt, sql); + if (res == 0) + { + Console.WriteLine("stmt prepare success"); + } + else + { + Console.WriteLine("stmt prepare failed " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void SetTableName(IntPtr stmt, String tableName) + { + int res = TDengine.StmtSetTbname(stmt, tableName); + if (res == 0) + { + Console.WriteLine("set_tbname success"); + } + else + { + Console.Write("set_tbname failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void SetTableNameTags(IntPtr stmt, String tableName, TAOS_BIND[] tags) + { + int res = TDengine.StmtSetTbnameTags(stmt, tableName, tags); + if (res == 0) + { + Console.WriteLine("set tbname && tags success"); + + } + else + { + Console.Write("set tbname && tags failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void SetSubTableName(IntPtr stmt, string name) + { + int res = TDengine.StmtSetSubTbname(stmt, name); + if (res == 0) + { + Console.WriteLine("set subtable name success"); + } + else + { + Console.Write("set subtable name failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + + } + + public static void BindParam(IntPtr stmt, TAOS_BIND[] binds) + { + int res = TDengine.StmtBindParam(stmt, binds); + if (res == 0) + { + Console.WriteLine("bind para success"); + } + else + { + Console.Write("bind para failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void BindSingleParamBatch(IntPtr stmt, TAOS_MULTI_BIND bind, int index) + { + int res = TDengine.StmtBindSingleParamBatch(stmt, ref bind, index); + if (res == 0) + { + Console.WriteLine("single bind batch success"); + } + else + { + Console.Write("single bind batch failed: " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void BindParamBatch(IntPtr stmt, TAOS_MULTI_BIND[] bind) + { + int res = 
TDengine.StmtBindParamBatch(stmt, bind); + if (res == 0) + { + Console.WriteLine("bind parameter batch success"); + } + else + { + Console.WriteLine("bind parameter batch failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static void AddBatch(IntPtr stmt) + { + int res = TDengine.StmtAddBatch(stmt); + if (res == 0) + { + Console.WriteLine("stmt add batch success"); + } + else + { + Console.Write("stmt add batch failed,reason: " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + public static void StmtExecute(IntPtr stmt) + { + int res = TDengine.StmtExecute(stmt); + if (res == 0) + { + Console.WriteLine("Execute stmt success"); + } + else + { + Console.Write("Execute stmt failed,reason: " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + public static void StmtClose(IntPtr stmt) + { + int res = TDengine.StmtClose(stmt); + if (res == 0) + { + Console.WriteLine("close stmt success"); + } + else + { + Console.WriteLine("close stmt failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(stmt); + } + } + + public static IntPtr StmtUseResult(IntPtr stmt) + { + IntPtr res = TDengine.StmtUseResult(stmt); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + StmtClose(stmt); + } + else + { + Console.WriteLine("StmtUseResult success"); + + } + return res; + } + + public static void loadTableInfo(IntPtr conn, string[] arr) + { + if (TDengine.LoadTableInfo(conn, arr) == 0) + { + Console.WriteLine("load table info success"); + } + else + { + Console.WriteLine("load table info failed"); + } + } + + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/TaosFeild.cs b/src/connector/C#/src/test/Cases/TaosFeild.cs new file mode 100644 index 0000000000000000000000000000000000000000..b8c6d37ef5e3f14640d5e87504148c4ea7748e23 --- /dev/null +++ b/src/connector/C#/src/test/Cases/TaosFeild.cs @@ 
-0,0 +1,37 @@ +using System; +using Test.UtilsTools; +using TDengineDriver; +using System.Collections.Generic; +using System.Runtime.InteropServices; +namespace Cases +{ + public class FetchFields + { + public void Test(IntPtr conn, string tableName) + { + IntPtr res = IntPtr.Zero; + String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(id int);"; + String insertSql = "insert into " + tableName + "_t1 using " + tableName + " tags(1) values(1637064040000,true,1,2,3,4,5,6,7,8,9,10,'XI','XII')"; + String selectSql = "select * from " + tableName; + String dropSql = "drop table " + tableName; + UtilsTools.ExecuteQuery(conn, createTb); + UtilsTools.ExecuteQuery(conn, insertSql); + res = UtilsTools.ExecuteQuery(conn, selectSql); + UtilsTools.ExecuteQuery(conn, dropSql); + + List metas = new List(); + metas = TDengine.FetchFields(res); + if (metas.Capacity == 0) + { + Console.WriteLine("empty result"); + } + else + { + foreach(TDengineMeta meta in metas){ + Console.WriteLine("col_name:{0},col_type_code:{1},col_type:{2}({3})",meta.name,meta.type,meta.TypeName(),meta.size); + } + } + + } + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/Cases/Utils.cs b/src/connector/C#/src/test/Cases/Utils.cs new file mode 100644 index 0000000000000000000000000000000000000000..1c2ab137930384c0a85ae2103ff6be0893d7733b --- /dev/null +++ b/src/connector/C#/src/test/Cases/Utils.cs @@ -0,0 +1,167 @@ +using System; +using TDengineDriver; +using System.Runtime.InteropServices; +using System.Text; +using System.Collections.Generic; + +namespace Test.UtilsTools +{ + public class UtilsTools + { + + static string configDir = "C:/TDengine/cfg"; + + public static IntPtr TDConnection(string ip, string user, string password, string db, short port) + { + 
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + return TDengine.Connect(ip, user, password, db, port); + } + + public static IntPtr ExecuteQuery(IntPtr conn, String sql) + { + IntPtr res = TDengine.Query(conn, sql); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + + } + return res; + } + public static void DisplayRes(IntPtr res) + { + long queryRows = 0; + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + + int fieldCount = TDengine.FieldCount(res); + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + TDengine.FreeResult(res); Console.WriteLine(""); + } + + public static void CloseConnection(IntPtr conn) + { + if (conn != IntPtr.Zero) + { + if (TDengine.Close(conn) == 0) + { + Console.WriteLine("close connection sucess"); + } + else + { + Console.WriteLine("close Connection failed"); + } + } + } + public static List getField(IntPtr res) + { + List metas = TDengine.FetchFields(res); + return metas; + } + public static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + } +} diff --git 
a/src/connector/C#/src/test/XUnitTest/TestTDengineMeta.cs b/src/connector/C#/src/test/XUnitTest/TestTDengineMeta.cs new file mode 100644 index 0000000000000000000000000000000000000000..fcf86c994e9097168786c1803901866918806098 --- /dev/null +++ b/src/connector/C#/src/test/XUnitTest/TestTDengineMeta.cs @@ -0,0 +1,177 @@ +using System; +using Xunit; +using TDengineDriver; + +namespace TDengineDriver.Test +{ + public class TestTDengineMeta + { + [Fact] + public void TestTypeNameBool() + { + string typeName = "BOOL"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 1; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + + [Fact] + public void TestTypeNameTINYINT() + { + string typeName = "TINYINT"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 2; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameSMALLINT() + { + string typeName = "SMALLINT"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 3; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameINT() + { + string typeName = "INT"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 4; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameBIGINT() + { + string typeName = "BIGINT"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 5; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameUTINYINT() + { + string typeName = "TINYINT UNSIGNED"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 11; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] 
+ public void TestTypeNameUSMALLINT() + { + string typeName = "SMALLINT UNSIGNED"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 12; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameUINT() + { + string typeName = "INT UNSIGNED"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 13; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameUBIGINT() + { + string typeName = "BIGINT UNSIGNED"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 14; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + + [Fact] + public void TestTypeNameFLOAT() + { + string typeName = "FLOAT"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 6; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameDOUBLE() + { + string typeName = "DOUBLE"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 7; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameSTRING() + { + string typeName = "STRING"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 8; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameTIMESTAMP() + { + string typeName = "TIMESTAMP"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 9; + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameNCHAR() + { + string typeName = "NCHAR"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + meta.type = 10; + string 
metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + [Fact] + public void TestTypeNameUndefined() + { + string typeName = "undefine"; + TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta(); + + string metaTypeName = meta.TypeName(); + + Assert.Equal(metaTypeName, typeName); + + } + } +} diff --git a/src/connector/C#/src/test/XUnitTest/TestTaosBind.cs b/src/connector/C#/src/test/XUnitTest/TestTaosBind.cs new file mode 100644 index 0000000000000000000000000000000000000000..208bdcc02cf84db4af149ddc314d67db7b92b848 --- /dev/null +++ b/src/connector/C#/src/test/XUnitTest/TestTaosBind.cs @@ -0,0 +1,863 @@ +using System; +using Xunit; +using TDengineDriver; +using System.Runtime.InteropServices; + +namespace TDengineDriver.Test +{ + public class TestTaosBind + { + [Fact] + public void TestBindBoolTrue() + { + int bufferType = 1; + bool buffer = true; + int bufferLength = sizeof(bool); + int length = sizeof(bool); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBool(true); + int BindLengPtr = Marshal.ReadInt32(bind.length); + bool bindBuffer = Convert.ToBoolean(Marshal.ReadByte(bind.buffer)); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + + } + + [Fact] + public void TestBindBoolFalse() + { + int bufferType = 1; + bool buffer = false; + int bufferLength = sizeof(bool); + int length = sizeof(bool); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBool(false); + int BindLengPtr = Marshal.ReadInt32(bind.length); + bool bindBuffer = Convert.ToBoolean(Marshal.ReadByte(bind.buffer)); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, 
buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + + } + + [Fact] + public void TestBindTinyIntZero() + { + + int bufferType = 2; + sbyte buffer = 0; + int bufferLength = sizeof(sbyte); + int length = sizeof(sbyte); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTinyInt(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + sbyte bindBuffer = Convert.ToSByte(Marshal.ReadByte(bind.buffer)); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindTinyIntPositive() + { + + int bufferType = 2; + sbyte buffer = sbyte.MaxValue; + int bufferLength = sizeof(sbyte); + int length = sizeof(sbyte); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTinyInt(sbyte.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + sbyte bindBuffer = Convert.ToSByte(Marshal.ReadByte(bind.buffer)); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindTinyIntNegative() + { + + int bufferType = 2; + short buffer = sbyte.MinValue; + int bufferLength = sizeof(sbyte); + int length = sizeof(sbyte); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTinyInt(sbyte.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + short bindBuffer = Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", 
bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindSmallIntNegative() + { + + int bufferType = 3; + short buffer = short.MinValue; + int bufferLength = sizeof(short); + int length = sizeof(short); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindSmallInt(short.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + short bindBuffer = Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindSmallIntZero() + { + + int bufferType = 3; + short buffer = 0; + int bufferLength = sizeof(short); + int length = sizeof(short); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindSmallInt(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + short bindBuffer = Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindSmallIntPositive() + { + + int bufferType = 3; + short buffer = short.MaxValue; + int bufferLength = sizeof(short); + int length = sizeof(short); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindSmallInt(short.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + short bindBuffer = 
Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindIntNegative() + { + + int bufferType = 4; + int buffer = int.MinValue; + int bufferLength = sizeof(int); + int length = sizeof(int); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindInt(int.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + int bindBuffer = Marshal.ReadInt32(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindIntZero() + { + + int bufferType = 4; + int buffer = 0; + int bufferLength = sizeof(int); + int length = sizeof(int); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindInt(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + int bindBuffer = Marshal.ReadInt32(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindIntPositive() + { + + int bufferType = 4; + int buffer = int.MaxValue; + int bufferLength = sizeof(int); + int length = sizeof(int); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindInt(int.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + int bindBuffer 
= Marshal.ReadInt32(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindBigIntNegative() + { + + int bufferType = 5; + long buffer = long.MinValue; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBigInt(long.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindBigIntZero() + { + + int bufferType = 5; + long buffer = 0; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBigInt(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindBigIntPositive() + { + + int bufferType = 5; + long buffer = long.MaxValue; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBigInt(long.MaxValue); + int BindLengPtr = 
Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUTinyZero() + { + + + int bufferType = 11; + byte buffer = 0; + int bufferLength = sizeof(sbyte); + int length = sizeof(sbyte); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUTinyInt(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + byte bindBuffer = Marshal.ReadByte(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUTinyPositive() + { + + + int bufferType = 11; + byte buffer = byte.MaxValue; + int bufferLength = sizeof(sbyte); + int length = sizeof(sbyte); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUTinyInt(byte.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + byte bindBuffer = Marshal.ReadByte(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUSmallIntZero() + { + + int bufferType = 12; + ushort buffer = ushort.MinValue; + int bufferLength = sizeof(ushort); + int length = sizeof(ushort); + + TDengineDriver.TAOS_BIND bind = 
TaosBind.BindUSmallInt(ushort.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + ushort bindBuffer = (ushort)Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindUSmallIntPositive() + { + + int bufferType = 12; + ushort buffer = ushort.MaxValue; + int bufferLength = sizeof(ushort); + int length = sizeof(ushort); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUSmallInt(ushort.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + ushort bindBuffer = (ushort)Marshal.ReadInt16(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUIntZero() + { + int bufferType = 13; + uint buffer = uint.MinValue; + int bufferLength = sizeof(uint); + int length = sizeof(uint); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUInt(uint.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + uint bindBuffer = (uint)Marshal.ReadInt32(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUIntPositive() + { + int bufferType = 13; + uint buffer = uint.MaxValue; + 
int bufferLength = sizeof(uint); + int length = sizeof(uint); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUInt(uint.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + uint bindBuffer = (uint)Marshal.ReadInt32(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUBigIntZero() + { + int bufferType = 14; + ulong buffer = ulong.MinValue; + int bufferLength = sizeof(ulong); + int length = sizeof(ulong); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUBigInt(ulong.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + ulong bindBuffer = (ulong)Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindUBigIntPositive() + { + int bufferType = 14; + ulong buffer = ulong.MaxValue; + int bufferLength = sizeof(ulong); + int length = sizeof(ulong); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindUBigInt(ulong.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + ulong bindBuffer = (ulong)Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + 
public void TestBindFloatNegative() + { + int bufferType = 6; + float buffer = float.MinValue; + int bufferLength = sizeof(float); + int length = sizeof(float); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindFloat(float.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + float[] bindBufferArr = new float[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindFloatZero() + { + int bufferType = 6; + float buffer = 0; + int bufferLength = sizeof(float); + int length = sizeof(float); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindFloat(0F); + int BindLengPtr = Marshal.ReadInt32(bind.length); + float[] bindBufferArr = new float[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindFloatPositive() + { + int bufferType = 6; + float buffer = float.MaxValue; + int bufferLength = sizeof(float); + int length = sizeof(float); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindFloat(float.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + float[] bindBufferArr = new float[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, 
bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindDoubleZero() + { + int bufferType = 7; + double buffer = 0; + int bufferLength = sizeof(double); + int length = sizeof(double); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindDouble(0D); + int BindLengPtr = Marshal.ReadInt32(bind.length); + double[] bindBufferArr = new double[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindDoublePositive() + { + int bufferType = 7; + double buffer = double.MaxValue; + int bufferLength = sizeof(double); + int length = sizeof(double); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindDouble(double.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + double[] bindBufferArr = new double[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindDoubleNegative() + { + int bufferType = 7; + double buffer = double.MinValue; + int bufferLength = sizeof(double); + int length = sizeof(double); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindDouble(double.MinValue); + int BindLengPtr = 
Marshal.ReadInt32(bind.length); + double[] bindBufferArr = new double[1]; + Marshal.Copy(bind.buffer, bindBufferArr, 0, bindBufferArr.Length); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBufferArr[0], buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindBinaryEn() + { + int bufferType = 8; + String buffer = "qwertyuiopasdghjklzxcvbnm<>?:\"{}+_)(*&^%$#@!~QWERTYUIOP[]\\ASDFGHJKL;'ZXCVBNM,./`1234567890-="; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBinary("qwertyuiopasdghjklzxcvbnm<>?:\"{}+_)(*&^%$#@!~QWERTYUIOP[]\\ASDFGHJKL;'ZXCVBNM,./`1234567890-="); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindBinaryCn() + { + int bufferType = 8; + String buffer = "一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./"; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBinary("一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./"); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + 
Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindBinaryCnAndEn() + { + int bufferType = 8; + String buffer = "一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindBinary("一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindNcharEn() + { + int bufferType = 10; + String buffer = "qwertyuiopasdghjklzxcvbnm<>?:\"{}+_)(*&^%$#@!~QWERTYUIOP[]\\ASDFGHJKL;'ZXCVBNM,./`1234567890-="; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindNchar("qwertyuiopasdghjklzxcvbnm<>?:\"{}+_)(*&^%$#@!~QWERTYUIOP[]\\ASDFGHJKL;'ZXCVBNM,./`1234567890-="); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindNcharCn() + { + int 
bufferType = 10; + String buffer = "一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./"; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindNchar("一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./"); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindNcharCnAndEn() + { + int bufferType = 10; + String buffer = "一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"; + int bufferLength = buffer.Length; + int length = buffer.Length; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindNchar("一二两三四五六七八九十廿毛另壹贰叁肆伍陆柒捌玖拾佰仟万亿元角分零整1234567890`~!@#$%^&*()_+[]{};':<>?,./qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"); + int BindLengPtr = Marshal.ReadInt32(bind.length); + string bindBuffer = Marshal.PtrToStringAnsi(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindNil() + { + int bufferType = 0; + int isNull = 1; + + TDengineDriver.TAOS_BIND bind = TaosBind.BindNil(); + + int bindIsNull = Marshal.ReadInt32(bind.is_null); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, 
bufferType); + Assert.Equal(bindIsNull, isNull); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + [Fact] + public void TestBindTimestampNegative() + { + int bufferType = 9; + long buffer = long.MinValue; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTimestamp(long.MinValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindTimestampZero() + { + int bufferType = 9; + long buffer = 0; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTimestamp(0); + int BindLengPtr = Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + [Fact] + public void TestBindTimestampPositive() + { + int bufferType = 9; + long buffer = long.MaxValue; + int bufferLength = sizeof(long); + int length = sizeof(long); + + TDengineDriver.TAOS_BIND bind = TaosBind.BindTimestamp(long.MaxValue); + int BindLengPtr = Marshal.ReadInt32(bind.length); + long bindBuffer = Marshal.ReadInt64(bind.buffer); + Console.WriteLine("bind.buffer_type:{0},bufferType:{1}", bind.buffer_type, bufferType); + + Assert.Equal(bind.buffer_type, bufferType); + Assert.Equal(bindBuffer, 
buffer); + Assert.Equal(bind.buffer_length, bufferLength); + Assert.Equal(BindLengPtr, length); + + Marshal.FreeHGlobal(bind.buffer); + Marshal.FreeHGlobal(bind.length); + } + + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj b/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj new file mode 100644 index 0000000000000000000000000000000000000000..97d13e5e9e74abada2efa27d64e57adbe5459023 --- /dev/null +++ b/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj @@ -0,0 +1,30 @@ + + + + net5.0 + + false + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + diff --git a/src/connector/go b/src/connector/go index b8f76da4a708d158ec3cc4b844571dc4414e36b4..25f8683ece07897fea12c347d369602b2235665f 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 +Subproject commit 25f8683ece07897fea12c347d369602b2235665f diff --git a/src/connector/grafana-pluain-is-removed-from-TDengine.md b/src/connector/grafana-pluain-is-removed-from-TDengine.md new file mode 100644 index 0000000000000000000000000000000000000000..6e7833d5ac6bf24af353eb8fc709875e27d2297e --- /dev/null +++ b/src/connector/grafana-pluain-is-removed-from-TDengine.md @@ -0,0 +1 @@ +TDengine Grafana plugin is no more part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin. 
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin deleted file mode 160000 index 792ef7c3036f15068796e09883d3f4d47a038fe2..0000000000000000000000000000000000000000 --- a/src/connector/grafanaplugin +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 792ef7c3036f15068796e09883d3f4d47a038fe2 diff --git a/src/connector/grafanaplugin/README.md b/src/connector/grafanaplugin/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e7833d5ac6bf24af353eb8fc709875e27d2297e --- /dev/null +++ b/src/connector/grafanaplugin/README.md @@ -0,0 +1 @@ +TDengine Grafana plugin is no more part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin. diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index c992cf58ba43eb0e052d9bc80824d94e98b725ca..15695ae9204c40db16c9f4d367c80a285335cbef 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -107,16 +107,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti public void setCatalog(String catalog) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - /* - try (Statement stmt = createStatement()) { - boolean execute = stmt.execute("use " + catalog); - if (execute) - this.catalog = catalog; - } catch (SQLException e) { - // do nothing - } - */ - this.catalog = catalog; } @@ -416,7 +406,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } catch (InterruptedException | ExecutionException ignored) { } catch (TimeoutException e) { future.cancel(true); - status = false; } finally { executor.shutdownNow(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java index e17548055c792e900a1e2fb5b510de8bf65de7a7..07553d7ef4e1ea0745d25523d0fd1612086c3826 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -11,6 +11,11 @@ import java.util.Map; public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { private int fetchSize; protected boolean wasNull; + protected int timestampPrecision; + + public void setTimestampPrecision(int timestampPrecision) { + this.timestampPrecision = timestampPrecision; + } protected void checkAvailability(int columnIndex, int bounds) throws SQLException { if (isClosed()) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java deleted file mode 100644 index 0b46226d1113b82d9333204427eaad074d3572cb..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.taosdata.jdbc; - -import java.sql.*; - -public class AbstractStatementWrapper extends AbstractStatement{ - protected Statement statement; - - public AbstractStatementWrapper(Statement statement) { - this.statement = statement; - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - return statement.executeQuery(sql); - } - - @Override - public int executeUpdate(String sql) throws SQLException { - return statement.executeUpdate(sql); - } - - @Override - public void close() throws SQLException { - statement.close(); - } - - @Override - public boolean execute(String sql) throws SQLException { - return statement.execute(sql); - } - - @Override - public ResultSet getResultSet() throws SQLException { - return statement.getResultSet(); - } - - @Override - public int getUpdateCount() 
throws SQLException { - return statement.getUpdateCount(); - } - - @Override - public Connection getConnection() throws SQLException { - return statement.getConnection(); - } - - @Override - public boolean isClosed() throws SQLException { - return statement.isClosed(); - } -} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java deleted file mode 100644 index 748891d943536b3cb6ebd6adffd295573adee4d1..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java +++ /dev/null @@ -1,58 +0,0 @@ -package com.taosdata.jdbc; - -import com.taosdata.jdbc.enums.SchemalessProtocolType; -import com.taosdata.jdbc.enums.SchemalessTimestampType; -import com.taosdata.jdbc.rs.RestfulConnection; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; - -/** - * @author huolibo@qq.com - * @version v1.0.0 - * @JDK: 1.8 - * @description: this class is an extension of {@link Statement}. 
use like: - * Statement statement = conn.createStatement(); - * SchemalessStatement schemalessStatement = new SchemalessStatement(statement); - * schemalessStatement.execute(sql); - * schemalessStatement.executeSchemaless(lines, SchemalessProtocolType, SchemalessTimestampType); - * @since 2021-11-03 17:10 - */ -public class SchemalessStatement extends AbstractStatementWrapper { - public SchemalessStatement(Statement statement) { - super(statement); - } - - /** - * batch insert schemaless lines - * - * @param lines schemaless data - * @param protocolType schemaless type {@link SchemalessProtocolType} - * @param timestampType Time precision {@link SchemalessTimestampType} - * @throws SQLException execute insert exception - */ - public void executeSchemaless(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { - Connection connection = this.getConnection(); - if (connection instanceof TSDBConnection) { - TSDBConnection tsdbConnection = (TSDBConnection) connection; - tsdbConnection.getConnector().insertLines(lines, protocolType, timestampType); - } else if (connection instanceof RestfulConnection) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "restful connection is not supported currently"); - } else { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown connection:" + connection.getMetaData().getURL()); - } - } - - /** - * only one insert - * - * @param line schemaless line - * @param protocolType schemaless type {@link SchemalessProtocolType} - * @param timestampType Time precision {@link SchemalessTimestampType} - * @throws SQLException execute insert exception - */ - public void executeSchemaless(String line, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { - executeSchemaless(new String[]{line}, protocolType, timestampType); - } -} diff --git 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessWriter.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessWriter.java new file mode 100644 index 0000000000000000000000000000000000000000..d8cb5795f05e0fad785fc0c4ffcdaea7be411be6 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessWriter.java @@ -0,0 +1,67 @@ +package com.taosdata.jdbc; + +import com.taosdata.jdbc.enums.SchemalessProtocolType; +import com.taosdata.jdbc.enums.SchemalessTimestampType; +import com.taosdata.jdbc.rs.RestfulConnection; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; + +/** + * This class is for schemaless lines(line/telnet/json) write to tdengine. + * e.g.: + * SchemalessWriter writer = new SchemalessWriter(connection); + * writer.write(lines, SchemalessProtocolType, SchemalessTimestampType); + */ +public class SchemalessWriter { + protected Connection connection; + + public SchemalessWriter(Connection connection) { + this.connection = connection; + } + + /** + * batch schemaless lines write to db + * + * @param lines schemaless lines + * @param protocolType schemaless type {@link SchemalessProtocolType} + * @param timestampType Time precision {@link SchemalessTimestampType} + * @throws SQLException execute exception + */ + public void write(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { + if (connection instanceof TSDBConnection) { + TSDBConnection tsdbConnection = (TSDBConnection) connection; + tsdbConnection.getConnector().insertLines(lines, protocolType, timestampType); + } else if (connection instanceof RestfulConnection) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "restful connection is not supported currently"); + } else { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown connection:" + connection.getMetaData().getURL()); + } + } + + /** + * only one line 
writes to db + * + * @param line schemaless line + * @param protocolType schemaless type {@link SchemalessProtocolType} + * @param timestampType Time precision {@link SchemalessTimestampType} + * @throws SQLException execute exception + */ + public void write(String line, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { + write(new String[]{line}, protocolType, timestampType); + } + + /** + * batch schemaless lines write to db with list + * + * @param lines schemaless list + * @param protocolType schemaless type {@link SchemalessProtocolType} + * @param timestampType Time precision {@link SchemalessTimestampType} + * @throws SQLException execute exception + */ + public void write(List lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { + String[] strings = lines.toArray(new String[0]); + write(strings, protocolType, timestampType); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 8cd8da6de4f7d5324afbc6d5a5d54d6b8dcc7a8d..77a97d644ca3da3a51bce021ab7904883ed885f4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -1,17 +1,3 @@ -/*************************************************************************** - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - *****************************************************************************/ package com.taosdata.jdbc; import java.sql.*; @@ -66,7 +52,7 @@ public class TSDBConnection extends AbstractConnection { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - long id = this.connector.subscribe(topic, sql, restart, 0); + long id = this.connector.subscribe(topic, sql, restart); if (id == 0) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 0fef64a6f82706e30677ad4e74604924c5cc2e60..00eff99f45cb6aa8cc0fbc7bce40e0d82f401e05 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -1,23 +1,8 @@ -/*************************************************************************** - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- *****************************************************************************/ package com.taosdata.jdbc; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; import java.sql.*; -import java.util.*; +import java.util.Properties; +import java.util.StringTokenizer; import java.util.logging.Logger; /** @@ -139,7 +124,7 @@ public class TSDBDriver extends AbstractDriver { } catch (SQLException sqlEx) { throw sqlEx; } catch (Exception ex) { - throw new SQLException("SQLException:" + ex.toString(), ex); + throw new SQLException("SQLException:" + ex, ex); } } @@ -152,7 +137,7 @@ public class TSDBDriver extends AbstractDriver { public boolean acceptsURL(String url) throws SQLException { if (url == null) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); - return url.length() > 0 && url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); + return url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); } public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index ee2c8141a81bb9dc2aa51ba14247dfbb834ec746..247ae929dabc9aba4d50309433a9b1866125909d 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -1,19 +1,3 @@ -/** - * ************************************************************************* - * Copyright (c) 2019 TAOS Data, Inc. - *

    - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - *

    - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - *

    - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * *************************************************************************** - */ package com.taosdata.jdbc; import com.alibaba.fastjson.JSONObject; @@ -261,8 +245,8 @@ public class TSDBJNIConnector { /** * Create a subscription */ - long subscribe(String topic, String sql, boolean restart, int period) { - return subscribeImp(this.taos, restart, topic, sql, period); + long subscribe(String topic, String sql, boolean restart) { + return subscribeImp(this.taos, restart, topic, sql, 0); } private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); @@ -285,16 +269,6 @@ public class TSDBJNIConnector { private native void unsubscribeImp(long subscription, boolean isKeep); - /** - * Validate if a create table SQL statement is correct without actually creating that table - */ - public boolean validateCreateTableSql(String sql) { - int res = validateCreateTableSqlImp(taos, sql.getBytes()); - return res == 0; - } - - private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); - public long prepareStmt(String sql) throws SQLException { long stmt = prepareStmtImp(sql.getBytes(), this.taos); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index 3814186f779203741001943efe47b85c0be83acb..003324d27a57c3557f0bb3205fcee208aa776ed5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -74,9 +74,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { public boolean next() throws SQLException { if (this.getBatchFetch()) { - if (this.blockData.forward()) { + if (this.blockData.forward()) return true; - } int code = 
this.jniConnector.fetchBlock(this.resultSetPointer, this.blockData); this.blockData.reset(); @@ -214,7 +213,18 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { if (!lastWasNull) { Object value = this.rowData.getObject(columnIndex); if (value instanceof Timestamp) { - res = ((Timestamp) value).getTime(); + Timestamp ts = (Timestamp) value; + long epochSec = ts.getTime() / 1000; + long nanoAdjustment = ts.getNanos(); + switch (this.timestampPrecision) { + case 0: + default: // ms + return ts.getTime(); + case 1: // us + return epochSec * 1000_000L + nanoAdjustment / 1000L; + case 2: // ns + return epochSec * 1000_000_000L + nanoAdjustment; + } } else { int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType(); res = this.rowData.getLong(columnIndex, nativeType); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 2c77df2981e18931d6cb56cca84bb2115716b349..ce877987e6e9073defbff62e283910ee34366c4d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -47,6 +47,8 @@ public class TSDBStatement extends AbstractStatement { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY); } TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql); + int timestampPrecision = this.connection.getConnector().getResultTimePrecision(pSql); + res.setTimestampPrecision(timestampPrecision); res.setBatchFetch(this.connection.getBatchFetch()); return res; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java index 79350076c7f4b31743ab9fb61226e506186f0f17..4558dfa84bfccacf9f0d4fa2d7991c8bd0546b30 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java @@ -1,8 +1,7 @@ package com.taosdata.jdbc.enums; -public enum TimestampPrecision { - MS, - US, - NS, - UNKNOWN +public class TimestampPrecision { + public static final int MS = 0; + public static final int US = 1; + public static final int NS = 2; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 36714893e3ca519dea07910a95d5ee1c1b6fb731..fdd034a641d7fd829059c73061305bdf38eae1bf 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -39,7 +39,7 @@ public class RestfulDriver extends AbstractDriver { String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041"); String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? 
props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null; - String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; + String loginUrl; try { if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); @@ -53,8 +53,8 @@ public class RestfulDriver extends AbstractDriver { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 concoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); } - int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); - boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); + int poolSize = Integer.parseInt(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); + boolean keepAlive = Boolean.parseBoolean(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); HttpClientPoolUtil.init(poolSize, keepAlive); String result = HttpClientPoolUtil.execute(loginUrl); @@ -79,7 +79,7 @@ public class RestfulDriver extends AbstractDriver { public boolean acceptsURL(String url) throws SQLException { if (url == null) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); - return url.length() > 0 && url.trim().length() > 0 && url.startsWith(URL_PREFIX); + return url.trim().length() > 0 && url.startsWith(URL_PREFIX); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index f3e3f138df8fc854817c0adf57c5f5453f52bf05..d4c30115f851aa0f8b6f80994bbece609649428d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -168,11 +168,22 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { case TIMESTAMP: { Long value = row.getLong(colIndex); //TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9 - if (value < 1_0000_0000_0000_0L) + if (value < 1_0000_0000_0000_0L) { + this.timestampPrecision = TimestampPrecision.MS; return new Timestamp(value); - long epochSec = value / 1000_000L; - long nanoAdjustment = value % 1000_000L * 1000L; - return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if (value >= 1_0000_0000_0000_0L && value < 1_000_000_000_000_000_0l) { + this.timestampPrecision = TimestampPrecision.US; + long epochSec = value / 1000_000L; + long nanoAdjustment = value % 1000_000L * 1000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if (value >= 1_000_000_000_000_000_0l) { + this.timestampPrecision = TimestampPrecision.NS; + long epochSec = value / 1000_000_000L; + long nanoAdjustment = value % 1000_000_000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } } case UTC: { String value = row.getString(colIndex); @@ -182,12 +193,15 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { if (value.length() > 31) { // ns timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSSSSS+0x00 nanoAdjustment = fractionalSec; + this.timestampPrecision = TimestampPrecision.NS; } else if (value.length() > 28) { // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 nanoAdjustment = fractionalSec * 1000L; + this.timestampPrecision = TimestampPrecision.US; } else { // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 nanoAdjustment = fractionalSec * 1000_000L; + this.timestampPrecision = TimestampPrecision.MS; } ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5)); Instant instant = Instant.ofEpochSecond(epochSec, 
nanoAdjustment).atOffset(zoneOffset).toInstant(); @@ -196,7 +210,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { case STRING: default: { String value = row.getString(colIndex); - TimestampPrecision precision = Utils.guessTimestampPrecision(value); + int precision = Utils.guessTimestampPrecision(value); + this.timestampPrecision = precision; + if (precision == TimestampPrecision.MS) { // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS return row.getTimestamp(colIndex); @@ -338,8 +354,18 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { wasNull = value == null; if (value == null) return 0; - if (value instanceof Timestamp) - return ((Timestamp) value).getTime(); + if (value instanceof Timestamp) { + Timestamp ts = (Timestamp) value; + switch (this.timestampPrecision) { + case TimestampPrecision.MS: + default: + return ts.getTime(); + case TimestampPrecision.US: + return ts.getTime() * 1000 + ts.getNanos() / 1000 % 1000; + case TimestampPrecision.NS: + return ts.getTime() * 1000_000 + ts.getNanos() % 1000_000; + } + } long valueAsLong = 0; try { valueAsLong = Long.parseLong(value.toString()); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index fb8b82271b02b70b348b43a7c88a0084adaa5ab5..cdcd2eec482cc39e940bf20f6ae636568257faf2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -65,7 +65,11 @@ public class RestfulStatement extends AbstractStatement { boolean result = true; if (SqlSyntaxValidator.isUseSql(sql)) { - HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); + String ret = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); + JSONObject resultJson = JSON.parseObject(ret); + if (resultJson.getString("status").equals("error")) { + throw 
TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc")); + } this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; @@ -114,7 +118,7 @@ public class RestfulStatement extends AbstractStatement { String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { - throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + throw TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc")); } this.resultSet = new RestfulResultSet(database, this, resultJson); this.affectedRows = -1; @@ -125,7 +129,7 @@ public class RestfulStatement extends AbstractStatement { String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject jsonObject = JSON.parseObject(result); if (jsonObject.getString("status").equals("error")) { - throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); + throw TSDBError.createSQLException(jsonObject.getInteger("code"), "sql: " + sql + ", desc: " + jsonObject.getString("desc")); } this.resultSet = null; this.affectedRows = getAffectedRows(jsonObject); @@ -133,16 +137,14 @@ public class RestfulStatement extends AbstractStatement { } private int getAffectedRows(JSONObject jsonObject) throws SQLException { - // create ... 
SQLs should return 0 , and Restful result like this: - // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1} JSONArray head = jsonObject.getJSONArray("head"); if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + head.toJSONString() + "]"); JSONArray data = jsonObject.getJSONArray("data"); if (data != null) { return data.getJSONArray(0).getInteger(0); } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + jsonObject.toJSONString() + "]"); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java index 6cd1ff7200962b7347969e0b8b10443083505912..6ec76fffd93751b0cb57e116085de9da550f214e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -194,14 +194,14 @@ public class Utils { return timestamp.toLocalDateTime().format(milliSecFormatter); } - public static TimestampPrecision guessTimestampPrecision(String value) { + public static int guessTimestampPrecision(String value) { if (isMilliSecFormat(value)) return TimestampPrecision.MS; if (isMicroSecFormat(value)) return TimestampPrecision.US; if (isNanoSecFormat(value)) return TimestampPrecision.NS; - return TimestampPrecision.UNKNOWN; + return TimestampPrecision.MS; } private static boolean isMilliSecFormat(String timestampStr) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/JsonTagTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/JsonTagTest.java new file mode 100644 index 
0000000000000000000000000000000000000000..501c7e17c837ce311ec0f7b43f63122e53b8a0d9 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/JsonTagTest.java @@ -0,0 +1,1220 @@ +package com.taosdata.jdbc; + +import com.taosdata.jdbc.annotation.CatalogRunner; +import com.taosdata.jdbc.annotation.Description; +import com.taosdata.jdbc.annotation.TestTarget; +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.MethodSorters; + +import java.sql.*; + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@RunWith(CatalogRunner.class) +@TestTarget(alias = "JsonTag", author = "huolibo", version = "2.0.36") +public class JsonTagTest { + private static final String dbName = "json_tag_test"; + private static Connection connection; + private static Statement statement; + private static final String superSql = "create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)"; + private static final String[] sql = { + "insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(now, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')", + "insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')", + "insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')", + "insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')", + "insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')", + "insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')", + "insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 
'dws')", + // test duplicate key using the first one. + "CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90}')", + + }; + + private static final String[] invalidJsonInsertSql = { + // test empty json string, save as tag is NULL + "insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')", + }; + + private static final String[] invalidJsonCreateSql = { + "CREATE TABLE if not exists jsons1_10 using jsons1 tags('')", + "CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')", + "CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')", + "CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')", + }; + + // test invalidate json + private static final String[] errorJsonInsertSql = { + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')", + }; + + private static final String[] errorSelectSql = { + "select * from jsons1 where jtag->tag1='beijing'", + "select * from jsons1 where jtag->'location'", + "select * from jsons1 where jtag->''", + "select * from jsons1 where jtag->''=9", + "select -> from jsons1", + "select ? 
from jsons1", + "select * from jsons1 where contains", + "select * from jsons1 where jtag->", + "select jtag->location from jsons1", + "select jtag contains location from jsons1", + "select * from jsons1 where jtag contains location", + "select * from jsons1 where jtag contains ''", + "select * from jsons1 where jtag contains 'location'='beijing'", + // test where with json tag + "select * from jsons1_1 where jtag is not null", + "select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'", + "select * from jsons1 where jtag->'tag1'={}" + }; + + @Test + @Description("insert json tag") + public void case01_InsertTest() throws SQLException { + for (String sql : sql) { + statement.execute(sql); + } + for (String sql : invalidJsonInsertSql) { + statement.execute(sql); + } + for (String sql : invalidJsonCreateSql) { + statement.execute(sql); + } + } + + @Test + @Description("error json tag insert") + public void case02_ErrorJsonInsertTest() { + int count = 0; + for (String sql : errorJsonInsertSql) { + try { + statement.execute(sql); + } catch (SQLException e) { + count++; + } + } + Assert.assertEquals(errorJsonInsertSql.length, count); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json value is array") + public void case02_ArrayErrorTest() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json value is empty") + public void case02_EmptyValueErrorTest() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json key is not ASCII") + public void case02_AbnormalKeyErrorTest1() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')"); + } + + @Test(expected = 
SQLException.class) + @Description("exception will throw when json key is '\\t'") + public void case02_AbnormalKeyErrorTest2() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json key is chinese") + public void case02_AbnormalKeyErrorTest3() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')"); + } + + @Test + @Description("alter json tag") + public void case03_AlterTag() throws SQLException { + statement.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when add json tag") + public void case03_AddTagErrorTest() throws SQLException { + statement.execute("ALTER STABLE jsons1 add tag tag2 nchar(20)"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when delete json tag") + public void case03_dropTagErrorTest() throws SQLException { + statement.execute("ALTER STABLE jsons1 drop tag jtag"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when set some json tag value") + public void case03_AlterTagErrorTest() throws SQLException { + statement.execute("ALTER TABLE jsons1_1 SET TAG jtag=4"); + } + + @Test + @Description("exception will throw when select syntax error") + public void case04_SelectErrorTest() { + int count = 0; + for (String sql : errorSelectSql) { + try { + statement.execute(sql); + } catch (SQLException e) { + count++; + } + } + Assert.assertEquals(errorSelectSql.length, count); + } + + @Test + @Description("normal select stable") + public void case04_select01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select dataint from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + 
Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("select all column from stable") + public void case04_select02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("select json tag from stable") + public void case04_select03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length + invalidJsonCreateSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition tag is null") + public void case04_select04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1 where jtag is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(invalidJsonInsertSql.length + invalidJsonCreateSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition tag is not null") + public void case04_select05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1 where jtag is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length, count); + close(resultSet); + } + + @Test + @Description("select json tag") + public void case04_select06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1_8"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("{\"tag1\":null,\"1tag$\":2,\" \":90}", result); + close(resultSet); + } + + @Test + @Description("select json tag") + public void case04_select07() throws SQLException { + ResultSet resultSet = 
statement.executeQuery("select jtag from jsons1_1"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}", result); + close(resultSet); + } + + @Test + @Description("select not exist json tag") + public void case04_select08() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1_9"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertNull(result); + close(resultSet); + } + + @Test + @Description("select a json tag") + public void case04_select09() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_1"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("\"femail\"", result); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is empty") + public void case04_select10() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag2' from jsons1_6"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("\"\"", result); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is int") + public void case04_select11() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag2' from jsons1_1"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("35", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is boolean") + public void case04_select12() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag3' from jsons1_1"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("true", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is null") + public void case04_select13() throws SQLException { + ResultSet resultSet = statement.executeQuery("select 
jtag->'tag1' from jsons1_4"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("null", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is double") + public void case04_select14() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_5"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("1.232000000", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the key is not exist") + public void case04_select15() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag10' from jsons1_4"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertNull(string); + close(resultSet); + } + + @Test + @Description("select a json tag, the result number equals tables number") + public void case04_select16() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonCreateSql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition '=' for string") + public void case04_select19() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("select and where conditon '=' for string") + public void case04_select20() throws SQLException { + ResultSet resultSet = statement.executeQuery("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition result is null") + public 
void case04_select21() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition equation has chinese") + public void case04_select23() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='收到货'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>' for character") + public void case05_symbolOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'>'beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for character") + public void case05_symbolOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'>='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '<' for character") + public void case05_symbolOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'<'beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' in character") + public void case05_symbolOperation04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'<='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + 
close(resultSet); + } + + @Test + @Description("where condition support '!=' in character") + public void case05_symbolOperation05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'!='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' empty") + public void case05_symbolOperation06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'=''"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + // where json value is int + @Test + @Description("where condition support '=' for int") + public void case06_selectValue01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where conditional support '<' for int") + public void case06_selectValue02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<54"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' for int") + public void case06_selectValue03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<=11"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where conditional support '>' for int") + public void case06_selectValue04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>4"); + int count = 0; + while 
(resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for int") + public void case06_selectValue05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int") + public void case06_selectValue06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int") + public void case06_selectValue07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=55"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int and result is nothing") + public void case06_selectValue08() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=10"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' for double") + public void case07_selectValue01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '<' for double") + public void case07_doubleOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from 
jsons1 where jtag->'tag1'<1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' for double") + public void case07_doubleOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>' for double") + public void case07_doubleOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>1.23"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for double") + public void case07_doubleOperation04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for double") + public void case07_doubleOperation05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for double") + public void case07_doubleOperation06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=3.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when denominator is zero") + public 
void case07_doubleOperation07() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'/0=3"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when invalid operation") + public void case07_doubleOperation08() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'/5=1"); + } + + @Test + @Description("where condition support '=' for boolean") + public void case08_boolOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=true"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' for boolean") + public void case08_boolOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for boolean") + public void case08_boolOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=false"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when '>' operation for boolean") + public void case08_boolOperation04() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'>false"); + } + + @Test + @Description("where conditional support '=null'") + public void case09_select01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + 
@Description("where conditional support 'is null'") + public void case09_select02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support 'is not null'") + public void case09_select03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag '='") + public void case09_select04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag_no_exist'=3"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is null'") + public void case09_select05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is null'") + public void case09_select06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag4' is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is not null'") + public void case09_select07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag3' is not null"); + int count = 0; + while 
(resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("contains") + public void case09_select10() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag1'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("contains") + public void case09_select11() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("contains with no exist tag") + public void case09_select12() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag_no_exist'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition with and") + public void case10_selectAndOr01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition with 'or'") + public void case10_selectAndOr02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition with 'and'") + public void case10_selectAndOr03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'"); + int 
count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition with 'or'") + public void case10_selectAndOr04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition with 'or' and contains") + public void case10_selectAndOr05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("where condition with 'and' and contains") + public void case10_selectAndOr06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName03() throws 
SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition like") + public void case12_selectWhere01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition like") + public void case12_selectWhere02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("where condition in no support in") + public void case12_selectWhere03() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1' in ('beijing')"); + } + + @Test + @Description("where condition match") + public void case12_selectWhere04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match 'ma'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere05() 
throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match 'ma$'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2' match 'jing$'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match '收到'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("insert distinct") + public void case13_selectDistinct01() throws SQLException { + statement.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')"); + } + + @Test + @Description("distinct json tag") + public void case13_selectDistinct02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select distinct jtag->'tag1' from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("distinct json tag") + public void case13_selectDistinct03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select distinct jtag from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(9, count); + close(resultSet); + } + + @Test + @Description("insert json tag") + public void case14_selectDump01() throws SQLException { + statement.execute("INSERT INTO jsons1_15 using jsons1 
tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")"); + } + + @Test + @Description("test duplicate key with normal column") + public void case14_selectDump02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("test duplicate key with normal column") + public void case14_selectDump03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("insert json tag for join test") + public void case15_selectJoin01() throws SQLException { + statement.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)"); + statement.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')"); + statement.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')"); + + statement.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)"); + statement.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')"); + statement.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')"); + } + + @Test + @Description("select json tag from join") + public 
void case15_selectJoin02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'"); + resultSet.next(); + Assert.assertEquals("sss", resultSet.getString(1)); + close(resultSet); + } + + @Test + @Description("group by and order by json tag desc") + public void case16_selectGroupOrder01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("group by and order by json tag asc") + public void case16_selectGroupOrder02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("stddev with group by json tag") + public void case17_selectStddev01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select stddev(dataint) from jsons1 group by jtag->'tag1'"); + String s = ""; + int count = 0; + while (resultSet.next()) { + count++; + s = resultSet.getString(2); + + } + Assert.assertEquals(8, count); + Assert.assertEquals("\"femail\"", s); + close(resultSet); + } + + @Test + @Description("subquery json tag") + public void case18_selectSubquery01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from (select jtag, dataint from jsons1)"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + @Test + @Description("subquery some json tags") + public void case18_selectSubquery02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from 
(select jtag->'tag1', dataint from jsons1)"); + + ResultSetMetaData metaData = resultSet.getMetaData(); + String columnName = metaData.getColumnName(1); + Assert.assertEquals("jtag->'tag1'", columnName); + + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + @Test + @Description("query some json tags from subquery") + public void case18_selectSubquery04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + private void close(ResultSet resultSet) { + try { + if (null != resultSet) { + resultSet.close(); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + String host = "127.0.0.1"; + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + try { + connection = DriverManager.getConnection(url); + statement = connection.createStatement(); + statement.execute("drop database if exists " + dbName); + statement.execute("create database if not exists " + dbName); + statement.execute("use " + dbName); + statement.execute(superSql); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (null != statement) { + statement.execute("drop database " + dbName); + statement.close(); + } + if (null != connection) { + connection.close(); + } + } catch (SQLException e) { + e.printStackTrace(); + } + + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java index fd4ac12ce40dc02f2b6ffbf91e33b0e0bd2398a9..712cc0c8c1b0b094a20db0fe36d33f553878d71d 100644 --- 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java @@ -2,15 +2,23 @@ package com.taosdata.jdbc; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.annotation.CatalogRunner; +import com.taosdata.jdbc.annotation.Description; +import com.taosdata.jdbc.annotation.TestTarget; import com.taosdata.jdbc.enums.SchemalessProtocolType; import com.taosdata.jdbc.enums.SchemalessTimestampType; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; import java.sql.*; +import java.util.ArrayList; +import java.util.List; +@RunWith(CatalogRunner.class) +@TestTarget(alias = "Schemaless",author = "huolibo", version = "2.0.36") public class SchemalessInsertTest { private final String dbname = "test_schemaless_insert"; private Connection conn; @@ -21,16 +29,15 @@ public class SchemalessInsertTest { * @throws SQLException execute error */ @Test + @Description("line insert") public void schemalessInsert() throws SQLException { // given String[] lines = new String[]{ "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000"}; // when - try (Statement statement = conn.createStatement(); - SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) { - schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS); - } + SchemalessWriter writer = new SchemalessWriter(conn); + writer.write(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS); // then Statement statement = conn.createStatement(); @@ -53,6 +60,7 @@ public class SchemalessInsertTest { * @throws SQLException execute error */ @Test + @Description("telnet insert") 
public void telnetInsert() throws SQLException { // given String[] lines = new String[]{ @@ -62,10 +70,9 @@ public class SchemalessInsertTest { }; // when - try (Statement statement = conn.createStatement(); - SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) { - schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED); - } + + SchemalessWriter writer = new SchemalessWriter(conn); + writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED); // then Statement statement = conn.createStatement(); @@ -88,6 +95,7 @@ public class SchemalessInsertTest { * @throws SQLException execute error */ @Test + @Description("json insert") public void jsonInsert() throws SQLException { // given String json = "[\n" + @@ -114,10 +122,8 @@ public class SchemalessInsertTest { "]"; // when - try (Statement statement = conn.createStatement(); - SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) { - schemalessStatement.executeSchemaless(json, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); - } + SchemalessWriter writer = new SchemalessWriter(conn); + writer.write(json, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); // then Statement statement = conn.createStatement(); @@ -135,6 +141,33 @@ public class SchemalessInsertTest { statement.close(); } + @Test + public void telnetListInsert() throws SQLException { + // given + List list = new ArrayList<>(); + list.add("stb0_0 1626006833 4 host=host0 interface=eth0"); + list.add("stb0_1 1626006833 4 host=host0 interface=eth0"); + list.add("stb0_2 1626006833 4 host=host0 interface=eth0 id=\"special_name\""); + // when + + SchemalessWriter writer = new SchemalessWriter(conn); + writer.write(list, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED); + + // then + Statement statement = conn.createStatement(); + ResultSet rs = 
statement.executeQuery("show tables"); + Assert.assertNotNull(rs); + ResultSetMetaData metaData = rs.getMetaData(); + Assert.assertTrue(metaData.getColumnCount() > 0); + int rowCnt = 0; + while (rs.next()) { + rowCnt++; + } + Assert.assertEquals(list.size(), rowCnt); + rs.close(); + statement.close(); + } + @Before public void before() { String host = "127.0.0.1"; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogClass.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogClass.java new file mode 100644 index 0000000000000000000000000000000000000000..490346e401dba956c8743abb452bcc943df67904 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogClass.java @@ -0,0 +1,86 @@ +package com.taosdata.jdbc.annotation; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test class + */ +public class CatalogClass { + + private String name; + private String alias; + private String author; + private String version; + private List methods = new ArrayList<>(); + private int total; + private int failure; + + public void setTotal(int total) { + this.total = total; + } + + public void setFailure(int failure) { + this.failure = failure; + } + + public void setAuthor(String author) { + this.author = author; + } + + public void setVersion(String version) { + this.version = version; + } + + public void setName(String name) { + this.name = name; + } + + public void setAlias(String alias) { + this.alias = alias; + } + + public void setMethods(List methods) { + this.methods = methods; + } + + @Override + public String toString() { + if (methods.size() < 1) + return null; + StringBuilder sb = new StringBuilder(); + sb.append("ClassName: ").append(name); + String msg = trim(alias); + if (null != msg) + sb.append("\tAlias:").append(alias); + sb.append("\tTotal:").append(total) + .append("\tFailure:").append(failure).append("\n"); + for (CatalogMethod method : methods) { + 
sb.append("\t").append(method.getName()); + sb.append("\t").append(method.isSuccess()); + sb.append("\t").append(method.getMessage()); + String mAuthor = trim(method.getAuthor()); + if (null == mAuthor) { + sb.append("\t").append(author); + } else { + sb.append("\t").append(method.getAuthor()); + } + String mVersion = trim(method.getVersion()); + if (null == mVersion) { + sb.append("\t").append(version); + } else { + sb.append("\t").append(mVersion); + } + sb.append("\n"); + } + return sb.toString(); + } + + private String trim(String s) { + if (null == s || s.trim().equals("")) { + return null; + } else { + return s.trim(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogListener.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogListener.java new file mode 100644 index 0000000000000000000000000000000000000000..2d22302d02f531eca055fa76dea18d8de9f7371f --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogListener.java @@ -0,0 +1,104 @@ +package com.taosdata.jdbc.annotation; + +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; + +import java.io.File; +import java.io.FileWriter; +import java.util.LinkedList; + +public class CatalogListener extends RunListener { + public static final String CATALOG_FILE = "target/TestCaseCatalog.txt"; + CatalogClass catalogClass = null; + private final LinkedList methods = new LinkedList<>(); + + @Override + public void testRunStarted(Description description) throws Exception { + catalogClass = new CatalogClass(); + TestTarget target = description.getAnnotation(TestTarget.class); + if (target != null) { + catalogClass.setAlias(target.alias()); + catalogClass.setAuthor(target.author()); + catalogClass.setVersion(target.version()); + } + catalogClass.setName(getClassName(description.getClassName())); + } + + private 
String getClassName(String name) { + if (null == name || name.trim().equals("")) { + return null; + } + name = name.trim(); + int pos = name.lastIndexOf("."); + if (-1 == pos) { + return name; + } + return name.substring(pos + 1); + } + + @Override + public void testRunFinished(Result result) throws Exception { + catalogClass.setMethods(methods); + catalogClass.setTotal(result.getRunCount()); + catalogClass.setFailure(result.getFailureCount()); + File file = new File(CATALOG_FILE); + if (!file.exists()) { + synchronized (CatalogListener.class) { + if (!file.exists()) { + file.createNewFile(); + try (FileWriter writer = new FileWriter(file, true)) { + writer.write("\tName\tPass\tMessage\tAuthor\tVersion\n"); + writer.write(catalogClass.toString()); + } + } + } + } else { + try (FileWriter writer = new FileWriter(file, true)) { + writer.write(catalogClass.toString()); + } + } + } + + @Override + public void testStarted(Description description) throws Exception { + } + + @Override + public void testFinished(Description description) throws Exception { + com.taosdata.jdbc.annotation.Description annotation + = description.getAnnotation(com.taosdata.jdbc.annotation.Description.class); + if (annotation != null) { + CatalogMethod method = new CatalogMethod(); + method.setMessage(annotation.value()); + method.setAuthor(annotation.author()); + method.setVersion(annotation.version()); + method.setSuccess(true); + method.setName(description.getMethodName()); + methods.addLast(method); + } + } + + @Override + public void testFailure(Failure failure) throws Exception { + com.taosdata.jdbc.annotation.Description annotation + = failure.getDescription().getAnnotation(com.taosdata.jdbc.annotation.Description.class); + CatalogMethod method = new CatalogMethod(); + method.setMessage(annotation.value()); + method.setAuthor(annotation.author()); + method.setVersion(annotation.version()); + method.setSuccess(false); + method.setName(failure.getDescription().getMethodName()); + 
methods.addFirst(method); + } + + @Override + public void testAssumptionFailure(Failure failure) { + } + + @Override + public void testIgnored(Description description) throws Exception { + super.testIgnored(description); + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogMethod.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogMethod.java new file mode 100644 index 0000000000000000000000000000000000000000..1dd074df2d1298781bbbfa7e7709113db2c6ca01 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogMethod.java @@ -0,0 +1,52 @@ +package com.taosdata.jdbc.annotation; + +/** + * Test method + */ +public class CatalogMethod { + private String name; + private boolean success; + private String message; + private String author; + private String version; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isSuccess() { + return success; + } + + public void setSuccess(boolean success) { + this.success = success; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public String getAuthor() { + return author; + } + + public void setAuthor(String author) { + this.author = author; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogRunner.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogRunner.java new file mode 100644 index 0000000000000000000000000000000000000000..08e5f9212287d517838448d0122ab0876812cc1d --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/CatalogRunner.java @@ -0,0 +1,36 @@ +package com.taosdata.jdbc.annotation; + +import 
org.junit.internal.AssumptionViolatedException; +import org.junit.internal.runners.model.EachTestNotifier; +import org.junit.runner.notification.RunNotifier; +import org.junit.runner.notification.StoppedByUserException; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.model.InitializationError; +import org.junit.runners.model.Statement; + +public class CatalogRunner extends BlockJUnit4ClassRunner { + + public CatalogRunner(Class testClass) throws InitializationError { + super(testClass); + } + + @Override + public void run(RunNotifier notifier) { + //add user-defined listener + notifier.addListener(new CatalogListener()); + EachTestNotifier testNotifier = new EachTestNotifier(notifier, getDescription()); + + notifier.fireTestRunStarted(getDescription()); + + try { + Statement statement = classBlock(notifier); + statement.evaluate(); + } catch (AssumptionViolatedException av) { + testNotifier.addFailedAssumption(av); + } catch (StoppedByUserException exception) { + throw exception; + } catch (Throwable e) { + testNotifier.addFailure(e); + } + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/Description.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/Description.java new file mode 100644 index 0000000000000000000000000000000000000000..669b0a088656c030281e82620117469b3a375c75 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/Description.java @@ -0,0 +1,19 @@ +package com.taosdata.jdbc.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +public @interface Description { + + String value(); + + // git blame author + String author() default ""; + + // since which version; + String version() default ""; +} diff --git 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/TestTarget.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/TestTarget.java new file mode 100644 index 0000000000000000000000000000000000000000..3d1db681647d3b23818143156ffd513c46a6e495 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/annotation/TestTarget.java @@ -0,0 +1,18 @@ +package com.taosdata.jdbc.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE}) +public @interface TestTarget { + + String alias() default ""; + + String author(); + + String version() default ""; + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java index 5b38f9b0640bb6eec6d1c9749db0abf0388c04ce..d2f5b915ee1b39146ccc91131fae801c291d08cc 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java @@ -2,7 +2,6 @@ package com.taosdata.jdbc.cases; import com.taosdata.jdbc.TSDBErrorNumbers; import org.junit.Assert; -import org.junit.Before; import org.junit.Ignore; import org.junit.Test; @@ -59,38 +58,31 @@ public class AuthenticationTest { @Test public void test() throws SQLException { // change password - String url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=taosdata"; - try (Connection conn = DriverManager.getConnection(url); - Statement stmt = conn.createStatement();) { - stmt.execute("alter user " + user + " pass '" + password + "'"); - } + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("alter user " + 
user + " pass '" + password + "'"); + stmt.close(); + conn.close(); // use new to login and execute query - url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password; - try (Connection conn = DriverManager.getConnection(url); - Statement stmt = conn.createStatement()) { - stmt.execute("show databases"); - ResultSet rs = stmt.getResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - while (rs.next()) { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password); + stmt = conn.createStatement(); + stmt.execute("show databases"); + ResultSet rs = stmt.getResultSet(); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ":" + rs.getString(i) + "\t"); } + System.out.println(); } // change password back - url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password; - try (Connection conn = DriverManager.getConnection(url); - Statement stmt = conn.createStatement()) { - stmt.execute("alter user " + user + " pass 'taosdata'"); - } - } - - @Before - public void before() { - try { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - } catch (ClassNotFoundException e) { - e.printStackTrace(); - } + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password); + stmt = conn.createStatement(); + stmt.execute("alter user " + user + " pass 'taosdata'"); + stmt.close(); + conn.close(); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java index d60ee14fbc87ba5d2bd2e851b5195b513fc4e028..5f821c5cc34dde0050d8e62afb6fc8ab17534a17 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java +++ 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java @@ -8,8 +8,13 @@ import java.sql.SQLException; public class ConnectWrongDatabaseTest { @Test(expected = SQLException.class) - public void connect() throws SQLException { + public void connectByJni() throws SQLException { DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata"); } + @Test(expected = SQLException.class) + public void connectByRestful() throws SQLException { + DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/wrong_db?user=root&password=taosdata"); + } + } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/GetLongWithDifferentTimestampPrecision.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/GetLongWithDifferentTimestampPrecision.java new file mode 100644 index 0000000000000000000000000000000000000000..1ba7bdc4057e5c9e2977d3723fe329e761e7258c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/GetLongWithDifferentTimestampPrecision.java @@ -0,0 +1,59 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.*; +import java.text.SimpleDateFormat; + +public class GetLongWithDifferentTimestampPrecision { + + private final String host = "127.0.0.1"; + + @Test + public void testRestful() throws SQLException { + // given + String url = "jdbc:TAOS-RS://" + host + ":6041/"; + Connection conn = DriverManager.getConnection(url, "root", "taosdata"); + long ts = System.currentTimeMillis(); + + // when and then + assertResultSet(conn, "ms", ts, ts); + assertResultSet(conn, "us", ts, ts * 1000); + assertResultSet(conn, "ns", ts, ts * 1000_000); + } + + @Test + public void testJni() throws SQLException { + // given + String url = "jdbc:TAOS://" + host + ":6030/"; + Connection conn = DriverManager.getConnection(url, "root", "taosdata"); + long ts = System.currentTimeMillis(); + + // when and then + assertResultSet(conn, "ms", 
ts, ts); + assertResultSet(conn, "us", ts, ts * 1000); + assertResultSet(conn, "ns", ts, ts * 1000_000); + } + + private void assertResultSet(Connection conn, String precision, long timestamp, long expect) throws SQLException { + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test"); + stmt.execute("create database if not exists test precision '" + precision + "'"); + stmt.execute("create table test.weather(ts timestamp, f1 int)"); + + String dateTimeStr = sdf.format(new Date(timestamp)); + stmt.execute("insert into test.weather values('" + dateTimeStr + "', 1)"); + + ResultSet rs = stmt.executeQuery("select * from test.weather"); + rs.next(); + long actual = rs.getLong("ts"); + Assert.assertEquals(expect, actual); + stmt.execute("drop database if exists test"); + } + } + + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java index beea990456ec98c2ab51fc2086034e0b31b570b6..05c7b0feca21f3f5b9062f9cbc26921aa607732a 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java @@ -18,9 +18,8 @@ public class InsertDbwithoutUseDbTest { private static final Random random = new Random(System.currentTimeMillis()); @Test - public void case001() throws ClassNotFoundException, SQLException { + public void case001() throws SQLException { // prepare schema - Class.forName("com.taosdata.jdbc.TSDBDriver"); String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); try (Statement stmt = conn.createStatement()) { @@ -51,9 +50,8 @@ public class InsertDbwithoutUseDbTest { } @Test - public void case002() throws 
ClassNotFoundException, SQLException { + public void case002() throws SQLException { // prepare the schema - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); final String url = "jdbc:TAOS-RS://" + host + ":6041/inWithoutDb?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); try (Statement stmt = conn.createStatement()) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java index 2ae03b4e5cd92056ce0ea995c8edcd21e51e24bb..cfd6a066acc2c2abd94e525fb69d4027a317134c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java @@ -25,7 +25,7 @@ public class TimestampPrecisonInNanoRestTest { private static final String date4 = format.format(new Date(timestamp1 + 10L)); private static final String date2 = date1 + "123455"; private static final String date3 = date4 + "123456"; - + private static Connection conn; @@ -43,7 +43,7 @@ public class TimestampPrecisonInNanoRestTest { stmt.execute("drop database if exists " + ns_timestamp_db); stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); - stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); stmt.close(); } @@ -54,7 +54,7 @@ public class TimestampPrecisonInNanoRestTest { stmt.execute("drop database if exists " + 
ns_timestamp_db); stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); - stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); stmt.close(); } @@ -105,7 +105,7 @@ public class TimestampPrecisonInNanoRestTest { @Test public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { - stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); checkCount(1l, rs); rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); @@ -139,7 +139,7 @@ public class TimestampPrecisonInNanoRestTest { public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { long timestamp4 = timestamp1 * 1000_000 + 123123; - stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); 
checkCount(1l, rs); rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); @@ -215,7 +215,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLargerThanInNumberTypeForFirstCol() { @@ -279,7 +279,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLessThanInDateTypeForFirstCol() { @@ -347,7 +347,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() { @@ -466,7 +466,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){ + public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather"); @@ -477,7 +477,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){ + public void canIntervalAndSlidingAcceptNsUnitForFirstCol() { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); rs.next(); @@ -492,7 +492,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){ + public void canIntervalAndSlidingAcceptNsUnitForSecondCol() { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= 
'" + date3 + "' interval(10000000b) sliding(10000000b)"); rs.next(); @@ -506,21 +506,17 @@ public class TimestampPrecisonInNanoRestTest { } } - @Test - public void testDataOutOfRangeExceptionForFirstCol() { + @Test(expected = SQLException.class) + public void testDataOutOfRangeExceptionForFirstCol() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"); - } catch (SQLException e) { - Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); } } - @Test - public void testDataOutOfRangeExceptionForSecondCol() { + @Test(expected = SQLException.class) + public void testDataOutOfRangeExceptionForSecondCol() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)"); - } catch (SQLException e) { - Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java index b08f8ff227dc16e1b413391e58a9de8fd0182c42..e7ce1d76f123a043d49eb64931c0d537d09664df 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java @@ -373,11 +373,12 @@ public class RestfulConnectionTest { properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/log?user=root&password=taosdata", properties); + conn = 
DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", properties); // create test database for test cases try (Statement stmt = conn.createStatement()) { stmt.execute("create database if not exists test"); } + } @AfterClass diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index 858f7b32f0d8a72be5b6cfa68aa120b08909df6c..da30bbd568c7043af493baeecc118f256ad73b10 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -10,27 +10,28 @@ import java.util.Random; public class RestfulJDBCTest { private static final String host = "127.0.0.1"; - private final Random random = new Random(System.currentTimeMillis()); - private Connection connection; + private static final Random random = new Random(System.currentTimeMillis()); + private static Connection connection; + private static final String dbname = "restful_test"; @Test public void testCase001() throws SQLException { // given - String sql = "drop database if exists restful_test"; + String sql = "drop database if exists " + dbname; // when boolean execute = execute(connection, sql); // then Assert.assertFalse(execute); // given - sql = "create database if not exists restful_test"; + sql = "create database if not exists " + dbname; // when execute = execute(connection, sql); // then Assert.assertFalse(execute); // given - sql = "use restful_test"; + sql = "use " + dbname; // when execute = execute(connection, sql); // then @@ -40,7 +41,7 @@ public class RestfulJDBCTest { @Test public void testCase002() throws SQLException { // given - String sql = "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + String sql = "create table " + dbname + ".weather(ts timestamp, temperature float, humidity int) 
tags(location nchar(64), groupId int)"; // when boolean execute = execute(connection, sql); // then @@ -51,7 +52,7 @@ public class RestfulJDBCTest { public void testCase004() throws SQLException { for (int i = 1; i <= 100; i++) { // given - String sql = "create table t" + i + " using weather tags('beijing', '" + i + "')"; + String sql = "create table " + dbname + ".t" + i + " using " + dbname + ".weather tags('beijing', '" + i + "')"; // when boolean execute = execute(connection, sql); // then @@ -67,7 +68,7 @@ public class RestfulJDBCTest { // given long currentTimeMillis = System.currentTimeMillis(); - String sql = "insert into t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")"; + String sql = "insert into " + dbname + ".t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")"; // when int affectRows = executeUpdate(connection, sql); // then @@ -82,7 +83,7 @@ public class RestfulJDBCTest { @Test public void testCase006() throws SQLException { // given - String sql = "select * from weather"; + String sql = "select * from " + dbname + ".weather"; // when ResultSet rs = executeQuery(connection, sql); ResultSetMetaData meta = rs.getMetaData(); @@ -101,7 +102,7 @@ public class RestfulJDBCTest { @Test public void testCase007() throws SQLException { // given - String sql = "drop database restful_test"; + String sql = "drop database " + dbname; // when boolean execute = execute(connection, sql); @@ -129,15 +130,23 @@ public class RestfulJDBCTest { } } - @Before - public void before() throws SQLException { - connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata&httpKeepAlive=false"); + @BeforeClass + public static void beforeClass() { + try { + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + } catch (SQLException e) { + e.printStackTrace(); + } 
} - @After - public void after() throws SQLException { - if (connection != null) + @AfterClass + public static void afterClass() throws SQLException { + if (connection != null) { + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.close(); connection.close(); + } } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java index c7fc81297264f3cf38795d9d5a3b7eccc51574c9..f3011af799c987ed399920875ae512fd8533ec77 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java @@ -186,22 +186,17 @@ public class RestfulResultSetMetaDataTest { } @BeforeClass - public static void beforeClass() { - try { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - stmt = conn.createStatement(); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("drop table if exists weather"); - stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); - stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); - rs = stmt.executeQuery("select * from restful_test.weather"); - rs.next(); - meta = rs.getMetaData(); - } catch (ClassNotFoundException | SQLException e) { - e.printStackTrace(); - } + public static void beforeClass() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + stmt = conn.createStatement(); + stmt.execute("create database 
if not exists restful_test"); + stmt.execute("use restful_test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); + stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); + rs = stmt.executeQuery("select * from restful_test.weather"); + rs.next(); + meta = rs.getMetaData(); } @AfterClass diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index 86b0f1be9e7ee99f50201dc98f197c07f5bb9aef..c1ca31ae388f577a33cc6f3a6bc943ce52112507 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -22,6 +22,20 @@ public class RestfulResultSetTest { private static Statement stmt; private static ResultSet rs; + @BeforeClass + public static void beforeClass() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + stmt = conn.createStatement(); + stmt.execute("drop database if exists restful_test"); + stmt.execute("create database if not exists restful_test"); + stmt.execute("use restful_test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); + stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); + rs = stmt.executeQuery("select * from restful_test.weather"); + rs.next(); + } + @Test public void wasNull() throws SQLException { Assert.assertFalse(rs.wasNull()); @@ 
-657,36 +671,16 @@ public class RestfulResultSetTest { Assert.assertTrue(rs.isWrapperFor(RestfulResultSet.class)); } - @BeforeClass - public static void beforeClass() { - try { - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - stmt = conn.createStatement(); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("drop table if exists weather"); - stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); - stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); - rs = stmt.executeQuery("select * from restful_test.weather"); - rs.next(); - } catch (SQLException e) { - e.printStackTrace(); - } - - } - @AfterClass - public static void afterClass() { - try { - if (rs != null) - rs.close(); - if (stmt != null) - stmt.close(); - if (conn != null) - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); + public static void afterClass() throws SQLException { + if (rs != null) + rs.close(); + if (stmt != null) { + stmt.execute("drop database if exists restful_test"); + stmt.close(); } + if (conn != null) + conn.close(); } } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java index a28bdbe2e5f6e0d545241a80071d85b0964a4102..4893e6062f8719152539d80a6da21730d47dfa92 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -572,11 +572,14 @@ public class SQLTest { @BeforeClass public static void before() throws SQLException { - connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + 
":6041/restful_test?user=root&password=taosdata"); + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); } @AfterClass public static void after() throws SQLException { + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists restful_test"); + stmt.close(); connection.close(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java index a78284b7a2ecf1b43b96180fa9d819e89ecdc595..f0cd200e04bc66bb0571534c99a348c3a823fcb3 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java @@ -1,6 +1,9 @@ package com.taosdata.jdbc.rs; -import org.junit.*; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; import java.sql.*; @@ -9,9 +12,8 @@ public class WasNullTest { private static final String host = "127.0.0.1"; private Connection conn; - @Test - public void testGetTimestamp() { + public void testGetTimestamp() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.execute("drop table if exists weather"); stmt.execute("create table if not exists weather(f1 timestamp, f2 timestamp, f3 int)"); @@ -34,14 +36,11 @@ public class WasNullTest { } } } - - } catch (SQLException e) { - e.printStackTrace(); } } @Test - public void testGetObject() { + public void testGetObject() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.execute("drop table if exists weather"); stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); @@ -63,32 +62,25 @@ public class WasNullTest { } } - } catch (SQLException e) { - e.printStackTrace(); } } @Before - public void before() { - try { - conn = 
DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - Statement stmt = conn.createStatement(); + public void before() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + try (Statement stmt = conn.createStatement()) { stmt.execute("drop database if exists restful_test"); stmt.execute("create database if not exists restful_test"); - } catch (SQLException e) { - e.printStackTrace(); + stmt.execute("use restful_test"); } } @After - public void after() { - try { + public void after() throws SQLException { + if (conn != null) { Statement statement = conn.createStatement(); statement.execute("drop database if exists restful_test"); - if (conn != null) - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); + conn.close(); } } } diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 3c395ec205a9c39b3c6e62532de536feef093544..fa0eb20055df38c2a8092637e30ae807e473fa30 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -10,9 +10,8 @@ const ArrayType = require('ref-array-napi'); const Struct = require('ref-struct-napi'); const FieldTypes = require('./constants'); const errors = require('./error'); +const _ = require('lodash') const TaosObjects = require('./taosobjects'); -const { NULL_POINTER } = require('ref-napi'); -const { Console } = require('console'); module.exports = CTaosInterface; @@ -223,6 +222,8 @@ TaosField.fields.name.type.size = 65; TaosField.defineProperty('type', ref.types.char); TaosField.defineProperty('bytes', ref.types.short); +//define schemaless line array +var smlLine = ArrayType(ref.coerceType('char *')) /** * @@ -238,7 +239,6 @@ function CTaosInterface(config = null, pass = false) { ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); /*Declare a bunch of functions first*/ /* Note, pointers to 
TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */ - if ('win32' == os.platform()) { taoslibname = 'taos'; } else { @@ -303,9 +303,15 @@ function CTaosInterface(config = null, pass = false) { // int64_t stime, void *param, void (*callback)(void *)); 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], //void taos_close_stream(TAOS_STREAM *tstr); - 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] + 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]], + + //Schemaless insert + //TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol,int precision) + // 'taos_schemaless_insert': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.int, ref.types.int, ref.types.int]] + 'taos_schemaless_insert': [ref.types.void_ptr, [ref.types.void_ptr, smlLine, 'int', 'int', 'int']] }); + if (pass == false) { if (config == null) { this._config = ref.alloc(ref.types.char_ptr, ref.NULL); @@ -664,3 +670,38 @@ CTaosInterface.prototype.closeStream = function closeStream(stream) { this.libtaos.taos_close_stream(stream); console.log("Closed stream"); } +//Schemaless insert API +/** + * TAOS* taos, char* lines[], int numLines, int protocol,int precision) + * using taos_errstr get error info, taos_errno get error code. Remmember + * to release taos_res, otherwile will lead memory leak. + * TAOS schemaless insert api + * @param {*} connection a valid database connection + * @param {*} lines string data, which statisfied with line proctocol + * @param {*} numLines number of rows in param lines. 
+ * @param {*} protocal Line protocol, enum type (0,1,2,3),indicate different line protocol + * @param {*} precision timestamp precision in lines, enum type (0,1,2,3,4,5,6) + * @returns TAOS_RES + * + */ +CTaosInterface.prototype.schemalessInsert = function schemalessInsert(connection,lines, protocal, precision) { + let _numLines = null; + let _lines = null; + + if(_.isString(lines)){ + _numLines = 1; + _lines = Buffer.alloc(_numLines * ref.sizeof.pointer); + ref.set(_lines,0,ref.allocCString(lines),ref.types.char_ptr); + } + else if(_.isArray(lines)){ + _numLines = lines.length; + _lines = Buffer.alloc(_numLines * ref.sizeof.pointer); + for(let i = 0; i < _numLines ; i++){ + ref.set(_lines,i*ref.sizeof.pointer,ref.allocCString(lines[i]),ref.types.char_ptr) + } + } + else{ + throw new errors.InterfaceError("Unsupport lines input") + } + return this.libtaos.taos_schemaless_insert(connection, _lines, _numLines, protocal, precision); +} diff --git a/src/connector/nodejs/nodetaos/constants.js b/src/connector/nodejs/nodetaos/constants.js index 3a866315507371fdfc69efb6de550b7c21f660b7..551cfce71677fbd6635a76884474e1b1aeac7ab9 100644 --- a/src/connector/nodejs/nodetaos/constants.js +++ b/src/connector/nodejs/nodetaos/constants.js @@ -1,3 +1,45 @@ + +const SCHEMALESS_PROTOCOL = { + TSDB_SML_UNKNOWN_PROTOCOL: 0, + TSDB_SML_LINE_PROTOCOL: 1, + TSDB_SML_TELNET_PROTOCOL: 2, + TSDB_SML_JSON_PROTOCOL: 3 +} +const SCHEMALESS_PRECISION = { + TSDB_SML_TIMESTAMP_NOT_CONFIGURED : 0, + TSDB_SML_TIMESTAMP_HOURS : 1, + TSDB_SML_TIMESTAMP_MINUTES : 2, + TSDB_SML_TIMESTAMP_SECONDS : 3, + TSDB_SML_TIMESTAMP_MILLI_SECONDS : 4, + TSDB_SML_TIMESTAMP_MICRO_SECONDS : 5, + TSDB_SML_TIMESTAMP_NANO_SECONDS : 6 +} +const typeCodesToName = { + 0: 'Null', + 1: 'Boolean', + 2: 'Tiny Int', + 3: 'Small Int', + 4: 'Int', + 5: 'Big Int', + 6: 'Float', + 7: 'Double', + 8: 'Binary', + 9: 'Timestamp', + 10: 'Nchar', + 11: 'Tinyint Unsigned', + 12: 'Smallint Unsigned', + 13: 'Int Unsigned', + 14: 'Bigint 
Unsigned', +} + +/** + * @function + * @param {number} typecode - The code to get the name of the type for + * @return {string} Name of the field type + */ +function getType(typecode) { + return typeCodesToName[typecode]; +} /** * Contains the the definitions/values assigned to various field types * @module FieldTypes @@ -18,71 +60,45 @@ * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after 1970-01-01 08:00:00.000 GMT. * @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string. - * - * - * + * * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result). * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result). */ module.exports = { - C_NULL : 0, - C_BOOL : 1, - C_TINYINT : 2, - C_SMALLINT : 3, - C_INT : 4, - C_BIGINT : 5, - C_FLOAT : 6, - C_DOUBLE : 7, - C_BINARY : 8, - C_TIMESTAMP : 9, - C_NCHAR : 10, - C_TINYINT_UNSIGNED : 11, - C_SMALLINT_UNSIGNED : 12, - C_INT_UNSIGNED : 13, - C_BIGINT_UNSIGNED : 14, - // NULL value definition - // NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL : 2, - C_TINYINT_NULL : -128, - C_TINYINT_UNSIGNED_NULL : 255, - C_SMALLINT_NULL : -32768, - C_SMALLINT_UNSIGNED_NULL : 65535, - C_INT_NULL : -2147483648, - C_INT_UNSIGNED_NULL : 4294967295, - C_BIGINT_NULL : -9223372036854775808n, - C_BIGINT_UNSIGNED_NULL : 18446744073709551615n, - C_FLOAT_NULL : 2146435072, - C_DOUBLE_NULL : -9223370937343148032, - C_NCHAR_NULL : 4294967295, - C_BINARY_NULL : 255, - C_TIMESTAMP_MILLI : 0, - C_TIMESTAMP_MICRO : 1, - getType, -} - -const typeCodesToName = { - 0 : 'Null', - 1 : 'Boolean', - 2 : 'Tiny Int', - 3 : 'Small Int', - 4 : 'Int', - 5 : 'Big Int', - 6 : 'Float', - 7 : 'Double', - 8 : 'Binary', - 9 : 'Timestamp', - 10 : 'Nchar', - 11 : 'TINYINT_UNSIGNED', - 12 : 
'SMALLINT_UNSIGNED', - 13 : 'INT_UNSIGNED', - 14 : 'BIGINT_UNSIGNED', + C_NULL: 0, + C_BOOL: 1, + C_TINYINT: 2, + C_SMALLINT: 3, + C_INT: 4, + C_BIGINT: 5, + C_FLOAT: 6, + C_DOUBLE: 7, + C_BINARY: 8, + C_TIMESTAMP: 9, + C_NCHAR: 10, + C_TINYINT_UNSIGNED: 11, + C_SMALLINT_UNSIGNED: 12, + C_INT_UNSIGNED: 13, + C_BIGINT_UNSIGNED: 14, + // NULL value definition + // NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL: 2, + C_TINYINT_NULL: -128, + C_TINYINT_UNSIGNED_NULL: 255, + C_SMALLINT_NULL: -32768, + C_SMALLINT_UNSIGNED_NULL: 65535, + C_INT_NULL: -2147483648, + C_INT_UNSIGNED_NULL: 4294967295, + C_BIGINT_NULL: -9223372036854775808n, + C_BIGINT_UNSIGNED_NULL: 18446744073709551615n, + C_FLOAT_NULL: 2146435072, + C_DOUBLE_NULL: -9223370937343148032, + C_NCHAR_NULL: 4294967295, + C_BINARY_NULL: 255, + C_TIMESTAMP_MILLI: 0, + C_TIMESTAMP_MICRO: 1, + getType, + SCHEMALESS_PROTOCOL, + SCHEMALESS_PRECISION } -/** - * @function - * @param {number} typecode - The code to get the name of the type for - * @return {string} Name of the field type - */ -function getType(typecode) { - return typeCodesToName[typecode]; -} diff --git a/src/connector/nodejs/nodetaos/cursor.js b/src/connector/nodejs/nodetaos/cursor.js index f879d89d487eae9290fd9fc70259699f27937928..3c01dc51b4aa658a8c5eb0bea06083bd57c7561e 100644 --- a/src/connector/nodejs/nodetaos/cursor.js +++ b/src/connector/nodejs/nodetaos/cursor.js @@ -211,7 +211,7 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { } } - + performance.mark('B'); performance.measure('query', 'A', 'B'); let response = this._createSetResponse(this._rowcount, time) @@ -474,3 +474,21 @@ TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = TDengineCursor.prototype.closeStream = function closeStream(stream) { this._chandle.closeStream(stream); } +/** + * schemaless insert + * @param {*} connection a valid database connection + * @param {*} lines string data, which 
statisfied with line proctocol + * @param {*} protocal Line protocol, enum type (0,1,2,3),indicate different line protocol + * @param {*} precision timestamp precision in lines, enum type (0,1,2,3,4,5,6) + * @returns TAOS_RES + * + */ +TDengineCursor.prototype.schemalessInsert = function schemalessInsert(lines, protocol, precision) { + this._result = this._chandle.schemalessInsert(this._connection._conn, lines, protocol, precision); + let errorNo = this._chandle.errno(this._result); + if (errorNo != 0) { + throw new errors.InterfaceError(errorNo + ":" + this._chandle.errStr(this._result)); + this._chandle.freeResult(this._result); + } + this._chandle.freeResult(this._result); +} diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index 711db94b84fab40d8d1809a44c45b24a9ab5bafb..d7eba48a463643dd293960251f6eebcb253d93c4 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -7,7 +7,7 @@ "test": "test" }, "scripts": { - "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js " + "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js && node test/testSchemalessInsert.js " }, "repository": { "type": "git", @@ -27,6 +27,7 @@ "homepage": "https://github.com/taosdata/tdengine#readme", "dependencies": { "ffi-napi": "^3.1.0", + "lodash": "^4.17.21", "ref-array-napi": "^1.2.1", "ref-napi": "^1.5.2", "ref-struct-napi": "^1.1.1" diff --git a/src/connector/nodejs/tdengine.js b/src/connector/nodejs/tdengine.js index 047c744a4fc90c6306e851eaa529a7f9f578fe12..ccc66b2c095a92b0b1f0f129015b606667df4712 100644 --- a/src/connector/nodejs/tdengine.js +++ b/src/connector/nodejs/tdengine.js @@ -1,4 +1,9 @@ var TDengineConnection = require('./nodetaos/connection.js') -module.exports.connect = function (connection={}) { - return new TDengineConnection(connection); -} +const TDengineConstant 
= require('./nodetaos/constants.js') +module.exports = { + connect: function (connection = {}) { + return new TDengineConnection(connection); + }, + SCHEMALESS_PROTOCOL: TDengineConstant.SCHEMALESS_PROTOCOL, + SCHEMALESS_PRECISION: TDengineConstant.SCHEMALESS_PRECISION, +} \ No newline at end of file diff --git a/src/connector/nodejs/test/testNanoseconds.js b/src/connector/nodejs/test/testNanoseconds.js index 85a7600b01f2c908f22e621488f22678083149ea..c3089aab3eaa4621b36297a70f2698dd08ed5988 100644 --- a/src/connector/nodejs/test/testNanoseconds.js +++ b/src/connector/nodejs/test/testNanoseconds.js @@ -3,7 +3,6 @@ var conn = taos.connect(); var c1 = conn.cursor(); let stime = new Date(); let interval = 1000; - function convertDateToTS(date) { let tsArr = date.toISOString().split("T") return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; diff --git a/src/connector/nodejs/test/testSchemalessInsert.js b/src/connector/nodejs/test/testSchemalessInsert.js new file mode 100644 index 0000000000000000000000000000000000000000..16998425ecda4226e56e5c2a9e49e83ba34bc34a --- /dev/null +++ b/src/connector/nodejs/test/testSchemalessInsert.js @@ -0,0 +1,84 @@ +const _ = require('lodash'); +const taos = require('../tdengine'); + +var conn = taos.connect({ host: "127.0.0.1", user: "root", password: "taosdata", config: "/etc/taos", port: 10 }); +var c1 = conn.cursor(); +executeUpdate("drop database if exists nodedb;"); +executeUpdate("create database if not exists nodedb ;"); +executeUpdate("use nodedb;"); + +let tbname1 = "line_protocol_arr"; +let tbname2 = "json_protocol_arr"; +let tbname3 = "json_protocol_str"; +let tbname4 = "line_protocol_str"; + + +let line1 = [tbname1 + ",t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", +tbname1 + ",t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833641000000" +]; +let line2 = ["{" + + "\"metric\": \"" + tbname2 + "\"," + + 
"\"timestamp\": 1626006833," + + "\"value\": 10," + + "\"tags\": {" + + " \"t1\": true," + + "\"t2\": false," + + "\"t3\": 10," + + "\"t4\": \"123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>\"" + + "}" + + "}" +]; + +let line3 = "{" + + "\"metric\": \"" + tbname3 + "\"," + + "\"timestamp\": 1626006833000," + + "\"value\": 10," + + "\"tags\": {" + + " \"t1\": true," + + "\"t2\": false," + + "\"t3\": 10," + + "\"t4\": \"123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>\"" + + "}" + + "}"; + +let line4 = tbname4 + ",t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639"; + + +try { + + c1.schemalessInsert(line1, taos.SCHEMALESS_PROTOCOL.TSDB_SML_LINE_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_NANO_SECONDS); + testSchemaless(tbname1, line1.length); + + c1.schemalessInsert(line2, taos.SCHEMALESS_PROTOCOL.TSDB_SML_JSON_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_SECONDS); + testSchemaless(tbname2, line2.length); + + c1.schemalessInsert(line3, taos.SCHEMALESS_PROTOCOL.TSDB_SML_JSON_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_MILLI_SECONDS); + testSchemaless(tbname3, 1); + + c1.schemalessInsert(line4, taos.SCHEMALESS_PROTOCOL.TSDB_SML_LINE_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_MILLI_SECONDS); + testSchemaless(tbname4, 1); + +} catch (err) { + console.log(err) +} +function executeUpdate(sql) { + console.log(sql); + c1.execute(sql); +} + +function testSchemaless(tbname, numLines) { + let sql = "select count(*) from " + tbname + ";"; + executeUpdate(sql); + let affectRows = _.first(c1.fetchall()); + if (affectRows != numLines) { + console.log(1); + console.log(line2); + throw "protocol " + tbname + " schemaless insert success,but can't select as expect." 
+ } + else { + console.log("protocol " + tbname + " schemaless insert success, can select as expect.") + } + console.log("===================") +} + +setTimeout(() => conn.close(), 2000); diff --git a/src/connector/nodejs/test/testUnsignedType.js b/src/connector/nodejs/test/testUnsignedType.js index 82413afebad0b75116fe3ea46e50716843d87c84..14b102972a898c582e0011698bfd7b3cd771bc42 100644 --- a/src/connector/nodejs/test/testUnsignedType.js +++ b/src/connector/nodejs/test/testUnsignedType.js @@ -1,14 +1,13 @@ const taos = require('../tdengine'); var conn = taos.connect({ host: "127.0.0.1", user: "root", password: "taosdata", config: "/etc/taos", port: 10 }); var c1 = conn.cursor(); -executeUpdate("create database nodedb;"); +executeUpdate("create database if not exists nodedb;"); executeUpdate("use nodedb;"); -executeUpdate("create table unsigntest(ts timestamp,ut tinyint unsigned,us smallint unsigned,ui int unsigned,ub bigint unsigned,bi bigint);"); +executeUpdate("create table if not exists unsigntest(ts timestamp,ut tinyint unsigned,us smallint unsigned,ui int unsigned,ub bigint unsigned,bi bigint);"); executeUpdate("insert into unsigntest values (now, 254,65534,4294967294,18446744073709551614,9223372036854775807);"); executeUpdate("insert into unsigntest values (now, 0,0,0,0,-9223372036854775807);"); executeQuery("select * from unsigntest;"); -executeUpdate("drop database nodedb;"); - +executeUpdate("drop database if exists nodedb;"); function executeUpdate(sql) { console.log(sql); diff --git a/src/connector/python/README.md b/src/connector/python/README.md index 679735131105739ae59940c29b51f57496a2057d..1bde964828f1c52bf65e62ef67f2fdb7fc90c355 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -51,7 +51,7 @@ conn.close() import taos conn = taos.connect() -conn.exec("create database if not exists pytest") +conn.execute("create database if not exists pytest") result = conn.query("show databases") num_of_fields = result.field_count 
@@ -60,7 +60,7 @@ for field in result.fields: for row in result: print(row) result.close() -conn.exec("drop database pytest") +conn.execute("drop database pytest") conn.close() ``` @@ -136,11 +136,11 @@ from taos import * conn = connect() dbname = "pytest_taos_stmt" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec( +conn.execute( "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \ @@ -196,11 +196,11 @@ from taos import * conn = connect() dbname = "pytest_taos_stmt" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec( +conn.execute( "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \ @@ -249,12 +249,12 @@ import taos conn = taos.connect() dbname = "pytest_taos_subscribe_callback" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec("create table if not exists log(ts timestamp, n int)") +conn.execute("create table if not exists log(ts timestamp, n int)") for i in range(10): - conn.exec("insert into log values(now, %d)" % i) + conn.execute("insert into log values(now, %d)" % i) sub = conn.subscribe(True, "test", "select * from log", 1000) print("# consume from begin") @@ -263,14 +263,14 @@ 
for ts, n in sub.consume(): print("# consume new data") for i in range(5): - conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i)) + conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i)) result = sub.consume() for ts, n in result: print(ts, n) print("# consume with a stop condition") for i in range(10): - conn.exec("insert into log values(now, %d)" % int(random() * 10)) + conn.execute("insert into log values(now, %d)" % int(random() * 10)) result = sub.consume() try: ts, n = next(result) @@ -284,7 +284,7 @@ for i in range(10): sub.close() -conn.exec("drop database if exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) conn.close() ``` @@ -311,23 +311,23 @@ def test_subscribe_callback(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_subscribe_callback" try: - conn.exec("drop database if exists %s" % dbname) - conn.exec("create database if not exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) - conn.exec("create table if not exists log(ts timestamp, n int)") + conn.execute("create table if not exists log(ts timestamp, n int)") print("# subscribe with callback") sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) for i in range(10): - conn.exec("insert into log values(now, %d)" % i) + conn.execute("insert into log values(now, %d)" % i) time.sleep(0.7) sub.close() - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() except Exception as err: - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() raise err @@ -374,10 +374,10 @@ def test_stream(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_stream" try: - conn.exec("drop database if exists %s" % dbname) - conn.exec("create database if not exists %s" % dbname) + conn.execute("drop 
database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) - conn.exec("create table if not exists log(ts timestamp, n int)") + conn.execute("create table if not exists log(ts timestamp, n int)") result = conn.query("select count(*) from log interval(5s)") assert result.field_count == 2 @@ -386,13 +386,13 @@ def test_stream(conn): stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) for _ in range(0, 20): - conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") + conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") time.sleep(2) stream.close() - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() except Exception as err: - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() raise err @@ -408,8 +408,8 @@ import taos conn = taos.connect() dbname = "pytest_line" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s precision 'us'" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) conn.select_db(dbname) lines = [ @@ -431,7 +431,7 @@ for row in result: result.close() -conn.exec("drop database if exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) conn.close() ``` diff --git a/src/connector/python/examples/subscribe-async.py b/src/connector/python/examples/subscribe-async.py index 3782ce5505152e78838406e313094eb911bea4a2..49156de7edfb4322d7888727c28b76868cf6a16a 100644 --- a/src/connector/python/examples/subscribe-async.py +++ b/src/connector/python/examples/subscribe-async.py @@ -7,7 +7,7 @@ import time def subscribe_callback(p_sub, p_result, p_param, errno): # type: (c_void_p, c_void_p, c_void_p, c_int) -> None print("# fetch in callback") - result = 
TaosResult(p_result) + result = TaosResult(c_void_p(p_result)) result.check_error(errno) for row in result.rows_iter(): ts, n = row() @@ -18,18 +18,21 @@ def test_subscribe_callback(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_subscribe_callback" try: + print("drop if exists") conn.execute("drop database if exists %s" % dbname) + print("create database") conn.execute("create database if not exists %s" % dbname) - conn.select_db(dbname) - conn.execute("create table if not exists log(ts timestamp, n int)") + print("create table") + # conn.execute("use %s" % dbname) + conn.execute("create table if not exists %s.log(ts timestamp, n int)" % dbname) print("# subscribe with callback") - sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) + sub = conn.subscribe(False, "test", "select * from %s.log" % dbname, 1000, subscribe_callback) for i in range(10): - conn.execute("insert into log values(now, %d)" % i) + conn.execute("insert into %s.log values(now, %d)" % (dbname, i)) time.sleep(0.7) - # sub.close() + sub.close() conn.execute("drop database if exists %s" % dbname) # conn.close() diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py index 083ddc99aea8dc6f39b1f22ac5f77d2584a2fe69..05659714ef86da3bda383bfe7d7b25403848637f 100644 --- a/src/connector/python/taos/bind.py +++ b/src/connector/python/taos/bind.py @@ -124,6 +124,21 @@ class TaosBind(ctypes.Structure): self.buffer_length = length self.length = pointer(c_size_t(self.buffer_length)) + def json(self, value): + buffer = None + length = 0 + if isinstance(value, str): + bytes = value.encode("utf-8") + buffer = create_string_buffer(bytes) + length = len(bytes) + else: + buffer = value + length = len(value) + self.buffer_type = FieldType.C_JSON + self.buffer = cast(buffer, c_void_p) + self.buffer_length = length + self.length = pointer(c_size_t(self.buffer_length)) + def tinyint_unsigned(self, value): self.buffer_type = 
FieldType.C_TINYINT_UNSIGNED self.buffer = cast(pointer(c_uint8(value)), c_void_p) @@ -356,6 +371,11 @@ class TaosMultiBind(ctypes.Structure): self.buffer_type = FieldType.C_NCHAR self._str_to_buffer(values) + def json(self, values): + # type: (list[str]) -> None + self.buffer_type = FieldType.C_JSON + self._str_to_buffer(values) + def tinyint_unsigned(self, values): self.buffer_type = FieldType.C_TINYINT_UNSIGNED self.buffer_length = sizeof(c_uint8) diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 37bc90d4c63fe3f75b12d46bb1bf535441869938..740af5838235a6abc41ae27e7c6a462c30977616 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -110,7 +110,7 @@ _libtaos.taos_get_client_info.restype = c_char_p def taos_get_client_info(): # type: () -> str """Get client version info.""" - return _libtaos.taos_get_client_info().decode() + return _libtaos.taos_get_client_info().decode("utf-8") _libtaos.taos_get_server_info.restype = c_char_p @@ -120,7 +120,7 @@ _libtaos.taos_get_server_info.argtypes = (c_void_p,) def taos_get_server_info(connection): # type: (c_void_p) -> str """Get server version as string.""" - return _libtaos.taos_get_server_info(connection).decode() + return _libtaos.taos_get_server_info(connection).decode("utf-8") _libtaos.taos_close.restype = None @@ -308,16 +308,14 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par """ if callback != None: callback = subscribe_callback_type(callback) - if param != None: - param = c_void_p(param) return c_void_p( _libtaos.taos_subscribe( connection, 1 if restart else 0, c_char_p(topic.encode("utf-8")), c_char_p(sql.encode("utf-8")), - callback or None, - param, + callback, + c_void_p(param), interval, ) ) diff --git a/src/connector/python/taos/constants.py b/src/connector/python/taos/constants.py index 8ad5b69fc099718fa4f4b8c08cf689b17663eae0..34044a15fc0cd73323552f1b4b8c280d6cad5a9b 100644 --- 
a/src/connector/python/taos/constants.py +++ b/src/connector/python/taos/constants.py @@ -25,6 +25,7 @@ class FieldType(object): C_SMALLINT_UNSIGNED = 12 C_INT_UNSIGNED = 13 C_BIGINT_UNSIGNED = 14 + C_JSON = 15 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 diff --git a/src/connector/python/taos/cursor.py b/src/connector/python/taos/cursor.py index 5d21ff95af5d81367e7143d001cc688d90877b67..a8d82bea2ea188f8a08d6603dd33735ea0a0a5af 100644 --- a/src/connector/python/taos/cursor.py +++ b/src/connector/python/taos/cursor.py @@ -188,6 +188,9 @@ class TaosCursor(object): if dataType.upper() == "NCHAR": if self._description[col][1] == FieldType.C_NCHAR: return True + if dataType.upper() == "JSON": + if self._description[col][1] == FieldType.C_JSON: + return True return False diff --git a/src/connector/python/taos/field.py b/src/connector/python/taos/field.py index b0bec58b932f2136b868739bb28fca04de759e3f..a6d64422e238b46b096a5ae62c42566666f226ad 100644 --- a/src/connector/python/taos/field.py +++ b/src/connector/python/taos/field.py @@ -144,7 +144,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_ try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) + res.append(tmpstr.value.decode("utf-8")) else: res.append( ( @@ -172,7 +172,7 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=Field if rbyte == 1 and buffer[0] == b'\xff': res.append(None) else: - res.append(cast(buffer, c_char_p).value.decode()) + res.append(cast(buffer, c_char_p).value.decode("utf-8")) return res @@ -188,7 +188,7 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldT if rbyte == 4 and buffer[:4] == b'\xff'*4: res.append(None) else: - res.append(cast(buffer, c_char_p).value.decode()) + res.append(cast(buffer, c_char_p).value.decode("utf-8")) return res @@ -207,6 +207,7 @@ CONVERT_FUNC = { 
FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python, + FieldType.C_JSON: _crow_nchar_to_python, } CONVERT_FUNC_BLOCK = { @@ -224,6 +225,7 @@ CONVERT_FUNC_BLOCK = { FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python, + FieldType.C_JSON: _crow_nchar_to_python_block, } # Corresponding TAOS_FIELD structure in C diff --git a/src/connector/python/taos/result.py b/src/connector/python/taos/result.py index c9feb4d6502515cc6e3e2d4be688f2e7fcd895b2..8b8a0cf108cf7c941d0a6476d8a9c1e2c5a41b84 100644 --- a/src/connector/python/taos/result.py +++ b/src/connector/python/taos/result.py @@ -3,6 +3,8 @@ from .cinterface import * # from .connection import TaosConnection from .error import * +from ctypes import c_void_p + class TaosResult(object): """TDengine result interface""" @@ -12,7 +14,11 @@ class TaosResult(object): # to make the __del__ order right self._conn = conn self._close_after = close_after - self._result = result + if isinstance(result, c_void_p): + self._result = result + else: + self._result = c_void_p(result) + self._fields = None self._field_count = None self._precision = None diff --git a/src/connector/python/tests/test_stream.py b/src/connector/python/tests/test_stream.py index de6e20928b176e51bc6d350fb01268459f4e7f95..32ec4c5999c975be907cf69a42a04b5f4dd5d54c 100644 --- a/src/connector/python/tests/test_stream.py +++ b/src/connector/python/tests/test_stream.py @@ -20,7 +20,8 @@ def stream_callback(p_param, p_result, p_row): result = TaosResult(p_result) row = TaosRow(result, p_row) try: - ts, count = row() + ts, count = row.as_tuple() + print(ts, count) p = cast(p_param, POINTER(Counter)) p.contents.count += count print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, 
p.contents.count)) diff --git a/src/connector/python/tests/test_subscribe.py b/src/connector/python/tests/test_subscribe.py index 99fe5b263625c63200f416ec98fcb561773becd8..d8acd60e4f3b32bb87a9663b3f7dc43a73f2877b 100644 --- a/src/connector/python/tests/test_subscribe.py +++ b/src/connector/python/tests/test_subscribe.py @@ -63,7 +63,7 @@ def test_subscribe(conn): def subscribe_callback(p_sub, p_result, p_param, errno): # type: (c_void_p, c_void_p, c_void_p, c_int) -> None print("callback") - result = TaosResult(p_result) + result = TaosResult(c_void_p(p_result)) result.check_error(errno) for row in result.rows_iter(): ts, n = row() @@ -76,7 +76,7 @@ def test_subscribe_callback(conn): try: conn.execute("drop database if exists %s" % dbname) conn.execute("create database if not exists %s" % dbname) - conn.select_db(dbname) + conn.execute("use %s" % dbname) conn.execute("create table if not exists log(ts timestamp, n int)") print("# subscribe with callback") diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index d9c4a84234184b14d272854838625e023dd55cea..ac91b498309bd951fab5860b223162ef40aff606 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -19,10 +19,14 @@ ENDIF () ADD_EXECUTABLE(taosd ${SRC}) +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(taosd jemalloc) +ENDIF () + IF (TD_BUILD_HTTP) -TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson ${LINK_LUA} lz4 balance sync ${LINK_JEMALLOC}) ELSE () -TARGET_LINK_LIBRARIES(taosd mnode monitor tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(taosd mnode monitor tsdb twal vnode cJson ${LINK_LUA} lz4 balance sync ${LINK_JEMALLOC}) ENDIF () IF (TD_SOMODE_STATIC) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 69c6203ab355e93272113b0cdfbab3e066be8148..420f462051687c72019d7c0697a23c940e4b8ae0 
100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -78,18 +78,20 @@ static SStep tsDnodeSteps[] = { {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt}, {"dnode-mread", dnodeInitMRead, NULL}, {"dnode-mwrite", dnodeInitMWrite, NULL}, - {"dnode-mpeer", dnodeInitMPeer, NULL}, + {"dnode-mpeer", dnodeInitMPeer, NULL}, {"dnode-client", dnodeInitClient, dnodeCleanupClient}, {"dnode-server", dnodeInitServer, dnodeCleanupServer}, {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, {"dnode-mread", NULL, dnodeCleanupMRead}, {"dnode-mwrite", NULL, dnodeCleanupMWrite}, - {"dnode-mpeer", NULL, dnodeCleanupMPeer}, + {"dnode-mpeer", NULL, dnodeCleanupMPeer}, {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, +#ifdef LUA_EMBEDDED {"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup}, +#endif {"dnode-grant", grantInit, grantCleanUp}, }; @@ -119,7 +121,7 @@ static int dnodeCreateDir(const char *dir) { if (mkdir(dir, 0755) != 0 && errno != EEXIST) { return -1; } - + return 0; } @@ -168,7 +170,6 @@ int32_t dnodeInitSystem() { taosResolveCRC(); taosInitGlobalCfg(); taosReadGlobalLogCfg(); - taosSetCoreDump(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -188,6 +189,7 @@ int32_t dnodeInitSystem() { return -1; } + taosSetCoreDump(); dInfo("start to initialize TDengine"); taosInitNotes(); @@ -261,8 +263,8 @@ static int32_t dnodeInitStorage() { if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno)); return -1; - } - + } + if (tfsInit(tsDiskCfg, tsDiskCfgNum) < 0) { dError("failed to init TFS since %s", tstrerror(terrno)); return -1; @@ -294,7 +296,7 @@ static int32_t dnodeInitStorage() { if (dnodeCreateDir(tsMnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsMnodeDir, 
strerror(errno)); return -1; - } + } if (dnodeCreateDir(tsDnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno)); diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index f62e0c0df41f2fe399d0f4c1c8e661fcd0ef91b9..7676343b37d242c1d174a31959ea4be25a9d5af2 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -120,6 +120,14 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pMsg->pCont == NULL) return; + if (pMsg->msgType >= TSDB_MSG_TYPE_MAX) { + dError("RPC %p, shell msg type:%d is not processed", pMsg->handle, pMsg->msgType); + rpcMsg.code = TSDB_CODE_DND_MSG_NOT_PROCESSED; + rpcSendResponse(&rpcMsg); + rpcFreeCont(pMsg->pCont); + return; + } + SRunStatus dnodeStatus = dnodeGetRunStatus(); if (dnodeStatus == TSDB_RUN_STATUS_STOPPED) { dError("RPC %p, shell msg:%s is ignored since dnode exiting", pMsg->handle, taosMsg[pMsg->msgType]); diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index e4d1d102e0319706c723f2659b843791654b96a7..d41a2c6a8ae442f09b20c1dc55d06d5d5273cd88 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -19,6 +19,7 @@ #include "tconfig.h" #include "dnodeMain.h" +bool dnodeExit = false; static tsem_t exitSem; static void siguser1Handler(int32_t signum, void *sigInfo, void *context); static void siguser2Handler(int32_t signum, void *sigInfo, void *context); @@ -182,6 +183,8 @@ static void sigintHandler(int32_t signum, void *sigInfo, void *context) { syslog(LOG_INFO, "Shut down signal is %d", signum); syslog(LOG_INFO, "Shutting down TDengine service..."); + dnodeExit = true; + // inform main thread to exit tsem_post(&exitSem); #ifdef WINDOWS diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index a5b0e9fe30e88f89af2e79af16602dac9500a305..981c150f1c8a523ae78749560545dd985af73eac 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -30,6 +30,7 @@ typedef 
struct { int32_t * vnodeList; } SOpenVnodeThread; +extern bool dnodeExit; extern void * tsDnodeTmr; static void * tsStatusTimer = NULL; static uint32_t tsRebootTime = 0; @@ -127,9 +128,20 @@ int32_t dnodeInitVnodes() { int32_t threadNum = tsNumOfCores; int32_t vnodesPerThread = numOfVnodes / threadNum + 1; SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread)); + + if (threads == NULL) { + return TSDB_CODE_DND_OUT_OF_MEMORY; + } + for (int32_t t = 0; t < threadNum; ++t) { threads[t].threadIndex = t; threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t)); + + if (threads[t].vnodeList == NULL) { + dError("vnodeList allocation failed"); + status = TSDB_CODE_DND_OUT_OF_MEMORY; + goto DNODE_INIT_VNODES_OUT; + } } for (int32_t v = 0; v < numOfVnodes; ++v) { @@ -163,18 +175,24 @@ int32_t dnodeInitVnodes() { } openVnodes += pThread->opened; failedVnodes += pThread->failed; - free(pThread->vnodeList); } - free(threads); dInfo("there are total vnodes:%d, opened:%d", numOfVnodes, openVnodes); if (failedVnodes != 0) { dError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes); - return -1; + status = TSDB_CODE_DND_VNODE_OPEN_FAILED; } - return TSDB_CODE_SUCCESS; +DNODE_INIT_VNODES_OUT: + + for (int32_t t = 0; t < threadNum; ++t) { + SOpenVnodeThread *pThread = &threads[t]; + free(pThread->vnodeList); + } + free(threads); + + return status; } void dnodeCleanupVnodes() { @@ -205,6 +223,22 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { if (clusterId[0] != '\0') { dnodeSetDropped(); dError("exit zombie dropped dnode"); + + // warning: only for k8s! 
+ while (tsDnodeNopLoop) { + if (dnodeExit) { + dInfo("Break loop"); + return; + } + + dInfo("Nop loop"); +#ifdef WINDOWS + Sleep(100); +#else + usleep(100000); +#endif + } + exit(EXIT_FAILURE); } } diff --git a/src/inc/taos.h b/src/inc/taos.h index 6cd62d3177d2490c5c89bf910e258c956c2f69fc..2b74f9c1844641ccef5ad1fb8e9d25a4d3262ecc 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -46,6 +46,7 @@ typedef void **TAOS_ROW; #define TSDB_DATA_TYPE_USMALLINT 12 // 2 bytes #define TSDB_DATA_TYPE_UINT 13 // 4 bytes #define TSDB_DATA_TYPE_UBIGINT 14 // 8 bytes +#define TSDB_DATA_TYPE_JSON 15 // json string typedef enum { TSDB_OPTION_LOCALE, @@ -208,6 +209,8 @@ DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList); DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision); +DLL_EXPORT int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth); + #ifdef __cplusplus } #endif diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 9d48ed59cecfffe1ea36971fa502ed9dae3fb0bc..b7c628a1189c1c9f368d4079de6a2e1078e2cfa8 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -39,7 +39,7 @@ extern "C" { #define TSKEY_INITIAL_VAL INT64_MIN // Bytes for each type. 
-extern const int32_t TYPE_BYTES[15]; +extern const int32_t TYPE_BYTES[16]; // TODO: replace and remove code below #define CHAR_BYTES sizeof(char) @@ -70,6 +70,11 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN #define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF #define TSDB_DATA_BINARY_NULL 0xFF +#define TSDB_DATA_JSON_PLACEHOLDER 0x7F +#define TSDB_DATA_JSON_NULL 0xFFFFFFFF +#define TSDB_DATA_JSON_null 0xFFFFFFFE +#define TSDB_DATA_JSON_NOT_NULL 0x01 +#define TSDB_DATA_JSON_CAN_NOT_COMPARE 0x7FFFFFFF #define TSDB_DATA_UTINYINT_NULL 0xFF #define TSDB_DATA_USMALLINT_NULL 0xFFFF @@ -80,12 +85,17 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DATA_NULL_STR_L "null" #define TSDB_DEFAULT_USER "root" + #ifdef _TD_POWER_ #define TSDB_DEFAULT_PASS "powerdb" #elif (_TD_TQ_ == true) #define TSDB_DEFAULT_PASS "tqueue" #elif (_TD_PRO_ == true) #define TSDB_DEFAULT_PASS "prodb" +#elif (_TD_KH_ == true) +#define TSDB_DEFAULT_PASS "khroot" +#elif (_TD_JH_ == true) +#define TSDB_DEFAULT_PASS "jhdata" #else #define TSDB_DEFAULT_PASS "taosdata" #endif @@ -171,6 +181,9 @@ do { \ #define TSDB_RELATION_MATCH 14 #define TSDB_RELATION_NMATCH 15 +#define TSDB_RELATION_CONTAINS 16 +#define TSDB_RELATION_ARROW 17 + #define TSDB_BINARY_OP_ADD 30 #define TSDB_BINARY_OP_SUBTRACT 31 #define TSDB_BINARY_OP_MULTIPLY 32 @@ -217,8 +230,11 @@ do { \ */ #define TSDB_MAX_BYTES_PER_ROW 49151 #define TSDB_MAX_TAGS_LEN 16384 +#define TSDB_MAX_JSON_TAGS_LEN (4096*TSDB_NCHAR_SIZE + 2 + 1) // 2->var_header_len 1->type #define TSDB_MAX_TAGS 128 #define TSDB_MAX_TAG_CONDITIONS 1024 +#define TSDB_MAX_JSON_KEY_LEN 256 +#define TSDB_MAX_JSON_KEY_MD5_LEN 16 #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 53c99f05bc44951202e2b673a40aced68c90eda5..8700cf246a91655c307bbb4c3c2c111d3271fc67 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -24,7 +24,7 @@ extern "C" { #include #define 
TAOS_DEF_ERROR_CODE(mod, code) ((int32_t)((0x80000000 | ((mod)<<16) | (code)))) - + #define TAOS_SYSTEM_ERROR(code) (0x80ff0000 | (code)) #define TAOS_SUCCEEDED(err) ((err) >= 0) #define TAOS_FAILED(err) ((err) < 0) @@ -33,46 +33,46 @@ const char* tstrerror(int32_t err); int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) - + #define TSDB_CODE_SUCCESS 0 #define TSDB_CODE_FAILED -1 // unknown or needn't tell detail error // rpc -#define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") -#define TSDB_CODE_RPC_AUTH_REQUIRED TAOS_DEF_ERROR_CODE(0, 0x0002) //"Authentication required") -#define TSDB_CODE_RPC_AUTH_FAILURE TAOS_DEF_ERROR_CODE(0, 0x0003) //"Authentication failure") -#define TSDB_CODE_RPC_REDIRECT TAOS_DEF_ERROR_CODE(0, 0x0004) //"Redirect") -#define TSDB_CODE_RPC_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0005) //"System not ready") // peer is not ready to process data -#define TSDB_CODE_RPC_ALREADY_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0006) //"Message already processed") -#define TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x0007) //"Last session not finished") -#define TSDB_CODE_RPC_MISMATCHED_LINK_ID TAOS_DEF_ERROR_CODE(0, 0x0008) //"Mismatched meter id") -#define TSDB_CODE_RPC_TOO_SLOW TAOS_DEF_ERROR_CODE(0, 0x0009) //"Processing of request timed out") -#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x000A) //"Number of sessions reached limit") // too many sessions -#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x000B) //"Unable to establish connection") -#define TSDB_CODE_RPC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x000C) //"Unexpected generic error in RPC") -#define TSDB_CODE_RPC_UNEXPECTED_RESPONSE TAOS_DEF_ERROR_CODE(0, 0x000D) //"Unexpected response") -#define TSDB_CODE_RPC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x000E) //"Invalid value") -#define TSDB_CODE_RPC_INVALID_TRAN_ID TAOS_DEF_ERROR_CODE(0, 0x000F) //"Invalid transaction id") -#define TSDB_CODE_RPC_INVALID_SESSION_ID 
TAOS_DEF_ERROR_CODE(0, 0x0010) //"Invalid session id") -#define TSDB_CODE_RPC_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0011) //"Invalid message type") -#define TSDB_CODE_RPC_INVALID_RESPONSE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0012) //"Invalid response type") -#define TSDB_CODE_RPC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0013) //"Client and server's time is not synchronized") -#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready") -#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to resolve FQDN") -#define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version") +#define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress" +#define TSDB_CODE_RPC_AUTH_REQUIRED TAOS_DEF_ERROR_CODE(0, 0x0002) //"Authentication required" +#define TSDB_CODE_RPC_AUTH_FAILURE TAOS_DEF_ERROR_CODE(0, 0x0003) //"Authentication failure" +#define TSDB_CODE_RPC_REDIRECT TAOS_DEF_ERROR_CODE(0, 0x0004) //"Redirect" +#define TSDB_CODE_RPC_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0005) //"System not ready" // peer is not ready to process data +#define TSDB_CODE_RPC_ALREADY_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0006) //"Message already processed" +#define TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x0007) //"Last session not finished" +#define TSDB_CODE_RPC_MISMATCHED_LINK_ID TAOS_DEF_ERROR_CODE(0, 0x0008) //"Mismatched meter id" +#define TSDB_CODE_RPC_TOO_SLOW TAOS_DEF_ERROR_CODE(0, 0x0009) //"Processing of request timed out" +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x000A) //"Number of sessions reached limit" // too many sessions +#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x000B) //"Unable to establish connection" +#define TSDB_CODE_RPC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x000C) //"Unexpected generic error in RPC" +#define TSDB_CODE_RPC_UNEXPECTED_RESPONSE TAOS_DEF_ERROR_CODE(0, 0x000D) //"Unexpected response" +#define 
TSDB_CODE_RPC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x000E) //"Invalid value" +#define TSDB_CODE_RPC_INVALID_TRAN_ID TAOS_DEF_ERROR_CODE(0, 0x000F) //"Invalid transaction id" +#define TSDB_CODE_RPC_INVALID_SESSION_ID TAOS_DEF_ERROR_CODE(0, 0x0010) //"Invalid session id" +#define TSDB_CODE_RPC_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0011) //"Invalid message type" +#define TSDB_CODE_RPC_INVALID_RESPONSE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0012) //"Invalid response type" +#define TSDB_CODE_RPC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0013) //"Client and server's time is not synchronized" +#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready" +#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to resolve FQDN" +#define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version" //common & util -#define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported") -#define TSDB_CODE_COM_MEMORY_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0101) //"Memory corrupted") -#define TSDB_CODE_COM_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0102) //"Out of memory") -#define TSDB_CODE_COM_INVALID_CFG_MSG TAOS_DEF_ERROR_CODE(0, 0x0103) //"Invalid config message") -#define TSDB_CODE_COM_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0104) //"Data file corrupted") -#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0105) //"Ref out of memory") -#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0106) //"too many Ref Objs") -#define TSDB_CODE_REF_ID_REMOVED TAOS_DEF_ERROR_CODE(0, 0x0107) //"Ref ID is removed") -#define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) //"Invalid Ref ID") -#define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) //"Ref is already there") -#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there") +#define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported" +#define 
TSDB_CODE_COM_MEMORY_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0101) //"Memory corrupted" +#define TSDB_CODE_COM_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0102) //"Out of memory" +#define TSDB_CODE_COM_INVALID_CFG_MSG TAOS_DEF_ERROR_CODE(0, 0x0103) //"Invalid config message" +#define TSDB_CODE_COM_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0104) //"Data file corrupted" +#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0105) //"Ref out of memory" +#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0106) //"too many Ref Objs" +#define TSDB_CODE_REF_ID_REMOVED TAOS_DEF_ERROR_CODE(0, 0x0107) //"Ref ID is removed" +#define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) //"Invalid Ref ID" +#define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) //"Ref is already there" +#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there" //client #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid Operation") @@ -114,102 +114,103 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0224) //"Value out of range") #define TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x0225) //"Invalid line protocol type") #define TSDB_CODE_TSC_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x0226) //"Invalid timestamp precision type") +#define TSDB_CODE_TSC_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0227) //"Result set too large to be output") // mnode -#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed") -#define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) //"Message is progressing") -#define TSDB_CODE_MND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0302) //"Messag need to be reprocessed") -#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) //"Insufficient privilege for operation") -#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0304) //"Unexpected generic error in mnode") -#define 
TSDB_CODE_MND_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x0305) //"Invalid message connection") -#define TSDB_CODE_MND_INVALID_MSG_VERSION TAOS_DEF_ERROR_CODE(0, 0x0306) //"Incompatible protocol version") -#define TSDB_CODE_MND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0307) //"Invalid message length") -#define TSDB_CODE_MND_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0308) //"Invalid message type") -#define TSDB_CODE_MND_TOO_MANY_SHELL_CONNS TAOS_DEF_ERROR_CODE(0, 0x0309) //"Too many connections") -#define TSDB_CODE_MND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x030A) //"Out of memory in mnode") -#define TSDB_CODE_MND_INVALID_SHOWOBJ TAOS_DEF_ERROR_CODE(0, 0x030B) //"Data expired") -#define TSDB_CODE_MND_INVALID_QUERY_ID TAOS_DEF_ERROR_CODE(0, 0x030C) //"Invalid query id") -#define TSDB_CODE_MND_INVALID_STREAM_ID TAOS_DEF_ERROR_CODE(0, 0x030D) //"Invalid stream id") -#define TSDB_CODE_MND_INVALID_CONN_ID TAOS_DEF_ERROR_CODE(0, 0x030E) //"Invalid connection id") -#define TSDB_CODE_MND_MNODE_IS_RUNNING TAOS_DEF_ERROR_CODE(0, 0x0310) //"mnode is already running") -#define TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC TAOS_DEF_ERROR_CODE(0, 0x0311) //"failed to config sync") -#define TSDB_CODE_MND_FAILED_TO_START_SYNC TAOS_DEF_ERROR_CODE(0, 0x0312) //"failed to start sync") -#define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) //"failed to create mnode dir") -#define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) //"failed to init components") - -#define TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) //"Object already there") -#define TSDB_CODE_MND_SDB_ERROR TAOS_DEF_ERROR_CODE(0, 0x0321) //"Unexpected generic error in sdb") -#define TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0322) //"Invalid table type") -#define TSDB_CODE_MND_SDB_OBJ_NOT_THERE TAOS_DEF_ERROR_CODE(0, 0x0323) //"Object not there") -#define TSDB_CODE_MND_SDB_INVAID_META_ROW TAOS_DEF_ERROR_CODE(0, 0x0324) //"Invalid meta row") -#define 
TSDB_CODE_MND_SDB_INVAID_KEY_TYPE TAOS_DEF_ERROR_CODE(0, 0x0325) //"Invalid key type") - -#define TSDB_CODE_MND_DNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0330) //"DNode already exists") -#define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0331) //"DNode does not exist") -#define TSDB_CODE_MND_VGROUP_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0332) //"VGroup does not exist") -#define TSDB_CODE_MND_NO_REMOVE_MASTER TAOS_DEF_ERROR_CODE(0, 0x0333) //"Master DNode cannot be removed") -#define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0334) //"Out of DNodes") -#define TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT TAOS_DEF_ERROR_CODE(0, 0x0335) //"Cluster cfg inconsistent") -#define TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION TAOS_DEF_ERROR_CODE(0, 0x0336) //"Invalid dnode cfg option") -#define TSDB_CODE_MND_BALANCE_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0337) //"Balance already enabled") -#define TSDB_CODE_MND_VGROUP_NOT_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0338) //"Vgroup not in dnode") -#define TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0339) //"Vgroup already in dnode") -#define TSDB_CODE_MND_DNODE_NOT_FREE TAOS_DEF_ERROR_CODE(0, 0x033A) //"Dnode not avaliable") -#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x033B) //"Cluster id not match") -#define TSDB_CODE_MND_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x033C) //"Cluster not ready") -#define TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033D) //"Dnode Id not configured") -#define TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033E) //"Dnode Ep not configured") - -#define TSDB_CODE_MND_ACCT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0340) //"Account already exists") -#define TSDB_CODE_MND_INVALID_ACCT TAOS_DEF_ERROR_CODE(0, 0x0341) //"Invalid account") -#define TSDB_CODE_MND_INVALID_ACCT_OPTION TAOS_DEF_ERROR_CODE(0, 0x0342) //"Invalid account options") -#define TSDB_CODE_MND_ACCT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0343) //"Account authorization has expired") - 
-#define TSDB_CODE_MND_USER_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) //"User already exists") -#define TSDB_CODE_MND_INVALID_USER TAOS_DEF_ERROR_CODE(0, 0x0351) //"Invalid user") -#define TSDB_CODE_MND_INVALID_USER_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0352) //"Invalid user format") -#define TSDB_CODE_MND_INVALID_PASS_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0353) //"Invalid password format") -#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn") -#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users") - -#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists") -#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long") -#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist") -#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb") -#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags") -#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns") -#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series") -#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table -#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long") -#define TSDB_CODE_MND_TAG_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0369) //"Tag already exists") -#define TSDB_CODE_MND_TAG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036A) //"Tag does not exist") -#define TSDB_CODE_MND_FIELD_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x036B) //"Field already exists") -#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist") -#define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist") -#define 
TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message") -#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes") - -#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name") -#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length") -#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code") -#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists") -#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func") -#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize") - -#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"invalid tag length") -#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"invalid column length") - -#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available") -#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists") -#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options") -#define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) //"Invalid database name") -#define TSDB_CODE_MND_MONITOR_DB_FORBIDDEN TAOS_DEF_ERROR_CODE(0, 0x0384) //"Cannot delete monitor database") -#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) //"Too many databases for account") -#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //"Database not available") -#define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) //"Database unsynced") - -#define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) //"Invalid database option: days out of range") -#define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 
0x0391) //"Invalid database option: keep >= keep1 >= keep0 >= days") +#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed" +#define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) //"Message is progressing" +#define TSDB_CODE_MND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0302) //"Messag need to be reprocessed" +#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) //"Insufficient privilege for operation" +#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0304) //"Unexpected generic error in mnode" +#define TSDB_CODE_MND_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x0305) //"Invalid message connection" +#define TSDB_CODE_MND_INVALID_MSG_VERSION TAOS_DEF_ERROR_CODE(0, 0x0306) //"Incompatible protocol version" +#define TSDB_CODE_MND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0307) //"Invalid message length" +#define TSDB_CODE_MND_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0308) //"Invalid message type" +#define TSDB_CODE_MND_TOO_MANY_SHELL_CONNS TAOS_DEF_ERROR_CODE(0, 0x0309) //"Too many connections" +#define TSDB_CODE_MND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x030A) //"Out of memory in mnode" +#define TSDB_CODE_MND_INVALID_SHOWOBJ TAOS_DEF_ERROR_CODE(0, 0x030B) //"Data expired" +#define TSDB_CODE_MND_INVALID_QUERY_ID TAOS_DEF_ERROR_CODE(0, 0x030C) //"Invalid query id" +#define TSDB_CODE_MND_INVALID_STREAM_ID TAOS_DEF_ERROR_CODE(0, 0x030D) //"Invalid stream id" +#define TSDB_CODE_MND_INVALID_CONN_ID TAOS_DEF_ERROR_CODE(0, 0x030E) //"Invalid connection id" +#define TSDB_CODE_MND_MNODE_IS_RUNNING TAOS_DEF_ERROR_CODE(0, 0x0310) //"mnode is already running" +#define TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC TAOS_DEF_ERROR_CODE(0, 0x0311) //"failed to config sync" +#define TSDB_CODE_MND_FAILED_TO_START_SYNC TAOS_DEF_ERROR_CODE(0, 0x0312) //"failed to start sync" +#define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) //"failed to create mnode dir" +#define 
TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) //"failed to init components" + +#define TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) //"Object already there" +#define TSDB_CODE_MND_SDB_ERROR TAOS_DEF_ERROR_CODE(0, 0x0321) //"Unexpected generic error in sdb" +#define TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0322) //"Invalid table type" +#define TSDB_CODE_MND_SDB_OBJ_NOT_THERE TAOS_DEF_ERROR_CODE(0, 0x0323) //"Object not there" +#define TSDB_CODE_MND_SDB_INVAID_META_ROW TAOS_DEF_ERROR_CODE(0, 0x0324) //"Invalid meta row" +#define TSDB_CODE_MND_SDB_INVAID_KEY_TYPE TAOS_DEF_ERROR_CODE(0, 0x0325) //"Invalid key type" + +#define TSDB_CODE_MND_DNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0330) //"DNode already exists" +#define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0331) //"DNode does not exist" +#define TSDB_CODE_MND_VGROUP_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0332) //"VGroup does not exist" +#define TSDB_CODE_MND_NO_REMOVE_MASTER TAOS_DEF_ERROR_CODE(0, 0x0333) //"Master DNode cannot be removed" +#define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0334) //"Out of DNodes" +#define TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT TAOS_DEF_ERROR_CODE(0, 0x0335) //"Cluster cfg inconsistent" +#define TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION TAOS_DEF_ERROR_CODE(0, 0x0336) //"Invalid dnode cfg option" +#define TSDB_CODE_MND_BALANCE_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0337) //"Balance already enabled" +#define TSDB_CODE_MND_VGROUP_NOT_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0338) //"Vgroup not in dnode" +#define TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0339) //"Vgroup already in dnode" +#define TSDB_CODE_MND_DNODE_NOT_FREE TAOS_DEF_ERROR_CODE(0, 0x033A) //"Dnode not avaliable" +#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x033B) //"Cluster id not match" +#define TSDB_CODE_MND_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x033C) //"Cluster not ready" +#define 
TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033D) //"Dnode Id not configured" +#define TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033E) //"Dnode Ep not configured" + +#define TSDB_CODE_MND_ACCT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0340) //"Account already exists" +#define TSDB_CODE_MND_INVALID_ACCT TAOS_DEF_ERROR_CODE(0, 0x0341) //"Invalid account" +#define TSDB_CODE_MND_INVALID_ACCT_OPTION TAOS_DEF_ERROR_CODE(0, 0x0342) //"Invalid account options" +#define TSDB_CODE_MND_ACCT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0343) //"Account authorization has expired" + +#define TSDB_CODE_MND_USER_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) //"User already exists" +#define TSDB_CODE_MND_INVALID_USER TAOS_DEF_ERROR_CODE(0, 0x0351) //"Invalid user" +#define TSDB_CODE_MND_INVALID_USER_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0352) //"Invalid user format" +#define TSDB_CODE_MND_INVALID_PASS_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0353) //"Invalid password format" +#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn" +#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users" + +#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists" +#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long" +#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist" +#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb" +#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags" +#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns" +#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series" +#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table" // operation only available for super table +#define 
TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long" +#define TSDB_CODE_MND_TAG_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0369) //"Tag already exists" +#define TSDB_CODE_MND_TAG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036A) //"Tag does not exist" +#define TSDB_CODE_MND_FIELD_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x036B) //"Field already exists" +#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist" +#define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist" +#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message" +#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes" + +#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name" +#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length" +#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code" +#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists" +#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func" +#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize" + +#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"invalid tag length" +#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"invalid column length" + +#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available" +#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists" +#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options" +#define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) //"Invalid database name" +#define TSDB_CODE_MND_MONITOR_DB_FORBIDDEN 
TAOS_DEF_ERROR_CODE(0, 0x0384) //"Cannot delete monitor database" +#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) //"Too many databases for account" +#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //"Database not available" +#define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) //"Database unsynced" + +#define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) //"Invalid database option: days out of range" +#define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 0x0391) //"Invalid database option: keep >= keep1 >= keep0 >= days" #define TSDB_CODE_MND_INVALID_TOPIC TAOS_DEF_ERROR_CODE(0, 0x0392) //"Invalid topic name) #define TSDB_CODE_MND_INVALID_TOPIC_OPTION TAOS_DEF_ERROR_CODE(0, 0x0393) //"Invalid topic option) @@ -217,35 +218,36 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOPIC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0395) //"Topic already exists) // dnode -#define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) //"Message not processed") -#define TSDB_CODE_DND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0401) //"Dnode out of memory") -#define TSDB_CODE_DND_NO_WRITE_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0402) //"No permission for disk files in dnode") -#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length") -#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress") -#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories") +#define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) //"Message not processed" +#define TSDB_CODE_DND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0401) //"Dnode out of memory" +#define TSDB_CODE_DND_NO_WRITE_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0402) //"No permission for disk files in dnode" +#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length" +#define 
TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress" +#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories" #define TSDB_CODE_DND_EXITING TAOS_DEF_ERROR_CODE(0, 0x0406) //"Dnode is exiting" +#define TSDB_CODE_DND_VNODE_OPEN_FAILED TAOS_DEF_ERROR_CODE(0, 0x0407) //"Vnode open failed" // vnode -#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress") -#define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) //"Message not processed") -#define TSDB_CODE_VND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0502) //"Action need to be reprocessed") -#define TSDB_CODE_VND_INVALID_VGROUP_ID TAOS_DEF_ERROR_CODE(0, 0x0503) //"Invalid Vgroup ID") -#define TSDB_CODE_VND_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0504) //"Vnode initialization failed") -#define TSDB_CODE_VND_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0505) //"System out of disk space") -#define TSDB_CODE_VND_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0506) //"No write permission for disk files") -#define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file") -#define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory") -#define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode") -#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file") -#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed") -#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") -#define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") -#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") -#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing") -#define 
TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") -#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") -#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") -#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") +#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress" +#define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) //"Message not processed" +#define TSDB_CODE_VND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0502) //"Action need to be reprocessed" +#define TSDB_CODE_VND_INVALID_VGROUP_ID TAOS_DEF_ERROR_CODE(0, 0x0503) //"Invalid Vgroup ID" +#define TSDB_CODE_VND_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0504) //"Vnode initialization failed" +#define TSDB_CODE_VND_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0505) //"System out of disk space" +#define TSDB_CODE_VND_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0506) //"No write permission for disk files" +#define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file" +#define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory" +#define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode" +#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file" +#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed" +#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit" +#define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping" +#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing" +#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing" +#define TSDB_CODE_VND_NOT_SYNCED 
TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended" +#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied" +#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing" +#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state" // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") @@ -272,6 +274,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value") #define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data") #define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet") +#define TSDB_CODE_TDB_NO_JSON_TAG_KEY TAOS_DEF_ERROR_CODE(0, 0x0618) //"TSDB no tag json key") // query #define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") @@ -290,178 +293,177 @@ int32_t* taosGetErrno(); #define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error") #define TSDB_CODE_QRY_INVALID_TIME_CONDITION TAOS_DEF_ERROR_CODE(0, 0x070E) //"invalid time condition") - // grant -#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired") -#define TSDB_CODE_GRANT_DNODE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0801) //"DNode creation limited by licence") -#define TSDB_CODE_GRANT_ACCT_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0802) //"Account creation limited by license") -#define TSDB_CODE_GRANT_TIMESERIES_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0803) //"Table creation limited by license") -#define TSDB_CODE_GRANT_DB_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0804) //"DB creation limited by license") -#define TSDB_CODE_GRANT_USER_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0805) //"User creation limited by license") -#define TSDB_CODE_GRANT_CONN_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0806) //"Conn creation limited by license") -#define 
TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807) //"Stream creation limited by license") -#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808) //"Write speed limited by license") -#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809) //"Storage capacity limited by license") -#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A) //"Query time limited by license") -#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B) //"CPU cores limited by license") +#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired" +#define TSDB_CODE_GRANT_DNODE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0801) //"DNode creation limited by licence" +#define TSDB_CODE_GRANT_ACCT_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0802) //"Account creation limited by license" +#define TSDB_CODE_GRANT_TIMESERIES_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0803) //"Table creation limited by license" +#define TSDB_CODE_GRANT_DB_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0804) //"DB creation limited by license" +#define TSDB_CODE_GRANT_USER_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0805) //"User creation limited by license" +#define TSDB_CODE_GRANT_CONN_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0806) //"Conn creation limited by license" +#define TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807) //"Stream creation limited by license" +#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808) //"Write speed limited by license" +#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809) //"Storage capacity limited by license" +#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A) //"Query time limited by license" +#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B) //"CPU cores limited by license" // sync -#define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) //"Invalid Sync Configuration") -#define TSDB_CODE_SYN_NOT_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0901) //"Sync module not enabled") 
-#define TSDB_CODE_SYN_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0902) //"Invalid Sync version") -#define TSDB_CODE_SYN_CONFIRM_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0903) //"Sync confirm expired") -#define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) //"Too many sync fwd infos") -#define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) //"Mismatched protocol") -#define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) //"Mismatched clusterId") -#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) //"Mismatched signature") -#define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) //"Invalid msg checksum") -#define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) //"Invalid msg length") -#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) //"Invalid msg type") +#define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) //"Invalid Sync Configuration" +#define TSDB_CODE_SYN_NOT_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0901) //"Sync module not enabled" +#define TSDB_CODE_SYN_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0902) //"Invalid Sync version" +#define TSDB_CODE_SYN_CONFIRM_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0903) //"Sync confirm expired" +#define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) //"Too many sync fwd infos" +#define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) //"Mismatched protocol" +#define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) //"Mismatched clusterId" +#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) //"Mismatched signature" +#define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) //"Invalid msg checksum" +#define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) //"Invalid msg length" +#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) //"Invalid msg type" // wal -#define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 
0x1000) //"Unexpected generic error in wal") -#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") -#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") +#define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal" +#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted" +#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit" // http -#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online") -#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support") -#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format") -#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory") -#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big") -#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input") -#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty") -#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input") -#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd") -#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full") -#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error") -#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0") -#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip") -#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip") -#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login") - -#define 
TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version") -#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length") -#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization") -#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization") -#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization") -#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization") -#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method") -#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target") -#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version") -#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp") -#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status") -#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase") -#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf") -#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header") -#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key") -#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val") -#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size") -#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk") -#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section") -#define 
TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state") -#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section") - -#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0") -#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100") -#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error") - -#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null") -#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long") -#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat") -#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0") -#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K") -#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find") -#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string") -#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0") -#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0") -#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find") -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0") -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 
0x116E) //"tags size too long") -#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null") -#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null") -#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long") -#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string") -#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null") -#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null") -#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long") -#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find") -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0") -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long") -#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null") -#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null") -#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long") -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string") -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null") -#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string") -#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist") - -#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null") -#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long") -#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) 
//"invalid opentsdb json fromat") -#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0") -#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K") -#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find") -#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string") -#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0") -#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0") -#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find") -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0") -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long") -#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null") -#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null") -#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64") -#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find") -#define 
TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string") - -#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error") +#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online" +#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support" +#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format" +#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory" +#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big" +#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input" +#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty" +#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input" +#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd" +#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full" +#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error" +#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0" +#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip" +#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip" +#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login" + +#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version" +#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length" +#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization" +#define 
TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization" +#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization" +#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization" +#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method" +#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target" +#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version" +#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp" +#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status" +#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase" +#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf" +#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header" +#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key" +#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val" +#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size" +#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk" +#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section" +#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state" +#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section" + +#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0" +#define TSDB_CODE_HTTP_GC_QUERY_SIZE 
TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100" +#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error" + +#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null" +#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long" +#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat" +#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0" +#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K" +#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find" +#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string" +#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0" +#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0" +#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find" +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0" +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long" +#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null" +#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null" +#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long" +#define 
TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string" +#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null" +#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null" +#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long" +#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find" +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0" +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long" +#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null" +#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null" +#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long" +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string" +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null" +#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string" +#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist" + +#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null" +#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long" +#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat" +#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0" +#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K" +#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find" 
+#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string" +#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0" +#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0" +#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find" +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0" +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long" +#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null" +#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null" +#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64" +#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find" +#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string" + +#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error" // odbc -#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory") -#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) 
//"convertion not a valid literal input") -#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined") -#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated") -#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated") -#define TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported") -#define TSDB_CODE_ODBC_CONV_OOR TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range") -#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range") -#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet") -#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle") -#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set") -#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned") -#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor") -#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready") -#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy") -#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string") -#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument") -#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp") -#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large") -#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence") -#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete") -#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general") +#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory" 
+#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input" +#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined" +#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated" +#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated" +#define TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported" +#define TSDB_CODE_ODBC_CONV_OOR TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range" +#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range" +#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet" +#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle" +#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set" +#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned" +#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor" +#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready" +#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy" +#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string" +#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument" +#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp" +#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large" +#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence" +#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete" +#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general" // tfs -#define 
TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory") -#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config") -#define TSDB_CODE_FS_TOO_MANY_MOUNT TAOS_DEF_ERROR_CODE(0, 0x2202) //"tfs too many mount") -#define TSDB_CODE_FS_DUP_PRIMARY TAOS_DEF_ERROR_CODE(0, 0x2203) //"tfs duplicate primary mount") -#define TSDB_CODE_FS_NO_PRIMARY_DISK TAOS_DEF_ERROR_CODE(0, 0x2204) //"tfs no primary mount") -#define TSDB_CODE_FS_NO_MOUNT_AT_TIER TAOS_DEF_ERROR_CODE(0, 0x2205) //"tfs no mount at tier") -#define TSDB_CODE_FS_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2206) //"tfs file already exists") -#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level") -#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk") +#define TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory" +#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config" +#define TSDB_CODE_FS_TOO_MANY_MOUNT TAOS_DEF_ERROR_CODE(0, 0x2202) //"tfs too many mount" +#define TSDB_CODE_FS_DUP_PRIMARY TAOS_DEF_ERROR_CODE(0, 0x2203) //"tfs duplicate primary mount" +#define TSDB_CODE_FS_NO_PRIMARY_DISK TAOS_DEF_ERROR_CODE(0, 0x2204) //"tfs no primary mount" +#define TSDB_CODE_FS_NO_MOUNT_AT_TIER TAOS_DEF_ERROR_CODE(0, 0x2205) //"tfs no mount at tier" +#define TSDB_CODE_FS_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2206) //"tfs file already exists" +#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level" +#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk" // monitor -#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection") +#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection" #ifdef __cplusplus } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 
0f291936f5519b1db7f98b098e5f9f82303cd0f5..9dc76466aadbe9781dbdd727a524a32f8103650f 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -230,6 +230,7 @@ typedef struct SSubmitBlk { // Submit message for this TSDB typedef struct SSubmitMsg { SMsgHead header; + int8_t extend; int32_t length; int32_t numOfBlocks; char blocks[]; @@ -243,6 +244,7 @@ typedef struct { } SShellSubmitRspBlock; typedef struct { + int8_t extend; int32_t code; // 0-success, > 0 error code int32_t numOfRows; // number of records the client is trying to write int32_t affectedRows; // number of records actually written @@ -278,6 +280,7 @@ typedef struct { } SMDCreateTableMsg; typedef struct { + int8_t extend; int32_t len; // one create table message char tableName[TSDB_TABLE_FNAME_LEN]; int8_t igExists; @@ -290,11 +293,13 @@ typedef struct { } SCreateTableMsg; typedef struct { + int8_t extend; int32_t numOfTables; int32_t contLen; } SCMCreateTableMsg; typedef struct { + int8_t extend; char name[TSDB_TABLE_FNAME_LEN]; // if user specify DROP STABLE, this flag will be set. 
And an error will be returned if it is not a super table int8_t supertable; @@ -302,6 +307,7 @@ typedef struct { } SCMDropTableMsg; typedef struct { + int8_t extend; char tableFname[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; int16_t type; /* operation type */ @@ -314,6 +320,7 @@ typedef struct { typedef struct { SMsgHead head; + int8_t extend; int64_t uid; int32_t tid; int16_t tversion; @@ -327,6 +334,7 @@ typedef struct { } SUpdateTableTagValMsg; typedef struct { + int8_t extend; char clientVersion[TSDB_VERSION_LEN]; char msgVersion[TSDB_VERSION_LEN]; char db[TSDB_TABLE_FNAME_LEN]; @@ -335,6 +343,7 @@ typedef struct { } SConnectMsg; typedef struct { + int8_t extend; char acctId[TSDB_ACCT_ID_LEN]; char serverVersion[TSDB_VERSION_LEN]; char clusterId[TSDB_CLUSTER_ID_LEN]; @@ -361,16 +370,19 @@ typedef struct { } SAcctCfg; typedef struct { + int8_t extend; char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; SAcctCfg cfg; } SCreateAcctMsg, SAlterAcctMsg; typedef struct { - char user[TSDB_USER_LEN]; + int8_t extend; + char user[TSDB_USER_LEN]; } SDropUserMsg, SDropAcctMsg; typedef struct { + int8_t extend; char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; int8_t privilege; @@ -400,7 +412,7 @@ typedef struct SColIndex { int16_t colId; // column id int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag uint16_t flag; // denote if it is a tag or a normal column - char name[TSDB_COL_NAME_LEN + TSDB_TABLE_NAME_LEN + 1]; + char name[TSDB_COL_NAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_MAX_JSON_KEY_LEN + 4 + 1]; // 4 meams ->'' for json tag } SColIndex; typedef struct SColumnFilterInfo { @@ -462,6 +474,7 @@ typedef struct { typedef struct { SMsgHead head; + int8_t extend; char version[TSDB_VERSION_LEN]; bool stableQuery; // super table query or not @@ -475,6 +488,7 @@ typedef struct { bool tsCompQuery; // is tscomp query bool simpleAgg; bool pointInterpQuery; // point interpolation query + bool 
needTableSeqScan; // need scan table by table bool needReverseScan; // need reverse scan bool stateWindow; // state window flag @@ -513,6 +527,7 @@ typedef struct { } SQueryTableMsg; typedef struct { + int8_t extend; int32_t code; union{uint64_t qhandle; uint64_t qId;}; // query handle } SQueryTableRsp; @@ -520,11 +535,13 @@ typedef struct { // todo: the show handle should be replaced with id typedef struct { SMsgHead header; + int8_t extend; union{uint64_t qhandle; uint64_t qId;}; // query handle uint16_t free; } SRetrieveTableMsg; typedef struct SRetrieveTableRsp { + int8_t extend; int32_t numOfRows; int8_t completed; // all results are returned to client int16_t precision; @@ -550,6 +567,7 @@ typedef struct { } SVnodeLoad; typedef struct { + int8_t extend; char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; int32_t cacheBlockSize; //MB int32_t totalBlocks; @@ -576,6 +594,7 @@ typedef struct { } SCreateDbMsg, SAlterDbMsg; typedef struct { + int8_t extend; char name[TSDB_FUNC_NAME_LEN]; char path[PATH_MAX]; int32_t funcType; @@ -587,11 +606,13 @@ typedef struct { } SCreateFuncMsg; typedef struct { + int8_t extend; int32_t num; char name[]; } SRetrieveFuncMsg; typedef struct { + int8_t extend; char name[TSDB_FUNC_NAME_LEN]; int32_t funcType; int8_t resType; @@ -602,15 +623,18 @@ typedef struct { } SFunctionInfoMsg; typedef struct { + int8_t extend; int32_t num; char content[]; } SUdfFuncMsg; typedef struct { + int8_t extend; char name[TSDB_FUNC_NAME_LEN]; } SDropFuncMsg; typedef struct { + int8_t extend; char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; } SDropDbMsg, SUseDbMsg, SSyncDbMsg; @@ -743,12 +767,14 @@ typedef struct { } SCreateVnodeMsg, SAlterVnodeMsg; typedef struct { + int8_t extend; char tableFname[TSDB_TABLE_FNAME_LEN]; int16_t createFlag; char tags[]; } STableInfoMsg; typedef struct { + int8_t extend; uint8_t metaClone; // create local clone of the cached table meta int32_t numOfVgroups; int32_t numOfTables; @@ -757,21 +783,25 @@ typedef struct { } 
SMultiTableInfoMsg; typedef struct SSTableVgroupMsg { + int8_t extend; int32_t numOfTables; } SSTableVgroupMsg, SSTableVgroupRspMsg; typedef struct { + int8_t extend; int32_t vgId; int8_t numOfEps; SEpAddrMsg epAddr[TSDB_MAX_REPLICA]; } SVgroupMsg, SVgroupInfo; typedef struct { + int8_t extend; int32_t numOfVgroups; SVgroupMsg vgroups[]; } SVgroupsMsg, SVgroupsInfo; typedef struct STableMetaMsg { + int8_t extend; int32_t contLen; char tableFname[TSDB_TABLE_FNAME_LEN]; // table id uint8_t numOfTags; @@ -791,6 +821,7 @@ typedef struct STableMetaMsg { } STableMetaMsg; typedef struct SMultiTableMeta { + int8_t extend; int32_t numOfTables; int32_t numOfVgroup; int32_t numOfUdf; @@ -813,6 +844,7 @@ typedef struct { * payloadLen is the length of payload */ typedef struct { + int8_t extend; int8_t type; char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; uint16_t payloadLen; @@ -820,17 +852,20 @@ typedef struct { } SShowMsg; typedef struct { + int8_t extend; char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; int32_t numOfVgroup; int32_t vgid[]; } SCompactMsg; typedef struct SShowRsp { + int8_t extend; uint64_t qhandle; STableMetaMsg tableMeta; } SShowRsp; typedef struct { + int8_t extend; char ep[TSDB_EP_LEN]; // end point, hostname:port } SCreateDnodeMsg, SDropDnodeMsg; @@ -852,6 +887,7 @@ typedef struct { } SConfigVnodeMsg; typedef struct { + int8_t extend; char ep[TSDB_EP_LEN]; // end point, hostname:port char config[64]; } SCfgDnodeMsg; @@ -883,6 +919,7 @@ typedef struct { } SStreamDesc; typedef struct { + int8_t extend; char clientVer[TSDB_VERSION_LEN]; uint32_t connId; int32_t pid; @@ -893,6 +930,7 @@ typedef struct { } SHeartBeatMsg; typedef struct { + int8_t extend; uint32_t queryId; uint32_t streamId; uint32_t totalDnodes; @@ -903,10 +941,12 @@ typedef struct { } SHeartBeatRsp; typedef struct { + int8_t extend; char queryId[TSDB_KILL_MSG_LEN + 1]; } SKillQueryMsg, SKillStreamMsg, SKillConnMsg; typedef struct { + int8_t extend; int32_t vnode; int32_t sid; uint64_t uid; @@ 
-931,6 +971,16 @@ typedef struct { char reserved2[64]; } SStartupStep; +typedef struct { + int16_t type; + int32_t len; + char value[]; +} STLV; + +enum { + TLV_TYPE_DUMMY = 1, +}; + #pragma pack(pop) #ifdef __cplusplus diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index f98e7eec31f9cea99505bada1e0e3e8729b8d139..a44e958be4345d4aa131cab8f616e0460624e8c1 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -118,7 +118,7 @@ typedef struct { void tsdbClearTableCfg(STableCfg *config); -void *tsdbGetTableTagVal(const void *pTable, int32_t colId, int16_t type, int16_t bytes); +void *tsdbGetTableTagVal(const void *pTable, int32_t colId, int16_t type); char *tsdbGetTableName(void *pTable); #define TSDB_TABLEID(_table) ((STableId*) (_table)) @@ -418,10 +418,14 @@ int tsdbCompact(STsdbRepo *pRepo); // no problem return true bool tsdbNoProblem(STsdbRepo* pRepo); - // unit of walSize: MB int tsdbCheckWal(STsdbRepo *pRepo, uint32_t walSize); +// for json tag +void* getJsonTagValueElment(void* data, char* key, int32_t keyLen, char* out, int16_t bytes); +void getJsonTagValueAll(void* data, void* dst, int16_t bytes); +char* parseTagDatatoJson(void *p); + #ifdef __cplusplus } #endif diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 4b6602b4124fe5e16c60700aebf3a1a2d55c77fd..7d721d21a0b85bb3e7119977f74f3e05e69cc361 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -28,195 +28,194 @@ #define TK_TIMESTAMP 10 #define TK_BINARY 11 #define TK_NCHAR 12 -#define TK_OR 13 -#define TK_AND 14 -#define TK_NOT 15 -#define TK_EQ 16 -#define TK_NE 17 -#define TK_ISNULL 18 -#define TK_NOTNULL 19 -#define TK_IS 20 -#define TK_LIKE 21 -#define TK_MATCH 22 -#define TK_NMATCH 23 -#define TK_GLOB 24 -#define TK_BETWEEN 25 -#define TK_IN 26 -#define TK_GT 27 -#define TK_GE 28 -#define TK_LT 29 -#define TK_LE 30 -#define TK_BITAND 31 -#define TK_BITOR 32 -#define TK_LSHIFT 33 -#define TK_RSHIFT 34 -#define TK_PLUS 35 -#define TK_MINUS 36 -#define TK_DIVIDE 37 -#define TK_TIMES 38 
-#define TK_STAR 39 -#define TK_SLASH 40 -#define TK_REM 41 -#define TK_CONCAT 42 -#define TK_UMINUS 43 -#define TK_UPLUS 44 -#define TK_BITNOT 45 -#define TK_SHOW 46 -#define TK_DATABASES 47 -#define TK_TOPICS 48 -#define TK_FUNCTIONS 49 -#define TK_MNODES 50 -#define TK_DNODES 51 -#define TK_ACCOUNTS 52 -#define TK_USERS 53 -#define TK_MODULES 54 -#define TK_QUERIES 55 -#define TK_CONNECTIONS 56 -#define TK_STREAMS 57 -#define TK_VARIABLES 58 -#define TK_SCORES 59 -#define TK_GRANTS 60 -#define TK_VNODES 61 -#define TK_DOT 62 -#define TK_CREATE 63 -#define TK_TABLE 64 -#define TK_STABLE 65 -#define TK_DATABASE 66 -#define TK_TABLES 67 -#define TK_STABLES 68 -#define TK_VGROUPS 69 -#define TK_DROP 70 -#define TK_TOPIC 71 -#define TK_FUNCTION 72 -#define TK_DNODE 73 -#define TK_USER 74 -#define TK_ACCOUNT 75 -#define TK_USE 76 -#define TK_DESCRIBE 77 -#define TK_DESC 78 -#define TK_ALTER 79 -#define TK_PASS 80 -#define TK_PRIVILEGE 81 -#define TK_LOCAL 82 -#define TK_COMPACT 83 -#define TK_LP 84 -#define TK_RP 85 -#define TK_IF 86 -#define TK_EXISTS 87 -#define TK_AS 88 -#define TK_OUTPUTTYPE 89 -#define TK_AGGREGATE 90 -#define TK_BUFSIZE 91 -#define TK_PPS 92 -#define TK_TSERIES 93 -#define TK_DBS 94 -#define TK_STORAGE 95 -#define TK_QTIME 96 -#define TK_CONNS 97 -#define TK_STATE 98 -#define TK_COMMA 99 -#define TK_KEEP 100 -#define TK_CACHE 101 -#define TK_REPLICA 102 -#define TK_QUORUM 103 -#define TK_DAYS 104 -#define TK_MINROWS 105 -#define TK_MAXROWS 106 -#define TK_BLOCKS 107 -#define TK_CTIME 108 -#define TK_WAL 109 -#define TK_FSYNC 110 -#define TK_COMP 111 -#define TK_PRECISION 112 -#define TK_UPDATE 113 -#define TK_CACHELAST 114 -#define TK_PARTITIONS 115 -#define TK_UNSIGNED 116 -#define TK_TAGS 117 -#define TK_USING 118 -#define TK_NULL 119 -#define TK_NOW 120 -#define TK_SELECT 121 -#define TK_UNION 122 -#define TK_ALL 123 -#define TK_DISTINCT 124 -#define TK_FROM 125 -#define TK_VARIABLE 126 -#define TK_RANGE 127 -#define TK_INTERVAL 128 -#define 
TK_EVERY 129 -#define TK_SESSION 130 -#define TK_STATE_WINDOW 131 -#define TK_FILL 132 -#define TK_SLIDING 133 -#define TK_ORDER 134 -#define TK_BY 135 -#define TK_ASC 136 -#define TK_GROUP 137 -#define TK_HAVING 138 -#define TK_LIMIT 139 -#define TK_OFFSET 140 -#define TK_SLIMIT 141 -#define TK_SOFFSET 142 -#define TK_WHERE 143 -#define TK_RESET 144 -#define TK_QUERY 145 -#define TK_SYNCDB 146 -#define TK_ADD 147 -#define TK_COLUMN 148 -#define TK_MODIFY 149 -#define TK_TAG 150 -#define TK_CHANGE 151 -#define TK_SET 152 -#define TK_KILL 153 -#define TK_CONNECTION 154 -#define TK_STREAM 155 -#define TK_COLON 156 -#define TK_ABORT 157 -#define TK_AFTER 158 -#define TK_ATTACH 159 -#define TK_BEFORE 160 -#define TK_BEGIN 161 -#define TK_CASCADE 162 -#define TK_CLUSTER 163 -#define TK_CONFLICT 164 -#define TK_COPY 165 -#define TK_DEFERRED 166 -#define TK_DELIMITERS 167 -#define TK_DETACH 168 -#define TK_EACH 169 -#define TK_END 170 -#define TK_EXPLAIN 171 -#define TK_FAIL 172 -#define TK_FOR 173 -#define TK_IGNORE 174 -#define TK_IMMEDIATE 175 -#define TK_INITIALLY 176 -#define TK_INSTEAD 177 -#define TK_KEY 178 -#define TK_OF 179 -#define TK_RAISE 180 -#define TK_REPLACE 181 -#define TK_RESTRICT 182 -#define TK_ROW 183 -#define TK_STATEMENT 184 -#define TK_TRIGGER 185 -#define TK_VIEW 186 -#define TK_IPTOKEN 187 -#define TK_SEMI 188 -#define TK_NONE 189 -#define TK_PREV 190 -#define TK_LINEAR 191 -#define TK_IMPORT 192 -#define TK_TBNAME 193 -#define TK_JOIN 194 -#define TK_INSERT 195 -#define TK_INTO 196 -#define TK_VALUES 197 -#define TK_FILE 198 - - - +#define TK_JSON 13 +#define TK_OR 14 +#define TK_AND 15 +#define TK_NOT 16 +#define TK_EQ 17 +#define TK_NE 18 +#define TK_ISNULL 19 +#define TK_NOTNULL 20 +#define TK_IS 21 +#define TK_LIKE 22 +#define TK_MATCH 23 +#define TK_NMATCH 24 +#define TK_CONTAINS 25 +#define TK_GLOB 26 +#define TK_BETWEEN 27 +#define TK_IN 28 +#define TK_GT 29 +#define TK_GE 30 +#define TK_LT 31 +#define TK_LE 32 +#define TK_BITAND 33 
+#define TK_BITOR 34 +#define TK_LSHIFT 35 +#define TK_RSHIFT 36 +#define TK_PLUS 37 +#define TK_MINUS 38 +#define TK_DIVIDE 39 +#define TK_TIMES 40 +#define TK_STAR 41 +#define TK_SLASH 42 +#define TK_REM 43 +#define TK_UMINUS 44 +#define TK_UPLUS 45 +#define TK_BITNOT 46 +#define TK_ARROW 47 +#define TK_SHOW 48 +#define TK_DATABASES 49 +#define TK_TOPICS 50 +#define TK_FUNCTIONS 51 +#define TK_MNODES 52 +#define TK_DNODES 53 +#define TK_ACCOUNTS 54 +#define TK_USERS 55 +#define TK_MODULES 56 +#define TK_QUERIES 57 +#define TK_CONNECTIONS 58 +#define TK_STREAMS 59 +#define TK_VARIABLES 60 +#define TK_SCORES 61 +#define TK_GRANTS 62 +#define TK_VNODES 63 +#define TK_DOT 64 +#define TK_CREATE 65 +#define TK_TABLE 66 +#define TK_STABLE 67 +#define TK_DATABASE 68 +#define TK_TABLES 69 +#define TK_STABLES 70 +#define TK_VGROUPS 71 +#define TK_DROP 72 +#define TK_TOPIC 73 +#define TK_FUNCTION 74 +#define TK_DNODE 75 +#define TK_USER 76 +#define TK_ACCOUNT 77 +#define TK_USE 78 +#define TK_DESCRIBE 79 +#define TK_DESC 80 +#define TK_ALTER 81 +#define TK_PASS 82 +#define TK_PRIVILEGE 83 +#define TK_LOCAL 84 +#define TK_COMPACT 85 +#define TK_LP 86 +#define TK_RP 87 +#define TK_IF 88 +#define TK_EXISTS 89 +#define TK_AS 90 +#define TK_OUTPUTTYPE 91 +#define TK_AGGREGATE 92 +#define TK_BUFSIZE 93 +#define TK_PPS 94 +#define TK_TSERIES 95 +#define TK_DBS 96 +#define TK_STORAGE 97 +#define TK_QTIME 98 +#define TK_CONNS 99 +#define TK_STATE 100 +#define TK_COMMA 101 +#define TK_KEEP 102 +#define TK_CACHE 103 +#define TK_REPLICA 104 +#define TK_QUORUM 105 +#define TK_DAYS 106 +#define TK_MINROWS 107 +#define TK_MAXROWS 108 +#define TK_BLOCKS 109 +#define TK_CTIME 110 +#define TK_WAL 111 +#define TK_FSYNC 112 +#define TK_COMP 113 +#define TK_PRECISION 114 +#define TK_UPDATE 115 +#define TK_CACHELAST 116 +#define TK_PARTITIONS 117 +#define TK_UNSIGNED 118 +#define TK_TAGS 119 +#define TK_USING 120 +#define TK_NULL 121 +#define TK_NOW 122 +#define TK_VARIABLE 123 +#define 
TK_SELECT 124 +#define TK_UNION 125 +#define TK_ALL 126 +#define TK_DISTINCT 127 +#define TK_FROM 128 +#define TK_RANGE 129 +#define TK_INTERVAL 130 +#define TK_EVERY 131 +#define TK_SESSION 132 +#define TK_STATE_WINDOW 133 +#define TK_FILL 134 +#define TK_SLIDING 135 +#define TK_ORDER 136 +#define TK_BY 137 +#define TK_ASC 138 +#define TK_GROUP 139 +#define TK_HAVING 140 +#define TK_LIMIT 141 +#define TK_OFFSET 142 +#define TK_SLIMIT 143 +#define TK_SOFFSET 144 +#define TK_WHERE 145 +#define TK_RESET 146 +#define TK_QUERY 147 +#define TK_SYNCDB 148 +#define TK_ADD 149 +#define TK_COLUMN 150 +#define TK_MODIFY 151 +#define TK_TAG 152 +#define TK_CHANGE 153 +#define TK_SET 154 +#define TK_KILL 155 +#define TK_CONNECTION 156 +#define TK_STREAM 157 +#define TK_COLON 158 +#define TK_ABORT 159 +#define TK_AFTER 160 +#define TK_ATTACH 161 +#define TK_BEFORE 162 +#define TK_BEGIN 163 +#define TK_CASCADE 164 +#define TK_CLUSTER 165 +#define TK_CONFLICT 166 +#define TK_COPY 167 +#define TK_DEFERRED 168 +#define TK_DELIMITERS 169 +#define TK_DETACH 170 +#define TK_EACH 171 +#define TK_END 172 +#define TK_EXPLAIN 173 +#define TK_FAIL 174 +#define TK_FOR 175 +#define TK_IGNORE 176 +#define TK_IMMEDIATE 177 +#define TK_INITIALLY 178 +#define TK_INSTEAD 179 +#define TK_KEY 180 +#define TK_OF 181 +#define TK_RAISE 182 +#define TK_REPLACE 183 +#define TK_RESTRICT 184 +#define TK_ROW 185 +#define TK_STATEMENT 186 +#define TK_TRIGGER 187 +#define TK_VIEW 188 +#define TK_IPTOKEN 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_TBNAME 195 +#define TK_JOIN 196 +#define TK_INSERT 197 +#define TK_INTO 198 +#define TK_VALUES 199 +#define TK_FILE 200 #define TK_SPACE 300 diff --git a/src/inc/ttype.h b/src/inc/ttype.h index 095b593ab7d1fb74effd7991feeeb60f54cbc1b8..1b7d07262e50da893e1bc3009df94b49ee306637 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -166,6 +166,42 @@ typedef struct { } \ } while (0) 
+#define NUM_TO_STRING(_inputType, _input, _outputBytes, _output) \ + do { \ + switch (_inputType) { \ + case TSDB_DATA_TYPE_TINYINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int8_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint8_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int16_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint16_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%" PRId64, *(int64_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \ + break; \ + default: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int32_t *)(_input)); \ + break; \ + } \ + } while (0) + #define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT) #define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT) #define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE) @@ -200,6 +236,8 @@ static FORCE_INLINE bool isNull(const void *val, int32_t type) { return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; case TSDB_DATA_TYPE_DOUBLE: return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; + case TSDB_DATA_TYPE_JSON: + return varDataLen(val) == sizeof(int32_t) && *(uint32_t *) varDataVal(val) == TSDB_DATA_JSON_NULL; case TSDB_DATA_TYPE_NCHAR: return varDataLen(val) 
== sizeof(int32_t) && *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; case TSDB_DATA_TYPE_BINARY: @@ -230,10 +268,10 @@ typedef struct tDataTypeDescriptor { int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize); void (*statisFunc)(const void *pData, int32_t numofrow, int64_t *min, int64_t *max, int64_t *sum, - int16_t *minindex, int16_t *maxindex, int16_t *numofnull); + int16_t *minindex, int16_t *maxindex, int16_t *numofnull); } tDataTypeDescriptor; -extern tDataTypeDescriptor tDataTypes[15]; +extern tDataTypeDescriptor tDataTypes[16]; bool isValidDataType(int32_t type); diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index 6bc22e5fc8ddcdae1ebd42e400c1c6707b959fea..1bdd49267dacc1674cda3ebfd48a0ab11a7cba3a 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -2,6 +2,10 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) -ADD_SUBDIRECTORY(taosdemo) -ADD_SUBDIRECTORY(taosdump) + +IF (TD_TAOS_TOOLS) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/src/kit/taos_tools/deps/avro/lang/c/src) + ADD_SUBDIRECTORY(taos-tools) +ENDIF () + ADD_SUBDIRECTORY(taospack) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index d69a267707470e7a5df4edfa85764aae580a13a6..db572c9d310709dc6fe024b351126679ea9805e1 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -19,9 +19,9 @@ ELSE () ENDIF () IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(shell taos_static cJson lua ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos_static cJson ${LINK_LUA} ${LINK_JEMALLOC}) ELSE () - TARGET_LINK_LIBRARIES(shell taos cJson lua ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos cJson ${LINK_LUA} ${LINK_JEMALLOC}) ENDIF () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) @@ -36,8 +36,14 @@ ELSEIF (TD_WINDOWS) IF (TD_POWER) SET_TARGET_PROPERTIES(shell PROPERTIES 
OUTPUT_NAME power) + ELSEIF (TD_TQ) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME tq) ELSEIF (TD_PRO) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME prodbc) + ELSEIF (TD_KH) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME khclient) + ELSEIF (TD_JH) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME jh_taos) ELSE () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) ENDIF () diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 03ccfe2d576df76407bc7a22cf17d884dd2bad51..9c5794278c5bd9545fb6260e4f8442d8c9e8cad9 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -28,8 +28,16 @@ #define MAX_HISTORY_SIZE 1000 #define MAX_COMMAND_SIZE 1048586 -#ifdef _TD_PRO_ +#ifdef _TD_POWER_ + #define HISTORY_FILE ".power_history" +#elif (_TD_TQ_ == true) + #define HISTORY_FILE ".tq_history" +#elif (_TD_PRO_ == true) #define HISTORY_FILE ".prodb_history" +#elif (_TD_KH_ == true) + #define HISTORY_FILE ".kh_history" +#elif (_TD_JH_ == true) + #define HISTORY_FILE ".jh_taos_history" #else #define HISTORY_FILE ".taos_history" #endif diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index 5821281a036674e7a60edc2f63500822a358b1bc..43256719e125a712e6a52ddadaa9637498278092 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -36,6 +36,7 @@ typedef struct { int totalThreads; void * taos; char * db; + int code; } ShellThreadObj; static int32_t shellUseDb(TAOS *con, char *db) { @@ -112,10 +113,10 @@ static void *shellCheckThreadFp(void *arg) { int32_t end = (pThread->threadIndex + 1) * interval; if (start >= tbNum) return NULL; - if (end > tbNum) end = tbNum + 1; + if (end > tbNum) end = tbNum; char file[32] = {0}; - snprintf(file, 32, "tb%d.txt", pThread->threadIndex); + snprintf(file, 32, "tb%d.sql", pThread->threadIndex); FILE *fp = fopen(file, "w"); if (!fp) { @@ -123,16 +124,19 @@ static void *shellCheckThreadFp(void *arg) { return NULL; } + ASSERT(pThread->code != 0); 
+ char sql[SHELL_SQL_LEN]; for (int32_t t = start; t < end; ++t) { char *tbname = tbNames[t]; if (tbname == NULL) break; - snprintf(sql, SHELL_SQL_LEN, "select * from %s limit 1", tbname); + snprintf(sql, SHELL_SQL_LEN, "select last_row(_c0) from %s;", tbname); TAOS_RES *pSql = taos_query(pThread->taos, sql); int32_t code = taos_errno(pSql); - if (code != 0) { + // -k: -1 means check all errors, while other non-zero values means check specific errors. + if ((code == pThread->code) || ((pThread->code == -1) && (code != 0))) { int32_t len = snprintf(sql, SHELL_SQL_LEN, "drop table %s.%s;\n", pThread->db, tbname); fwrite(sql, 1, len, fp); atomic_add_fetch_32(&errorNum, 1); @@ -161,6 +165,7 @@ static void shellRunCheckThreads(TAOS *con, SShellArguments *_args) { pThread->totalThreads = _args->threadNum; pThread->taos = con; pThread->db = _args->database; + pThread->code = _args->check; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 8ab9bfcf4e7685081cd6f09990f5365d94c4094b..3f672c4531921642bcf1a20888b482c98968f9c7 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -34,28 +34,36 @@ char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n" "Copyright (c) 2020 by PowerDB, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "power> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; #elif (_TD_TQ_ == true) char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "tq> "; - -char CONTINUE_PROMPT[] = " -> "; +char CONTINUE_PROMPT[] = " -> "; int prompt_size = 4; #elif (_TD_PRO_ == true) char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n" "Copyright (c) 2020 by Hanatech, Inc. 
All rights reserved.\n\n"; char PROMPT_HEADER[] = "ProDB> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; +#elif (_TD_KH_ == true) +char CLIENT_VERSION[] = "Welcome to the KingHistorian shell from %s, Client Version:%s\n" + "Copyright (c) 2021 by Hanatech, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "kh> "; +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 4; +#elif (_TD_JH_ == true) +char CLIENT_VERSION[] = "Welcome to the jh_iot shell from %s, Client Version:%s\n" + "Copyright (c) 2021 by jinheng, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "jh_taos> "; +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 9; #else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; #endif @@ -501,6 +509,7 @@ static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_ break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: memcpy(buf, val, length); buf[length] = 0; fprintf(fp, "\'%s\'", buf); @@ -637,7 +646,7 @@ static void shellPrintNChar(const char *str, int length, int width) { static void printField(const char* val, TAOS_FIELD* field, int width, int32_t length, int precision) { if (val == NULL) { int w = width; - if (field->type < TSDB_DATA_TYPE_TINYINT || field->type > TSDB_DATA_TYPE_DOUBLE) { + if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_NCHAR || field->type == TSDB_DATA_TYPE_TIMESTAMP) { w = 0; } w = printf("%*s", w, TSDB_DATA_NULL_STR); @@ -684,6 +693,7 @@ static void printField(const char* val, TAOS_FIELD* field, int width, int32_t le break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: shellPrintNChar(val, length, width); break; case TSDB_DATA_TYPE_TIMESTAMP: @@ -797,7 +807,8 @@ static int calcColWidth(TAOS_FIELD* field, int 
precision) { return MAX(field->bytes, width); } - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON:{ int16_t bytes = field->bytes * TSDB_NCHAR_SIZE; if (bytes > tsMaxBinaryDisplayWidth) { return MAX(tsMaxBinaryDisplayWidth, width); diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 0301fe6df2a6a1fbf8a75507193dfacb55385895..b1c85d951bf1f8cf801286f51b84d47d9c893b5c 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -17,6 +17,8 @@ #include "taos.h" #include "shellCommand.h" +#define SHELL_INPUT_MAX_COMMAND_SIZE 10000 + extern char configDir[]; char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" @@ -274,32 +276,35 @@ int32_t shellReadCommand(TAOS *con, char command[]) { // Read input. void *console = GetStdHandle(STD_INPUT_HANDLE); unsigned long read; - wchar_t c; + wchar_t *c= (wchar_t *)calloc(SHELL_INPUT_MAX_COMMAND_SIZE, sizeof(wchar_t)); char mbStr[16]; while (1) { - int ret = ReadConsole(console, &c, 1, &read, NULL); - int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL); - mbStr[size] = 0; - switch (c) { - case '\n': - if (isReadyGo(&cmd)) { - sprintf(command, "%s%s", cmd.buffer, cmd.command); - free(cmd.buffer); - cmd.buffer = NULL; - free(cmd.command); - cmd.command = NULL; - return 0; - } else { - shellPrintContinuePrompt(); - updateBuffer(&cmd); - } - break; - case '\r': - break; - default: - for (int i = 0; i < size; ++i) { - insertChar(&cmd, mbStr[i]); - } + int ret = ReadConsole(console, c, SHELL_INPUT_MAX_COMMAND_SIZE, &read, NULL); + for (int input_index = 0; input_index < read; input_index++) { + int size = WideCharToMultiByte(CP_UTF8, 0, &c[input_index], 1, mbStr, sizeof(mbStr), NULL, NULL); + mbStr[size] = 0; + switch (c[input_index]) { + case '\n': + if (isReadyGo(&cmd)) { + sprintf(command, "%s%s", cmd.buffer, cmd.command); + free(cmd.buffer); + cmd.buffer = NULL; + 
free(cmd.command); + cmd.command = NULL; + free(c); + return 0; + } else { + shellPrintContinuePrompt(); + updateBuffer(&cmd); + } + break; + case '\r': + break; + default: + for (int i = 0; i < size; ++i) { + insertChar(&cmd, mbStr[i]); + } + } } } @@ -327,6 +332,20 @@ void *shellLoopQuery(void *arg) { return NULL; } -void get_history_path(char *history) { sprintf(history, "C:/TDengine/%s", HISTORY_FILE); } +void get_history_path(char *history) { +#ifdef _TD_POWER_ + sprintf(history, "C:/PowerDB/%s", HISTORY_FILE); +#elif (_TD_TQ_ == true) + sprintf(history, "C:/TQueue/%s", HISTORY_FILE); +#elif (_TD_PRO_ == true) + sprintf(history, "C:/ProDB/%s", HISTORY_FILE); +#elif (_TD_KH_ == true) + sprintf(history, "C:/KingHistorian/%s", HISTORY_FILE); +#elif (_TD_JH_ == true) + sprintf(history, "C:/jh_iot/%s", HISTORY_FILE); +#else + sprintf(history, "C:/TDengine/%s", HISTORY_FILE); +#endif +} void exitShell() { exit(EXIT_SUCCESS); } diff --git a/src/kit/taos-tools b/src/kit/taos-tools new file mode 160000 index 0000000000000000000000000000000000000000..78519c0c90a261b6a559c6bbe7f3d3b5a7b630b4 --- /dev/null +++ b/src/kit/taos-tools @@ -0,0 +1 @@ +Subproject commit 78519c0c90a261b6a559c6bbe7f3d3b5a7b630b4 diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt deleted file mode 100644 index 57d6242d5343ad727b1706ab614ad0add844ddb6..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/CMakeLists.txt +++ /dev/null @@ -1,104 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) -PROJECT(TDengine) - -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(inc) - -FIND_PACKAGE(Git) -IF (GIT_FOUND) - MESSAGE("Git found") - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR} - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) - IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "") - SET(TAOSDEMO_COMMIT_SHA1 "unknown") - ELSE () - 
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) - STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1) - ENDIF () - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR} - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_STATUS) - IF (TD_LINUX) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_STATUS) - ENDIF (TD_LINUX) -ELSE() - MESSAGE("Git not found") - SET(TAOSDEMO_COMMIT_SHA1 "unknown") - SET(TAOSDEMO_STATUS "unknown") -ENDIF (GIT_FOUND) - - -MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) -STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS) - -IF (TAOSDEMO_STATUS MATCHES "M") - SET(TAOSDEMO_STATUS "modified") -ELSE() - SET(TAOSDEMO_STATUS "") -ENDIF () -MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS}) - -ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}") -ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}") - -MESSAGE("TD_VER_NUMBER is:" ${TD_VER_NUMBER}) -IF ("${TD_VER_NUMBER}" STREQUAL "") - SET(TD_VERSION_NUMBER "TDengine-version-unknown") -ELSE() - SET(TD_VERSION_NUMBER ${TD_VER_NUMBER}) -ENDIF () -MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) -ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") - -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) - ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) - SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") -ELSE () - SET(LINK_JEMALLOC "") -ENDIF () - -IF (TD_LINUX) - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC}) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC}) - ENDIF () -ELSEIF (TD_WINDOWS) - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - 
SET_SOURCE_FILES_PROPERTIES(./src/demoUtil.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoData.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoInsert.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoCommandOpt.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoQuery.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoMain.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoSubscribe.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoOutput.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoJsonOpt.c PROPERTIES COMPILE_FLAGS -w) - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) - ENDIF () -ELSEIF (TD_DARWIN) - # missing a few dependencies, such as - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) - ENDIF () -ENDIF () - diff --git a/src/kit/taosdemo/async-sub.json b/src/kit/taosdemo/async-sub.json deleted file mode 100644 index a30a1be45cd8bcc6a6fadffd7473df7df067e839..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/async-sub.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "filetype": "subscribe", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "test", - "specified_table_query": { - "concurrent": 1, - "mode": "async", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "resubAfterConsume": 10, - "sqls": [ - { - "sql": "select col1 from meters where col1 > 1;", - "result": "./subscribe_res0.txt" - }, - { - "sql": "select col2 from meters where col2 > 1;", - "result": "./subscribe_res2.txt" - } - ] - }, - "super_table_query": { - "stblname": "meters", - "threads": 
1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "sqls": [ - { - "sql": "select col1 from xxxx where col1 > 10;", - "result": "./subscribe_res1.txt" - } - ] - } -} diff --git a/src/kit/taosdemo/inc/demo.h b/src/kit/taosdemo/inc/demo.h deleted file mode 100644 index ff605afcbb05d2c27abd382a249456b52763f673..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/inc/demo.h +++ /dev/null @@ -1,644 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef __DEMO__ -#define __DEMO__ - -#include -#include -#include -#define _GNU_SOURCE -#define CURL_STATICLIB - -#ifdef LINUX -#include -#include -#ifndef _ALPINE -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#else -#include -#include -#endif - -#include -#include - -// #include "os.h" -#include "taos.h" -#include "taoserror.h" -#include "tutil.h" - -#define REQ_EXTRA_BUF_LEN 1024 -#define RESP_BUF_LEN 4096 -#define SQL_BUFF_LEN 1024 - -extern char configDir[]; - -#define STR_INSERT_INTO "INSERT INTO " - -#define MAX_RECORDS_PER_REQ 32766 - -#define HEAD_BUFF_LEN \ - TSDB_MAX_COLUMNS * 24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. 
- -#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN -#define FETCH_BUFFER_SIZE 100 * TSDB_MAX_ALLOWED_SQL_LEN -#define COND_BUF_LEN (BUFFER_SIZE - 30) -#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) - -#define MAX_USERNAME_SIZE 64 -#define MAX_HOSTNAME_SIZE \ - 253 // https://man7.org/linux/man-pages/man7/hostname.7.html -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE \ - (16 * TSDB_MAX_COLUMNS) + 20 // max record len: 16*MAX_COLUMNS, timestamp - // string and ,('') need extra space -#define OPT_ABORT 1 /* –abort */ -#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. -#define MAX_PATH_LEN 4096 - -#define DEFAULT_START_TIME 1500000000000 - -#define MAX_PREPARED_RAND 1000000 -#define INT_BUFF_LEN 12 -#define BIGINT_BUFF_LEN 21 -#define SMALLINT_BUFF_LEN 7 -#define TINYINT_BUFF_LEN 5 -#define BOOL_BUFF_LEN 6 -#define FLOAT_BUFF_LEN 22 -#define DOUBLE_BUFF_LEN 42 -#define TIMESTAMP_BUFF_LEN 21 -#define PRINT_STAT_INTERVAL 30 * 1000 - -#define MAX_SAMPLES 10000 -#define MAX_NUM_COLUMNS \ - (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp - -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 200 - -#define MAX_QUERY_SQL_COUNT 100 - -#define MAX_DATABASE_COUNT 256 -#define MAX_JSON_BUFF 6400000 - -#define INPUT_BUF_LEN 256 -#define EXTRA_SQL_LEN 256 -#define TBNAME_PREFIX_LEN \ - (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq -#define SMALL_BUFF_LEN 8 -#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN * 3) -#define NOTE_BUFF_LEN (SMALL_BUFF_LEN * 16) - -#define DEFAULT_NTHREADS 8 -#define DEFAULT_TIMESTAMP_STEP 1 -#define DEFAULT_INTERLACE_ROWS 0 -#define DEFAULT_DATATYPE_NUM 1 -#define DEFAULT_CHILDTABLES 10000 -#define DEFAULT_TEST_MODE 0 -#define DEFAULT_METAFILE NULL -#define DEFAULT_SQLFILE NULL -#define DEFAULT_HOST "localhost" -#define DEFAULT_PORT 6030 -#define DEFAULT_IFACE INTERFACE_BUT -#define DEFAULT_DATABASE "test" -#define DEFAULT_REPLICA 1 -#define DEFAULT_TB_PREFIX "d" -#define DEFAULT_ESCAPE_CHAR 
false -#define DEFAULT_USE_METRIC true -#define DEFAULT_DROP_DB true -#define DEFAULT_AGGR_FUNC false -#define DEFAULT_DEBUG false -#define DEFAULT_VERBOSE false -#define DEFAULT_PERF_STAT false -#define DEFAULT_ANS_YES false -#define DEFAULT_OUTPUT "./output.txt" -#define DEFAULT_SYNC_MODE 0 -#define DEFAULT_DATA_TYPE \ - { TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT } -#define DEFAULT_DATATYPE \ - { "FLOAT", "INT", "FLOAT" } -#define DEFAULT_BINWIDTH 64 -#define DEFAULT_COL_COUNT 4 -#define DEFAULT_LEN_ONE_ROW 76 -#define DEFAULT_INSERT_INTERVAL 0 -#define DEFAULT_QUERY_TIME 1 -#define DEFAULT_PREPARED_RAND 10000 -#define DEFAULT_REQ_PER_REQ 30000 -#define DEFAULT_INSERT_ROWS 10000 -#define DEFAULT_ABORT 0 -#define DEFAULT_RATIO 0 -#define DEFAULT_DISORDER_RANGE 1000 -#define DEFAULT_METHOD_DEL 1 -#define DEFAULT_TOTAL_INSERT 0 -#define DEFAULT_TOTAL_AFFECT 0 -#define DEFAULT_DEMO_MODE true -#define DEFAULT_CREATE_BATCH 10 -#define DEFAULT_SUB_INTERVAL 10000 -#define DEFAULT_QUERY_INTERVAL 10000 - -#define SML_LINE_SQL_SYNTAX_OFFSET 7 - -#if _MSC_VER <= 1900 -#define __func__ __FUNCTION__ -#endif - -#define debugPrint(fmt, ...) \ - do { \ - if (g_args.debug_print || g_args.verbose_print) \ - fprintf(stderr, "DEBG: " fmt, __VA_ARGS__); \ - } while (0) - -#define verbosePrint(fmt, ...) \ - do { \ - if (g_args.verbose_print) fprintf(stderr, "VERB: " fmt, __VA_ARGS__); \ - } while (0) - -#define performancePrint(fmt, ...) \ - do { \ - if (g_args.performance_print) \ - fprintf(stderr, "PERF: " fmt, __VA_ARGS__); \ - } while (0) - -#define errorPrint(fmt, ...) 
\ - do { \ - fprintf(stderr, "\033[31m"); \ - fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ - fprintf(stderr, "ERROR: " fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } while (0) - -enum TEST_MODE { - INSERT_TEST, // 0 - QUERY_TEST, // 1 - SUBSCRIBE_TEST, // 2 - INVAID_TEST -}; - -typedef enum CREATE_SUB_TABLE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL -} CREATE_SUB_TABLE_MOD_EN; - -typedef enum TABLE_EXISTS_EN { - TBL_NO_EXISTS, - TBL_ALREADY_EXISTS, - TBL_EXISTS_BUTT -} TABLE_EXISTS_EN; - -enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; - -enum enum_TAOS_INTERFACE { - TAOSC_IFACE, - REST_IFACE, - STMT_IFACE, - SML_IFACE, - INTERFACE_BUT -}; - -typedef enum enumQUERY_CLASS { - SPECIFIED_CLASS, - STABLE_CLASS, - CLASS_BUT -} QUERY_CLASS; - -typedef enum enum_PROGRESSIVE_OR_INTERLACE { - PROGRESSIVE_INSERT_MODE, - INTERLACE_INSERT_MODE, - INVALID_INSERT_MODE -} PROG_OR_INTERLACE_MODE; - -typedef enum enumQUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT -} QUERY_TYPE; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE -// ------------------------------------- -enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - 
TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES -}; - -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -typedef struct SArguments_S { - char * metaFile; - uint32_t test_mode; - char * host; - uint16_t port; - uint16_t iface; - char * user; - char password[SHELL_MAX_PASSWORD_LEN]; - char * database; - int replica; - char * tb_prefix; - bool escapeChar; - char * sqlFile; - bool use_metric; - bool drop_database; - bool aggr_func; - bool answer_yes; - bool debug_print; - bool verbose_print; - bool performance_print; - char * output_file; - bool async_mode; - char data_type[MAX_NUM_COLUMNS + 1]; - char * dataType[MAX_NUM_COLUMNS + 1]; - uint32_t binwidth; - uint32_t columnCount; - uint64_t lenOfOneRow; - uint32_t nthreads; - uint64_t insert_interval; - uint64_t timestamp_step; - int64_t query_times; - int64_t prepared_rand; - uint32_t interlaceRows; - uint32_t reqPerReq; // num_of_records_per_req - uint64_t max_sql_len; - int64_t ntables; - int64_t insertRows; - int abort; - uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. 
according to database precision - uint32_t method_of_delete; - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - bool demo_mode; // use default column name and semi-random data -} SArguments; - -typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN]; - char data_type; - char dataType[DATATYPE_BUFF_LEN]; - uint32_t dataLen; - char note[NOTE_BUFF_LEN]; -} StrColumn; - -typedef struct SSuperTable_S { - char stbName[TSDB_TABLE_NAME_LEN]; - char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample - char childTblPrefix[TBNAME_PREFIX_LEN]; - uint16_t childTblExists; - int64_t childTblCount; - uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in - // one sql - uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - uint16_t iface; // 0: taosc, 1: rest, 2: stmt - uint16_t lineProtocol; - int64_t childTblLimit; - uint64_t childTblOffset; - - // int multiThreadWriteOneTbl; // 0: no, 1: yes - uint32_t interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. 
according to database precision - uint64_t maxSqlLen; // - - uint64_t insertInterval; // insert interval, will override global insert - // interval - int64_t insertRows; - int64_t timeStampStep; - int tsPrecision; - char startTimestamp[MAX_TB_NAME_SIZE]; - char sampleFormat[SMALL_BUFF_LEN]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN]; - char tagsFile[MAX_FILE_NAME_LEN]; - - uint32_t columnCount; - StrColumn columns[TSDB_MAX_COLUMNS]; - uint32_t tagCount; - StrColumn tags[TSDB_MAX_TAGS]; - - char * childTblName; - bool escapeChar; - char * colsOfCreateChildTable; - uint64_t lenOfOneRow; - uint64_t lenOfTagOfOneRow; - - char *sampleDataBuf; - bool useSampleTs; - - uint32_t tagSource; // 0: rand, 1: tag sample - char * tagDataBuf; - uint32_t tagSampleCount; - uint32_t tagUsePos; - - // bind param batch - char *sampleBindBatchArray; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; -} SSuperTable; - -typedef struct { - char name[TSDB_DB_NAME_LEN]; - char create_time[32]; - int64_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[64]; - int32_t cache; // MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[SMALL_BUFF_LEN]; // time resolution - int8_t update; - char status[16]; -} SDbInfo; - -typedef struct SDbCfg_S { - // int maxtablesPerVnode; - uint32_t minRows; // 0 means default - uint32_t maxRows; // 0 means default - int comp; - int walLevel; - int cacheLast; - int fsync; - int replica; - int update; - int keep; - int days; - int cache; - int blocks; - int quorum; - char precision[SMALL_BUFF_LEN]; -} SDbCfg; - -typedef struct SDataBase_S { - char dbName[TSDB_DB_NAME_LEN]; - bool drop; // 0: use exists, 1: if exists, drop then new create - SDbCfg dbCfg; - uint64_t superTblCount; - SSuperTable *superTbls; -} SDataBase; - -typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char 
host[MAX_HOSTNAME_SIZE]; - struct sockaddr_in serv_addr; - - uint16_t port; - char user[MAX_USERNAME_SIZE]; - char password[SHELL_MAX_PASSWORD_LEN]; - char resultFile[MAX_FILE_NAME_LEN]; - bool use_metric; - bool aggr_func; - bool asyncMode; - - uint32_t threadCount; - uint32_t threadCountForCreateTbl; - uint32_t dbCount; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - SDataBase *db; -} SDbs; - -typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint32_t concurrent; - int sqlCount; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t queryTimes; - bool subscribeRestart; - int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char topic[MAX_QUERY_SQL_COUNT][32]; - int consumed[MAX_QUERY_SQL_COUNT]; - TAOS_RES *res[MAX_QUERY_SQL_COUNT]; - uint64_t totalQueried; -} SpecifiedQueryInfo; - -typedef struct SuperQueryInfo_S { - char stbName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint32_t threadCnt; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - bool subscribeRestart; - int subscribeKeepProgress; - uint64_t queryTimes; - int64_t childTblCount; - char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq - int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume; - int endAfterConsume; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char * childTblName; - uint64_t totalQueried; -} SuperQueryInfo; - -typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_HOSTNAME_SIZE]; - uint16_t port; - struct sockaddr_in serv_addr; - char user[MAX_USERNAME_SIZE]; - char 
password[SHELL_MAX_PASSWORD_LEN]; - char dbName[TSDB_DB_NAME_LEN]; - char queryMode[SMALL_BUFF_LEN]; // taosc, rest - SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; - uint64_t totalQueried; -} SQueryMetaInfo; - -typedef struct SThreadInfo_S { - TAOS * taos; - TAOS_STMT * stmt; - int64_t * bind_ts; - int64_t * bind_ts_array; - char * bindParams; - char * is_null; - int threadID; - char db_name[TSDB_DB_NAME_LEN]; - uint32_t time_precision; - char filePath[MAX_PATH_LEN]; - FILE * fp; - char tb_prefix[TSDB_TABLE_NAME_LEN]; - uint64_t start_table_from; - uint64_t end_table_to; - int64_t ntables; - int64_t tables_created; - uint64_t data_of_rate; - int64_t start_time; - char * cols; - bool use_metric; - SSuperTable *stbInfo; - char * buffer; // sql cmd buffer - - // for async insert - tsem_t lock_sem; - int64_t counter; - uint64_t st; - uint64_t et; - uint64_t lastTs; - - // sample data - int64_t samplePos; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - // insert delay statistics - uint64_t cntDelay; - uint64_t totalDelay; - uint64_t avgDelay; - uint64_t maxDelay; - uint64_t minDelay; - - // seq of query or subscribe - uint64_t querySeq; // sequence number of sql command - TAOS_SUB *tsub; - - char **lines; - SOCKET sockfd; -} threadInfo; - -/* ************ Global variables ************ */ -extern char * g_aggreFuncDemo[]; -extern char * g_aggreFunc[]; -extern SArguments g_args; -extern SDbs g_Dbs; -extern char * g_dupstr; -extern int64_t g_totalChildTables; -extern int64_t g_actualChildTables; -extern SQueryMetaInfo g_queryInfo; -extern FILE * g_fpOfInsertResult; - -#define min(a, b) (((a) < (b)) ? 
(a) : (b)) - -/* ************ Function declares ************ */ -/* demoCommandOpt.c */ -int parse_args(int argc, char *argv[]); -void setParaFromArg(); -void querySqlFile(TAOS *taos, char *sqlFile); -void testCmdLine(); -/* demoJsonOpt.c */ -int getInfoFromJsonFile(char *file); -int testMetaFile(); -/* demoUtil.c */ -int isCommentLine(char *line); -void replaceChildTblName(char *inSql, char *outSql, int tblIndex); -void setupForAnsiEscape(void); -void resetAfterAnsiEscape(void); -int taosRandom(); -void tmfree(void *buf); -void tmfclose(FILE *fp); -void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo); -void prompt(); -void ERROR_EXIT(const char *msg); -int postProceSql(char *host, uint16_t port, char *sqlstr, - threadInfo *pThreadInfo); -int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); -int regexMatch(const char *s, const char *reg, int cflags); -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr); -char *formatTimestamp(char *buf, int64_t val, int precision); -void errorWrongValue(char *program, char *wrong_arg, char *wrong_value); -void errorUnrecognized(char *program, char *wrong_arg); -void errorPrintReqArg(char *program, char *wrong_arg); -void errorPrintReqArg2(char *program, char *wrong_arg); -void errorPrintReqArg3(char *program, char *wrong_arg); -bool isStringNumber(char *input); -int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl); -int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName, - char * stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl, - int64_t limit, uint64_t offset, - bool escapChar); -/* demoInsert.c */ -int insertTestProcess(); -void postFreeResource(); -/* demoOutput.c */ -void printVersion(); -void printfInsertMeta(); -void printfInsertMetaToFile(FILE *fp); -void printStatPerThread(threadInfo *pThreadInfo); -void appendResultBufToFile(char 
*resultBuf, threadInfo *pThreadInfo); -void printfQueryMeta(); -void printHelp(); -void printfQuerySystemInfo(TAOS *taos); -/* demoQuery.c */ -int queryTestProcess(); -/* demoSubscribe.c */ -int subscribeTestProcess(); -#endif \ No newline at end of file diff --git a/src/kit/taosdemo/inc/demoData.h b/src/kit/taosdemo/inc/demoData.h deleted file mode 100644 index f0ac1f2501fa76d1da6c537328d8dd319bbe3c95..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/inc/demoData.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef __DEMODATA__ -#define __DEMODATA__ -#include "cJSON.h" -#include "demo.h" -/***** Global variables ******/ - -extern char * g_sampleDataBuf; -extern char * g_sampleBindBatchArray; -extern int32_t * g_randint; -extern uint32_t *g_randuint; -extern int64_t * g_randbigint; -extern uint64_t *g_randubigint; -extern float * g_randfloat; -extern double * g_randdouble; -extern char * g_randbool_buff; -extern char * g_randint_buff; -extern char * g_randuint_buff; -extern char * g_rand_voltage_buff; -extern char * g_randbigint_buff; -extern char * g_randubigint_buff; -extern char * g_randsmallint_buff; -extern char * g_randusmallint_buff; -extern char * g_randtinyint_buff; -extern char * g_randutinyint_buff; -extern char * g_randfloat_buff; -extern char * g_rand_current_buff; -extern char * g_rand_phase_buff; -extern char * g_randdouble_buff; -/***** Declare functions *****/ -int init_rand_data(); -char * rand_bool_str(); -int32_t rand_bool(); -char * rand_tinyint_str(); -int32_t rand_tinyint(); -char * rand_utinyint_str(); -int32_t rand_utinyint(); -char * rand_smallint_str(); -int32_t rand_smallint(); -char * rand_usmallint_str(); -int32_t rand_usmallint(); -char * rand_int_str(); -int32_t rand_int(); -char * rand_uint_str(); -int32_t rand_uint(); -char * rand_bigint_str(); -int64_t rand_bigint(); -char * rand_ubigint_str(); -int64_t rand_ubigint(); -char * rand_float_str(); -float rand_float(); -char * demo_current_float_str(); -float UNUSED_FUNC demo_current_float(); -char * demo_voltage_int_str(); -int32_t UNUSED_FUNC demo_voltage_int(); -char * demo_phase_float_str(); -float UNUSED_FUNC demo_phase_float(); -void rand_string(char *str, int size); -char * rand_double_str(); -double rand_double(); - -int generateTagValuesForStb(SSuperTable *stbInfo, int64_t tableSeq, - char *tagsValBuf); -int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio, - int disorderRange); -int32_t prepareStbStmtBindTag(char *bindArray, SSuperTable *stbInfo, - 
char *tagsVal, int32_t timePrec); -int32_t prepareStmtWithoutStb(threadInfo *pThreadInfo, char *tableName, - uint32_t batch, int64_t insertRows, - int64_t recordFrom, int64_t startTime); -int32_t generateStbInterlaceData(threadInfo *pThreadInfo, char *tableName, - uint32_t batchPerTbl, uint64_t i, - uint32_t batchPerTblTimes, uint64_t tableSeq, - char *buffer, int64_t insertRows, - int64_t startTime, uint64_t *pRemainderBufLen); -int64_t generateInterlaceDataWithoutStb(char *tableName, uint32_t batch, - uint64_t tableSeq, char *dbName, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen); -int32_t generateStbProgressiveData(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen); -int32_t generateProgressiveDataWithoutStb( - char *tableName, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pRemainderBufLen); -int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf, - int64_t remainderBufLen, int64_t timestamp); -int prepareSampleForStb(SSuperTable *stbInfo); -int prepareSampleForNtb(); -int parseSamplefileToStmtBatch(SSuperTable *stbInfo); -int parseStbSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec, - uint32_t batch); -int parseNtbSampleToStmtBatchForThread(threadInfo *pThreadInfo, - uint32_t timePrec, uint32_t batch); -int prepareSampleData(); -int32_t generateSmlConstPart(char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq); - -int32_t generateSmlMutablePart(char *line, char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp); -int32_t generateSmlJsonTags(cJSON *tagsList, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq); -int32_t generateSmlJsonCols(cJSON *array, cJSON *tag, SSuperTable *stbInfo, - threadInfo *pThreadInfo, 
int64_t timestamp); -#endif \ No newline at end of file diff --git a/src/kit/taosdemo/query.json b/src/kit/taosdemo/query.json deleted file mode 100644 index d84f997c329f005e62642ac32856b8face1c8048..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/query.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "yes", - "databases": "dbx", - "query_times": 1, - "specified_table_query": { - "query_interval": 1, - "concurrent": 4, - "sqls": [ - { - "sql": "select last_row(*) from stb where color='red'", - "result": "./query_res0.txt" - }, - { - "sql": "select count(*) from stb_01", - "result": "./query_res1.txt" - } - ] - }, - "super_table_query": { - "stblname": "stb", - "query_interval": 1, - "threads": 4, - "sqls": [ - { - "sql": "select last_row(*) from xxxx", - "result": "./query_res2.txt" - } - ] - } -} diff --git a/src/kit/taosdemo/src/demoCommandOpt.c b/src/kit/taosdemo/src/demoCommandOpt.c deleted file mode 100644 index ede4c71d1de7376c85d681299d35ca2f2075d45b..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoCommandOpt.c +++ /dev/null @@ -1,1804 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" -#include "demoData.h" - -char *g_aggreFuncDemo[] = {"*", - "count(*)", - "avg(current)", - "sum(current)", - "max(current)", - "min(current)", - "first(current)", - "last(current)"}; -char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", - "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; - -int parse_args(int argc, char *argv[]) { - int32_t code = -1; - for (int i = 1; i < argc; i++) { - if ((0 == strncmp(argv[i], "-f", strlen("-f"))) || - (0 == strncmp(argv[i], "--file", strlen("--file")))) { - g_args.demo_mode = false; - - if (2 == strlen(argv[i])) { - if (i + 1 == argc) { - errorPrintReqArg(argv[0], "f"); - goto end_parse_command; - } - g_args.metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) { - g_args.metaFile = (char *)(argv[i] + strlen("-f")); - } else if (strlen("--file") == strlen(argv[i])) { - if (i + 1 == argc) { - errorPrintReqArg3(argv[0], "--file"); - goto end_parse_command; - } - g_args.metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { - g_args.metaFile = (char *)(argv[i] + strlen("--file=")); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) || - (0 == - strncmp(argv[i], "--config-dir", strlen("--config-dir")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "c"); - goto end_parse_command; - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), - TSDB_FILENAME_LEN); - } else if (strlen("--config-dir") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--config-dir"); - goto end_parse_command; - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], - "--config-dir=", strlen("--config-dir="))) { - tstrncpy(configDir, (char *)(argv[i] + 
strlen("--config-dir=")), - TSDB_FILENAME_LEN); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) || - (0 == strncmp(argv[i], "--host", strlen("--host")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "h"); - goto end_parse_command; - } - g_args.host = argv[++i]; - } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) { - g_args.host = (char *)(argv[i] + strlen("-h")); - } else if (strlen("--host") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--host"); - goto end_parse_command; - } - g_args.host = argv[++i]; - } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { - g_args.host = (char *)(argv[i] + strlen("--host=")); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if (strcmp(argv[i], "-PP") == 0) { - g_args.performance_print = true; - } else if ((0 == strncmp(argv[i], "-P", strlen("-P"))) || - (0 == strncmp(argv[i], "--port", strlen("--port")))) { - uint64_t port; - char strPort[BIGINT_BUFF_LEN]; - - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "P"); - goto end_parse_command; - } else if (isStringNumber(argv[i + 1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "P"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) { - if (isStringNumber((char *)(argv[i] + strlen("--port=")))) { - tstrncpy(strPort, (char *)(argv[i] + strlen("--port=")), - BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) { - if (isStringNumber((char *)(argv[i] + strlen("-P")))) { - tstrncpy(strPort, (char *)(argv[i] + strlen("-P")), - BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else if (strlen("--port") == 
strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--port"); - goto end_parse_command; - } else if (isStringNumber(argv[i + 1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - port = atoi(strPort); - if (port > 65535) { - errorWrongValue("taosdump", "-P or --port", strPort); - goto end_parse_command; - } - g_args.port = (uint16_t)port; - - } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) || - (0 == - strncmp(argv[i], "--interface", strlen("--interface")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "I"); - goto end_parse_command; - } - if (0 == strcasecmp(argv[i + 1], "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "-I", argv[i + 1]); - goto end_parse_command; - } - i++; - } else if (0 == strncmp(argv[i], - "--interface=", strlen("--interface="))) { - if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), - "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "sml")) { - g_args.iface = SML_IFACE; - } else { - errorPrintReqArg3(argv[0], "--interface"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) { - if (0 == - strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == 
strcasecmp((char *)(argv[i] + strlen("-I")), - "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), - "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), - "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "-I", - (char *)(argv[i] + strlen("-I"))); - goto end_parse_command; - } - } else if (strlen("--interface") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--interface"); - goto end_parse_command; - } - if (0 == strcasecmp(argv[i + 1], "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "--interface", argv[i + 1]); - goto end_parse_command; - } - i++; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) || - (0 == strncmp(argv[i], "--user", strlen("--user")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "u"); - goto end_parse_command; - } - g_args.user = argv[++i]; - } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) { - g_args.user = (char *)(argv[i++] + strlen("-u")); - } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) { - g_args.user = (char *)(argv[i++] + strlen("--user=")); - } else if (strlen("--user") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--user"); - goto end_parse_command; - } - g_args.user = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) || - (0 == strcmp(argv[i], "--password"))) { - if ((strlen(argv[i]) == 2) || - (0 == strcmp(argv[i], "--password"))) { - 
printf("Enter password: "); - taosSetConsoleEcho(false); - if (scanf("%s", g_args.password) > 1) { - fprintf(stderr, "password read error!\n"); - } - taosSetConsoleEcho(true); - } else { - tstrncpy(g_args.password, (char *)(argv[i] + 2), - SHELL_MAX_PASSWORD_LEN); - } - } else if ((0 == strncmp(argv[i], "-o", strlen("-o"))) || - (0 == strncmp(argv[i], "--output", strlen("--output")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--output"); - goto end_parse_command; - } - g_args.output_file = argv[++i]; - } else if (0 == - strncmp(argv[i], "--output=", strlen("--output="))) { - g_args.output_file = (char *)(argv[i++] + strlen("--output=")); - } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) { - g_args.output_file = (char *)(argv[i++] + strlen("-o")); - } else if (strlen("--output") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--output"); - goto end_parse_command; - } - g_args.output_file = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) || - (0 == - strncmp(argv[i], "--sql-file", strlen("--sql-file")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "s"); - goto end_parse_command; - } - g_args.sqlFile = argv[++i]; - } else if (0 == - strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) { - g_args.sqlFile = (char *)(argv[i++] + strlen("--sql-file=")); - } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) { - g_args.sqlFile = (char *)(argv[i++] + strlen("-s")); - } else if (strlen("--sql-file") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--sql-file"); - goto end_parse_command; - } - g_args.sqlFile = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) || - (0 == - strncmp(argv[i], "--query-mode", strlen("--query-mode")))) { - if (2 
== strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "q"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "q"); - goto end_parse_command; - } - g_args.async_mode = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], - "--query-mode=", strlen("--query-mode="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--query-mode=")))) { - g_args.async_mode = - atoi((char *)(argv[i] + strlen("--query-mode="))); - } else { - errorPrintReqArg2(argv[0], "--query-mode"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) { - if (isStringNumber((char *)(argv[i] + strlen("-q")))) { - g_args.async_mode = atoi((char *)(argv[i] + strlen("-q"))); - } else { - errorPrintReqArg2(argv[0], "-q"); - goto end_parse_command; - } - } else if (strlen("--query-mode") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--query-mode"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--query-mode"); - goto end_parse_command; - } - g_args.async_mode = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) || - (0 == strncmp(argv[i], "--threads", strlen("--threads")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "T"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "T"); - goto end_parse_command; - } - g_args.nthreads = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--threads=", strlen("--threads="))) { - if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { - g_args.nthreads = - atoi((char *)(argv[i] + strlen("--threads="))); - } else { - errorPrintReqArg2(argv[0], "--threads"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { - if (isStringNumber((char *)(argv[i] + 
strlen("-T")))) { - g_args.nthreads = atoi((char *)(argv[i] + strlen("-T"))); - } else { - errorPrintReqArg2(argv[0], "-T"); - goto end_parse_command; - } - } else if (strlen("--threads") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--threads"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--threads"); - goto end_parse_command; - } - g_args.nthreads = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) || - (0 == strncmp(argv[i], "--insert-interval", - strlen("--insert-interval")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "i"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "i"); - goto end_parse_command; - } - g_args.insert_interval = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--insert-interval=", - strlen("--insert-interval="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--insert-interval=")))) { - g_args.insert_interval = - atoi((char *)(argv[i] + strlen("--insert-interval="))); - } else { - errorPrintReqArg3(argv[0], "--insert-innterval"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) { - if (isStringNumber((char *)(argv[i] + strlen("-i")))) { - g_args.insert_interval = - atoi((char *)(argv[i] + strlen("-i"))); - } else { - errorPrintReqArg3(argv[0], "-i"); - goto end_parse_command; - } - } else if (strlen("--insert-interval") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--insert-interval"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--insert-interval"); - goto end_parse_command; - } - g_args.insert_interval = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == 
strncmp(argv[i], "-S", strlen("-S"))) || - (0 == - strncmp(argv[i], "--time-step", strlen("--time-step")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "S"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "S"); - goto end_parse_command; - } - g_args.timestamp_step = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], - "--time-step=", strlen("--time-step="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--time-step=")))) { - g_args.timestamp_step = - atoi((char *)(argv[i] + strlen("--time-step="))); - } else { - errorPrintReqArg2(argv[0], "--time-step"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) { - if (isStringNumber((char *)(argv[i] + strlen("-S")))) { - g_args.timestamp_step = - atoi((char *)(argv[i] + strlen("-S"))); - } else { - errorPrintReqArg2(argv[0], "-S"); - goto end_parse_command; - } - } else if (strlen("--time-step") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--time-step"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--time-step"); - goto end_parse_command; - } - g_args.timestamp_step = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i + 1) || (!isStringNumber(argv[i + 1]))) { - printHelp(); - errorPrint("%s", "\n\t-qt need a number following!\n"); - goto end_parse_command; - } - g_args.query_times = atoi(argv[++i]); - } else if ((0 == strncmp(argv[i], "-B", strlen("-B"))) || - (0 == strncmp(argv[i], "--interlace-rows", - strlen("--interlace-rows")))) { - if (strlen("-B") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "B"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "B"); - goto end_parse_command; - } - g_args.interlaceRows = 
atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--interlace-rows=", - strlen("--interlace-rows="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--interlace-rows=")))) { - g_args.interlaceRows = - atoi((char *)(argv[i] + strlen("--interlace-rows="))); - } else { - errorPrintReqArg2(argv[0], "--interlace-rows"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { - if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - g_args.interlaceRows = - atoi((char *)(argv[i] + strlen("-B"))); - } else { - errorPrintReqArg2(argv[0], "-B"); - goto end_parse_command; - } - } else if (strlen("--interlace-rows") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--interlace-rows"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--interlace-rows"); - goto end_parse_command; - } - g_args.interlaceRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) || - (0 == strncmp(argv[i], "--rec-per-req", 13))) { - if (strlen("-r") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "r"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "r"); - goto end_parse_command; - } - g_args.reqPerReq = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--rec-per-req=", - strlen("--rec-per-req="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--rec-per-req=")))) { - g_args.reqPerReq = - atoi((char *)(argv[i] + strlen("--rec-per-req="))); - } else { - errorPrintReqArg2(argv[0], "--rec-per-req"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { - if (isStringNumber((char *)(argv[i] + strlen("-r")))) { - g_args.reqPerReq = atoi((char *)(argv[i] + strlen("-r"))); - } else { - errorPrintReqArg2(argv[0], "-r"); - goto end_parse_command; - } - } else if 
(strlen("--rec-per-req") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--rec-per-req"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--rec-per-req"); - goto end_parse_command; - } - g_args.reqPerReq = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) || - (0 == strncmp(argv[i], "--tables", strlen("--tables")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "t"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "t"); - goto end_parse_command; - } - g_args.ntables = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--tables=", strlen("--tables="))) { - if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { - g_args.ntables = - atoi((char *)(argv[i] + strlen("--tables="))); - } else { - errorPrintReqArg2(argv[0], "--tables"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { - if (isStringNumber((char *)(argv[i] + strlen("-t")))) { - g_args.ntables = atoi((char *)(argv[i] + strlen("-t"))); - } else { - errorPrintReqArg2(argv[0], "-t"); - goto end_parse_command; - } - } else if (strlen("--tables") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--tables"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--tables"); - goto end_parse_command; - } - g_args.ntables = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - g_totalChildTables = g_args.ntables; - } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) || - (0 == strncmp(argv[i], "--records", strlen("--records")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "n"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 
1])) { - errorPrintReqArg2(argv[0], "n"); - goto end_parse_command; - } - g_args.insertRows = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--records=", strlen("--records="))) { - if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { - g_args.insertRows = - atoi((char *)(argv[i] + strlen("--records="))); - } else { - errorPrintReqArg2(argv[0], "--records"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { - if (isStringNumber((char *)(argv[i] + strlen("-n")))) { - g_args.insertRows = atoi((char *)(argv[i] + strlen("-n"))); - } else { - errorPrintReqArg2(argv[0], "-n"); - goto end_parse_command; - } - } else if (strlen("--records") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--records"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--records"); - goto end_parse_command; - } - g_args.insertRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) || - (0 == - strncmp(argv[i], "--database", strlen("--database")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "d"); - goto end_parse_command; - } - g_args.database = argv[++i]; - } else if (0 == - strncmp(argv[i], "--database=", strlen("--database="))) { - g_args.output_file = (char *)(argv[i] + strlen("--database=")); - } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) { - g_args.output_file = (char *)(argv[i] + strlen("-d")); - } else if (strlen("--database") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--database"); - goto end_parse_command; - } - g_args.database = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) || - (0 == strncmp(argv[i], "--columns", strlen("--columns")))) { - g_args.demo_mode = false; - 
if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "l"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "l"); - goto end_parse_command; - } - g_args.columnCount = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--columns=", strlen("--columns="))) { - if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { - g_args.columnCount = - atoi((char *)(argv[i] + strlen("--columns="))); - } else { - errorPrintReqArg2(argv[0], "--columns"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { - if (isStringNumber((char *)(argv[i] + strlen("-l")))) { - g_args.columnCount = atoi((char *)(argv[i] + strlen("-l"))); - } else { - errorPrintReqArg2(argv[0], "-l"); - goto end_parse_command; - } - } else if (strlen("--columns") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--columns"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--columns"); - goto end_parse_command; - } - g_args.columnCount = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.columnCount > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptable columns count is %d\n", - MAX_NUM_COLUMNS); - prompt(); - g_args.columnCount = MAX_NUM_COLUMNS; - } - - for (int col = DEFAULT_DATATYPE_NUM; col < g_args.columnCount; - col++) { - g_args.dataType[col] = "INT"; - g_args.data_type[col] = TSDB_DATA_TYPE_INT; - } - for (int col = g_args.columnCount; col < MAX_NUM_COLUMNS; col++) { - g_args.dataType[col] = NULL; - g_args.data_type[col] = TSDB_DATA_TYPE_NULL; - } - } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) || - (0 == - strncmp(argv[i], "--data-type", strlen("--data-type")))) { - g_args.demo_mode = false; - - char *dataType; - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "b"); - goto end_parse_command; - } - dataType 
= argv[++i]; - } else if (0 == strncmp(argv[i], - "--data-type=", strlen("--data-type="))) { - dataType = (char *)(argv[i] + strlen("--data-type=")); - } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) { - dataType = (char *)(argv[i] + strlen("-b")); - } else if (strlen("--data-type") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--data-type"); - goto end_parse_command; - } - dataType = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (strstr(dataType, ",") == NULL) { - // only one col - if (strcasecmp(dataType, "INT") && - strcasecmp(dataType, "FLOAT") && - strcasecmp(dataType, "TINYINT") && - strcasecmp(dataType, "BOOL") && - strcasecmp(dataType, "SMALLINT") && - strcasecmp(dataType, "BIGINT") && - strcasecmp(dataType, "DOUBLE") && - strcasecmp(dataType, "TIMESTAMP") && - !regexMatch(dataType, - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED) && - strcasecmp(dataType, "UTINYINT") && - strcasecmp(dataType, "USMALLINT") && - strcasecmp(dataType, "UINT") && - strcasecmp(dataType, "UBIGINT")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - goto end_parse_command; - } - if (0 == strcasecmp(dataType, "INT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(dataType, "TINYINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strcasecmp(dataType, "SMALLINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(dataType, "BIGINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(dataType, "FLOAT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(dataType, "DOUBLE")) { - g_args.data_type[0] = TSDB_DATA_TYPE_DOUBLE; - } else if (1 == regexMatch(dataType, - "^BINARY(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[0] = TSDB_DATA_TYPE_BINARY; - } else if (1 == regexMatch(dataType, - 
"^NCHAR(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[0] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(dataType, "BOOL")) { - g_args.data_type[0] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { - g_args.data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(dataType, "UTINYINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(dataType, "USMALLINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(dataType, "UINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(dataType, "UBIGINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UBIGINT; - } else { - g_args.data_type[0] = TSDB_DATA_TYPE_NULL; - } - g_args.dataType[0] = dataType; - g_args.dataType[1] = NULL; - g_args.data_type[1] = TSDB_DATA_TYPE_NULL; - } else { - // more than one col - int index = 0; - g_dupstr = strdup(dataType); - char *running = g_dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") && - strcasecmp(token, "FLOAT") && - strcasecmp(token, "TINYINT") && - strcasecmp(token, "BOOL") && - strcasecmp(token, "SMALLINT") && - strcasecmp(token, "BIGINT") && - strcasecmp(token, "DOUBLE") && - strcasecmp(token, "TIMESTAMP") && - !regexMatch(token, - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED) && - strcasecmp(token, "UTINYINT") && - strcasecmp(token, "USMALLINT") && - strcasecmp(token, "UINT") && - strcasecmp(token, "UBIGINT")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - goto end_parse_command; - } - - if (0 == strcasecmp(token, "INT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(token, "FLOAT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(token, "SMALLINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(token, "BIGINT")) { - 
g_args.data_type[index] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(token, "DOUBLE")) { - g_args.data_type[index] = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strcasecmp(token, "TINYINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_TINYINT; - } else if (1 == regexMatch(token, - "^BINARY(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[index] = TSDB_DATA_TYPE_BINARY; - } else if (1 == regexMatch(token, - "^NCHAR(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[index] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(token, "BOOL")) { - g_args.data_type[index] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(token, "TIMESTAMP")) { - g_args.data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(token, "UTINYINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(token, "USMALLINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(token, "UINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(token, "UBIGINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UBIGINT; - } else { - g_args.data_type[index] = TSDB_DATA_TYPE_NULL; - } - g_args.dataType[index] = token; - index++; - token = strsep(&running, ","); - if (index >= MAX_NUM_COLUMNS) break; - } - g_args.dataType[index] = NULL; - g_args.data_type[index] = TSDB_DATA_TYPE_NULL; - } - } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) || - (0 == - strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "w"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "w"); - goto end_parse_command; - } - g_args.binwidth = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) { - if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) { - g_args.binwidth = - atoi((char 
*)(argv[i] + strlen("--binwidth="))); - } else { - errorPrintReqArg2(argv[0], "--binwidth"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) { - if (isStringNumber((char *)(argv[i] + strlen("-w")))) { - g_args.binwidth = atoi((char *)(argv[i] + strlen("-w"))); - } else { - errorPrintReqArg2(argv[0], "-w"); - goto end_parse_command; - } - } else if (strlen("--binwidth") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--binwidth"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--binwidth"); - goto end_parse_command; - } - g_args.binwidth = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) || - (0 == strncmp(argv[i], "--table-prefix", - strlen("--table-prefix")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "m"); - goto end_parse_command; - } - g_args.tb_prefix = argv[++i]; - } else if (0 == strncmp(argv[i], "--table-prefix=", - strlen("--table-prefix="))) { - g_args.tb_prefix = - (char *)(argv[i] + strlen("--table-prefix=")); - } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) { - g_args.tb_prefix = (char *)(argv[i] + strlen("-m")); - } else if (strlen("--table-prefix") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--table-prefix"); - goto end_parse_command; - } - g_args.tb_prefix = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-E", strlen("-E"))) || - (0 == strncmp(argv[i], "--escape-character", - strlen("--escape-character")))) { - g_args.escapeChar = true; - } else if ((strcmp(argv[i], "-N") == 0) || - (0 == strcmp(argv[i], "--normal-table"))) { - g_args.demo_mode = false; - g_args.use_metric = false; - } else if ((strcmp(argv[i], "-M") == 0) || - (0 == strcmp(argv[i], "--random"))) { - 
g_args.demo_mode = false; - } else if ((strcmp(argv[i], "-x") == 0) || - (0 == strcmp(argv[i], "--aggr-func"))) { - g_args.aggr_func = true; - } else if ((strcmp(argv[i], "-y") == 0) || - (0 == strcmp(argv[i], "--answer-yes"))) { - g_args.answer_yes = true; - } else if ((strcmp(argv[i], "-g") == 0) || - (0 == strcmp(argv[i], "--debug"))) { - g_args.debug_print = true; - } else if (strcmp(argv[i], "-gg") == 0) { - g_args.verbose_print = true; - } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) || - (0 == strncmp(argv[i], "--disorder-range", - strlen("--disorder-range")))) { - if (strlen("-R") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "R"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "R"); - goto end_parse_command; - } - g_args.disorderRange = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--disorder-range=", - strlen("--disorder-range="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--disorder-range=")))) { - g_args.disorderRange = - atoi((char *)(argv[i] + strlen("--disorder-range="))); - } else { - errorPrintReqArg2(argv[0], "--disorder-range"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { - if (isStringNumber((char *)(argv[i] + strlen("-R")))) { - g_args.disorderRange = - atoi((char *)(argv[i] + strlen("-R"))); - } else { - errorPrintReqArg2(argv[0], "-R"); - goto end_parse_command; - } - - if (g_args.disorderRange < 0) { - errorPrint("Invalid disorder range %d, will be set to %d\n", - g_args.disorderRange, 1000); - g_args.disorderRange = 1000; - } - } else if (strlen("--disorder-range") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--disorder-range"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--disorder-range"); - goto end_parse_command; - } - g_args.disorderRange = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], 
argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) || - (0 == - strncmp(argv[i], "--disorder", strlen("--disorder")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "O"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "O"); - goto end_parse_command; - } - g_args.disorderRatio = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--disorder=", strlen("--disorder="))) { - if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) { - g_args.disorderRatio = - atoi((char *)(argv[i] + strlen("--disorder="))); - } else { - errorPrintReqArg2(argv[0], "--disorder"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) { - if (isStringNumber((char *)(argv[i] + strlen("-O")))) { - g_args.disorderRatio = - atoi((char *)(argv[i] + strlen("-O"))); - } else { - errorPrintReqArg2(argv[0], "-O"); - goto end_parse_command; - } - } else if (strlen("--disorder") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--disorder"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--disorder"); - goto end_parse_command; - } - g_args.disorderRatio = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.disorderRatio > 50) { - errorPrint("Invalid disorder ratio %d, will be set to %d\n", - g_args.disorderRatio, 50); - g_args.disorderRatio = 50; - } - } else if ((0 == strncmp(argv[i], "-a", strlen("-a"))) || - (0 == strncmp(argv[i], "--replica", strlen("--replica")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "a"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "a"); - goto end_parse_command; - } - g_args.replica = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--replica=", 
strlen("--replica="))) { - if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) { - g_args.replica = - atoi((char *)(argv[i] + strlen("--replica="))); - } else { - errorPrintReqArg2(argv[0], "--replica"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) { - if (isStringNumber((char *)(argv[i] + strlen("-a")))) { - g_args.replica = atoi((char *)(argv[i] + strlen("-a"))); - } else { - errorPrintReqArg2(argv[0], "-a"); - goto end_parse_command; - } - } else if (strlen("--replica") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--replica"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--replica"); - goto end_parse_command; - } - g_args.replica = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.replica > 3 || g_args.replica < 1) { - errorPrint("Invalid replica value %d, will be set to %d\n", - g_args.replica, 1); - g_args.replica = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - g_args.method_of_delete = atoi(argv[++i]); - if (g_args.method_of_delete > 3) { - errorPrint("%s", - "\n\t-D need a value (0~3) number following!\n"); - goto end_parse_command; - } - } else if ((strcmp(argv[i], "--version") == 0) || - (strcmp(argv[i], "-V") == 0)) { - printVersion(); - } else if ((strcmp(argv[i], "--help") == 0) || - (strcmp(argv[i], "-?") == 0)) { - printHelp(); - } else if (strcmp(argv[i], "--usage") == 0) { - printf( - " Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ - [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ - [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ - [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ - [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ - [--help] [--usage] [--version]\n"); - 
exit(EXIT_SUCCESS); - } else { - // to simulate argp_option output - if (strlen(argv[i]) > 2) { - if (0 == strncmp(argv[i], "--", 2)) { - fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], - argv[i]); - } else if (0 == strncmp(argv[i], "-", 1)) { - char tmp[2] = {0}; - tstrncpy(tmp, argv[i] + 1, 2); - fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], - tmp); - } else { - fprintf(stderr, "%s: Too many arguments\n", argv[0]); - } - } else { - fprintf(stderr, "%s invalid options -- '%s'\n", argv[0], - (char *)((char *)argv[i]) + 1); - } - fprintf(stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more " - "information.\n"); - goto end_parse_command; - } - } - - int columnCount; - for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount++) { - if (g_args.dataType[columnCount] == NULL) { - break; - } - } - - if (0 == columnCount) { - errorPrint("%s", "data type error!\n"); - goto end_parse_command; - } - g_args.columnCount = columnCount; - - g_args.lenOfOneRow = TIMESTAMP_BUFF_LEN; // timestamp - for (int c = 0; c < g_args.columnCount; c++) { - switch (g_args.data_type[c]) { - case TSDB_DATA_TYPE_BINARY: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - g_args.lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - g_args.lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - g_args.lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - g_args.lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - g_args.lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - g_args.lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - g_args.lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case 
TSDB_DATA_TYPE_TIMESTAMP: - g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint("get error data type : %s\n", g_args.dataType[c]); - goto end_parse_command; - } - } - - if (((g_args.debug_print) && (NULL != g_args.metaFile)) || - g_args.verbose_print) { - printf( - "##################################################################" - "#\n"); - printf("# meta file: %s\n", g_args.metaFile); - printf("# Server IP: %s:%hu\n", - g_args.host == NULL ? "localhost" : g_args.host, g_args.port); - printf("# User: %s\n", g_args.user); - printf("# Password: %s\n", g_args.password); - printf("# Use metric: %s\n", - g_args.use_metric ? "true" : "false"); - if (*(g_args.dataType)) { - printf("# Specified data type: "); - for (int c = 0; c < MAX_NUM_COLUMNS; c++) - if (g_args.dataType[c]) - printf("%s,", g_args.dataType[c]); - else - break; - printf("\n"); - } - printf("# Insertion interval: %" PRIu64 "\n", - g_args.insert_interval); - printf("# Number of records per req: %u\n", g_args.reqPerReq); - printf("# Max SQL length: %" PRIu64 "\n", - g_args.max_sql_len); - printf("# Length of Binary: %d\n", g_args.binwidth); - printf("# Number of Threads: %d\n", g_args.nthreads); - printf("# Number of Tables: %" PRId64 "\n", - g_args.ntables); - printf("# Number of Data per Table: %" PRId64 "\n", - g_args.insertRows); - printf("# Database name: %s\n", g_args.database); - printf("# Table prefix: %s\n", g_args.tb_prefix); - if (g_args.disorderRatio) { - printf("# Data order: %d\n", - g_args.disorderRatio); - printf("# Data out of order rate: %d\n", - g_args.disorderRange); - } - printf("# Delete method: %d\n", - g_args.method_of_delete); - printf("# Answer yes when prompt: %d\n", g_args.answer_yes); - printf("# Print debug info: %d\n", g_args.debug_print); - printf("# Print verbose info: %d\n", - g_args.verbose_print); - printf( - "##################################################################" - "#\n"); - - prompt(); - } - code = 0; -end_parse_command: - 
return code; -} -void setParaFromArg() { - char type[20]; - char length[20]; - if (g_args.host) { - tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); - } else { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } - - if (g_args.user) { - tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); - } - - tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN); - - if (g_args.port) { - g_Dbs.port = g_args.port; - } - - g_Dbs.threadCount = g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = true; - - tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN); - - tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - - g_Dbs.use_metric = g_args.use_metric; - g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND); - g_Dbs.aggr_func = g_args.aggr_func; - - char dataString[TSDB_MAX_BYTES_PER_ROW]; - char * data_type = g_args.data_type; - char **dataType = g_args.dataType; - - memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - - if ((data_type[0] == TSDB_DATA_TYPE_BINARY) || - (data_type[0] == TSDB_DATA_TYPE_BOOL) || - (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { - g_Dbs.aggr_func = false; - } - - if (g_args.use_metric) { - g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", - TSDB_TABLE_NAME_LEN); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; - g_Dbs.db[0].superTbls[0].escapeChar = g_args.escapeChar; - g_Dbs.threadCount = g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.asyncMode = g_args.async_mode; - - g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - 
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, - TBNAME_PREFIX_LEN); - tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN); - - if (g_args.iface == INTERFACE_BUT) { - g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; - } else { - g_Dbs.db[0].superTbls[0].iface = g_args.iface; - } - g_Dbs.db[0].superTbls[0].lineProtocol = TSDB_SML_LINE_PROTOCOL; - g_Dbs.db[0].superTbls[0].tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, - "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; - - g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; - g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; - - g_Dbs.db[0].superTbls[0].columnCount = 0; - for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == TSDB_DATA_TYPE_NULL) { - break; - } - - g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, dataType[i], - min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - if (1 == regexMatch(dataType[i], - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", - REG_ICASE | REG_EXTENDED)) { - sscanf(dataType[i], "%[^(](%[^)]", type, length); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = atoi(length); - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, type, - min(DATATYPE_BUFF_LEN, strlen(type) + 1)); - } else { - switch (g_Dbs.db[0].superTbls[0].columns[i].data_type) { - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_UTINYINT: - case TSDB_DATA_TYPE_TINYINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(char); - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int16_t); - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int32_t); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: - case 
TSDB_DATA_TYPE_UBIGINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int64_t); - break; - case TSDB_DATA_TYPE_FLOAT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(float); - break; - case TSDB_DATA_TYPE_DOUBLE: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(double); - break; - default: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - g_args.binwidth; - break; - } - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - dataType[i], - min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - } - g_Dbs.db[0].superTbls[0].columnCount++; - } - - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; - } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.columnCount; i++) { - g_Dbs.db[0].superTbls[0].columns[i].data_type = - TSDB_DATA_TYPE_INT; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", - min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(int32_t); - g_Dbs.db[0].superTbls[0].columnCount++; - } - } - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", - min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].tags[0].data_type = TSDB_DATA_TYPE_INT; - g_Dbs.db[0].superTbls[0].tags[0].dataLen = sizeof(int32_t); - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", - min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1)); - g_Dbs.db[0].superTbls[0].tags[1].data_type = TSDB_DATA_TYPE_BINARY; - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.db[0].superTbls[0].tagCount = 0; - } -} - -void querySqlFile(TAOS *taos, char *sqlFile) { - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - return; - } - - int read_len = 0; - char *cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); - 
if (cmd == NULL) { - errorPrint("%s", "failde to allocate memory\n"); - return; - } - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - double t = (double)taosGetTimestampMs(); - - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; - line[--read_len] = '\0'; - - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + cmd_len, line, read_len); - if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec %s failed!\n", cmd); - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; - } - memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); - cmd_len = 0; - } - - t = taosGetTimestampMs() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t / 1000000); - - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; -} - -void *queryStableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS * taos = pThreadInfo->taos; - setThreadName("queryStableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - if (NULL == command) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, - strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows = pThreadInfo->stbInfo->insertRows; - int64_t ntables = - pThreadInfo->ntables; // pThreadInfo->end_table_to - - // pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func ? (sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) - : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func ? 
(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf( - "\nThe first field is either Binary or Bool. Aggregation functions " - "are not supported.\n"); - } - - printf("%" PRId64 " records:\n", totalData); - fprintf(fp, "Querying On %" PRId64 " records:\n", totalData); - - for (int j = 0; j < n; j++) { - char condition[COND_BUF_LEN] = "\0"; - char tempS[64] = "\0"; - - int64_t m = 10 < ntables ? 10 : ntables; - - for (int64_t i = 1; i <= m; i++) { - if (i == 1) { - if (g_args.demo_mode) { - sprintf(tempS, "groupid = %" PRId64 "", i); - } else { - sprintf(tempS, "t0 = %" PRId64 "", i); - } - } else { - if (g_args.demo_mode) { - sprintf(tempS, " or groupid = %" PRId64 " ", i); - } else { - sprintf(tempS, " or t0 = %" PRId64 " ", i); - } - } - strncat(condition, tempS, COND_BUF_LEN - 1); - - sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], - condition); - - printf("Where condition: %s\n", condition); - - debugPrint("%s() LN%d, sql command: %s\n", __func__, __LINE__, - command); - fprintf(fp, "%s\n", command); - - double t = (double)taosGetTimestampUs(); - - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint("Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); - free(command); - return NULL; - } - int count = 0; - while (taos_fetch_row(pSql) != NULL) { - count++; - } - t = taosGetTimestampUs() - t; - - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - ntables * insertRows / (t / 1000), t); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], - t / 1000000); - - taos_free_result(pSql); - } - fprintf(fp, "\n"); - } - fclose(fp); - free(command); - - return NULL; -} - -void *queryNtableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS * taos = pThreadInfo->taos; - setThreadName("queryNtableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - if (NULL == 
command) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - uint64_t startTime = pThreadInfo->start_time; - char * tb_prefix = pThreadInfo->tb_prefix; - FILE * fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, - strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows; - /* if (pThreadInfo->stbInfo) { - insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; - } else { - */ - insertRows = g_args.insertRows; - // } - - int64_t ntables = - pThreadInfo->ntables; // pThreadInfo->end_table_to - - // pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func ? (sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) - : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf( - "\nThe first field is either Binary or Bool. 
Aggregation functions " - "are not supported.\n"); - } - printf("%" PRId64 " records:\n", totalData); - fprintf( - fp, - "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - uint64_t count = 0; - for (int64_t i = 0; i < ntables; i++) { - sprintf(command, "SELECT %s FROM %s%" PRId64 " WHERE ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, startTime); - - double t = (double)taosGetTimestampUs(); - debugPrint("%s() LN%d, sql command: %s\n", __func__, __LINE__, - command); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint("Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); - free(command); - return NULL; - } - - while (taos_fetch_row(pSql) != NULL) { - count++; - } - - t = taosGetTimestampUs() - t; - totalT += t; - - taos_free_result(pSql); - } - - fprintf(fp, "|%10s | %" PRId64 " | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? 
" * " : aggreFunc[j], totalData, - (double)(ntables * insertRows) / totalT, totalT / 1000000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], - totalT / 1000000); - } - fprintf(fp, "\n"); - fclose(fp); - free(command); - return NULL; -} - -void queryAggrFunc() { - // query data - - pthread_t read_id; - threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo)); - if (pThreadInfo == NULL) { - errorPrint("%s", "failde to allocate memory\n"); - return; - } - - pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 - pThreadInfo->start_table_from = 0; - - if (g_args.use_metric) { - pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); - } else { - pThreadInfo->ntables = g_args.ntables; - pThreadInfo->end_table_to = g_args.ntables - 1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); - } - - pThreadInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - g_Dbs.db[0].dbName, g_Dbs.port); - if (pThreadInfo->taos == NULL) { - free(pThreadInfo); - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); - } else { - pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); - } - pthread_join(read_id, NULL); - taos_close(pThreadInfo->taos); - free(pThreadInfo); -} - -void testCmdLine() { - if (strlen(configDir)) { - wordexp_t full_path; - if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", configDir); - return; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } - - g_args.test_mode = 
INSERT_TEST; - insertTestProcess(); - - if (g_Dbs.aggr_func) { - queryAggrFunc(); - } -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoData.c b/src/kit/taosdemo/src/demoData.c deleted file mode 100644 index 39d7954a2488621cf3fdee859777484a1f38601b..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoData.c +++ /dev/null @@ -1,2510 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "demoData.h" -#include "demo.h" - -char * g_sampleDataBuf = NULL; -char * g_sampleBindBatchArray = NULL; -int32_t * g_randint = NULL; -uint32_t *g_randuint = NULL; -int64_t * g_randbigint = NULL; -uint64_t *g_randubigint = NULL; -float * g_randfloat = NULL; -double * g_randdouble = NULL; -char * g_randbool_buff = NULL; -char * g_randint_buff = NULL; -char * g_randuint_buff = NULL; -char * g_rand_voltage_buff = NULL; -char * g_randbigint_buff = NULL; -char * g_randubigint_buff = NULL; -char * g_randsmallint_buff = NULL; -char * g_randusmallint_buff = NULL; -char * g_randtinyint_buff = NULL; -char * g_randutinyint_buff = NULL; -char * g_randfloat_buff = NULL; -char * g_rand_current_buff = NULL; -char * g_rand_phase_buff = NULL; -char * g_randdouble_buff = NULL; - -const char charset[] = - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; - -char *rand_bool_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbool_buff + ((cursor 
% g_args.prepared_rand) * BOOL_BUFF_LEN); -} - -int32_t rand_bool() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_BOOL_NULL; -} - -char *rand_tinyint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randtinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -int32_t rand_tinyint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_TINYINT_NULL; -} - -char *rand_utinyint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randutinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -int32_t rand_utinyint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_UTINYINT_NULL; -} - -char *rand_smallint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randsmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -int32_t rand_smallint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_SMALLINT_NULL; -} - -char *rand_usmallint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randusmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -int32_t rand_usmallint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_USMALLINT_NULL; -} - -char *rand_int_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint_buff 
+ ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t rand_int() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand]; -} - -char *rand_uint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t rand_uint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand]; -} - -char *rand_bigint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -int64_t rand_bigint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint[cursor % g_args.prepared_rand]; -} - -char *rand_ubigint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -int64_t rand_ubigint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint[cursor % g_args.prepared_rand]; -} - -char *rand_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float rand_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat[cursor % g_args.prepared_rand]; -} - -char *demo_current_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_current_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float UNUSED_FUNC demo_current_float() { - static int cursor; - cursor++; - 
if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)(9.8 + - 0.04 * (g_randint[cursor % g_args.prepared_rand] % 10) + - g_randfloat[cursor % g_args.prepared_rand] / 1000000000); -} - -char *demo_voltage_int_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_voltage_buff + - ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t UNUSED_FUNC demo_voltage_int() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return 215 + g_randint[cursor % g_args.prepared_rand] % 10; -} - -char *demo_phase_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_phase_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float UNUSED_FUNC demo_phase_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)((115 + g_randint[cursor % g_args.prepared_rand] % 10 + - g_randfloat[cursor % g_args.prepared_rand] / 1000000000) / - 360); -} - -void rand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - //--size; - int n; - for (n = 0; n < size; n++) { - int key = abs(taosRandom()) % (int)(sizeof(charset) - 1); - str[n] = charset[key]; - } - str[n] = 0; - } -} - -char *rand_double_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN); -} - -double rand_double() { - static int cursor; - cursor++; - cursor = cursor % g_args.prepared_rand; - return g_randdouble[cursor]; -} - -int init_rand_data() { - int32_t code = -1; - g_randint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randint_buff) { - errorPrint("%s", "failed to 
allocate memory\n"); - goto end_init_rand_data; - } - g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randbigint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randsmallint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randtinyint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randbool_buff = calloc(1, BOOL_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randbool_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randfloat_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_rand_current_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_rand_phase_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randdouble_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randuint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randuint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randutinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randutinyint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randusmallint_buff = calloc(1, 
SMALLINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randusmallint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randubigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randubigint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randint = calloc(1, sizeof(int32_t) * g_args.prepared_rand); - if (NULL == g_randint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randuint = calloc(1, sizeof(uint32_t) * g_args.prepared_rand); - if (NULL == g_randuint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randbigint = calloc(1, sizeof(int64_t) * g_args.prepared_rand); - if (NULL == g_randbigint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randubigint = calloc(1, sizeof(uint64_t) * g_args.prepared_rand); - if (NULL == g_randubigint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randfloat = calloc(1, sizeof(float) * g_args.prepared_rand); - if (NULL == g_randfloat) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randdouble = calloc(1, sizeof(double) * g_args.prepared_rand); - if (NULL == g_randdouble) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - - for (int i = 0; i < g_args.prepared_rand; i++) { - g_randint[i] = (int)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randuint[i] = (int)(taosRandom()); - sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", g_randint[i]); - sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d", - 215 + g_randint[i] % 10); - - sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s", - ((g_randint[i] % 2) & 1) ? 
"true" : "false"); - sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randint[i] % 32768); - sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randint[i] % 128); - sprintf(g_randuint_buff + i * INT_BUFF_LEN, "%d", g_randuint[i]); - sprintf(g_randusmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randuint[i] % 65535); - sprintf(g_randutinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randuint[i] % 255); - - g_randbigint[i] = (int64_t)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randubigint[i] = (uint64_t)(taosRandom()); - sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%" PRId64 "", - g_randbigint[i]); - sprintf(g_randubigint_buff + i * BIGINT_BUFF_LEN, "%" PRId64 "", - g_randubigint[i]); - - g_randfloat[i] = - (float)(taosRandom() / 1000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1); - sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f", g_randfloat[i]); - sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f", - (float)(9.8 + 0.04 * (g_randint[i] % 10) + - g_randfloat[i] / 1000000000)); - sprintf( - g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f", - (float)((115 + g_randint[i] % 10 + g_randfloat[i] / 1000000000) / - 360)); - - g_randdouble[i] = (double)(taosRandom() / 1000000.0) * - (taosRandom() % 2 > 0.5 ? 
1 : -1); - sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f", g_randdouble[i]); - } - code = 0; -end_init_rand_data: - return code; -} - -static void generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len, - char *buf) { - if (tableSeq % 2) { - tstrncpy(buf, "beijing", len); - } else { - tstrncpy(buf, "shanghai", len); - } - // rand_string(buf, stbInfo->tags[i].dataLen); -} - -int generateTagValuesForStb(SSuperTable *stbInfo, int64_t tableSeq, - char *tagsValBuf) { - int dataLen = 0; - dataLen += snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", - strlen("binary"))) || - (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", - strlen("nchar")))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - - int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; - char * buf = (char *)calloc(1, tagBufLen); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - generateBinaryNCharTagValues(tableSeq, tagBufLen, buf); - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "\'%s\',", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", - strlen("int"))) { - if ((g_args.demo_mode) && (i == 0)) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", (tableSeq % 10) + 1); - } else { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", tableSeq); - } - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", - strlen("bigint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", - strlen("float"))) { - dataLen += - snprintf(tagsValBuf + dataLen, 
TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", - strlen("double"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", - strlen("smallint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", - strlen("tinyint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", - strlen("bool"))) { - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "%d,", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", - strlen("timestamp"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_ubigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "utinyint", - strlen("utinyint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_utinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "usmallint", - strlen("usmallint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_usmallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "uint", - strlen("uint"))) { - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "%d,", rand_uint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "ubigint", - strlen("ubigint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_ubigint()); - } else { - errorPrint("unsupport data type: %s\n", stbInfo->tags[i].dataType); - return -1; - } - } - - dataLen -= 1; - dataLen += snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return 0; 
-} - -static int readTagFromCsvFileToMem(SSuperTable *stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - - FILE *fp = fopen(stbInfo->tagsFile, "r"); - if (fp == NULL) { - printf("Failed to open tags file: %s, reason:%s\n", stbInfo->tagsFile, - strerror(errno)); - return -1; - } - - if (stbInfo->tagDataBuf) { - free(stbInfo->tagDataBuf); - stbInfo->tagDataBuf = NULL; - } - - int tagCount = MAX_SAMPLES; - int count = 0; - char *tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); - if (tagDataBuf == NULL) { - printf("Failed to calloc, reason:%s\n", strerror(errno)); - fclose(fp); - return -1; - } - - while ((readLen = tgetline(&line, &n, fp)) != -1) { - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); - count++; - - if (count >= tagCount - 1) { - char *tmp = - realloc(tagDataBuf, - (size_t)(tagCount * 1.5 * stbInfo->lenOfTagOfOneRow)); - if (tmp != NULL) { - tagDataBuf = tmp; - tagCount = (int)(tagCount * 1.5); - memset( - tagDataBuf + count * stbInfo->lenOfTagOfOneRow, 0, - (size_t)((tagCount - count) * stbInfo->lenOfTagOfOneRow)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", - stbInfo->tagsFile); - tmfree(tagDataBuf); - free(line); - fclose(fp); - return -1; - } - } - } - - stbInfo->tagDataBuf = tagDataBuf; - stbInfo->tagSampleCount = count; - - free(line); - fclose(fp); - return 0; -} - -static void getAndSetRowsFromCsvFile(SSuperTable *stbInfo) { - FILE *fp = fopen(stbInfo->sampleFile, "r"); - int line_count = 0; - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - return; - } - char *buf = calloc(1, stbInfo->maxSqlLen); - if (buf == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return; - } - - while (fgets(buf, 
(int)stbInfo->maxSqlLen, fp)) { - line_count++; - } - fclose(fp); - tmfree(buf); - stbInfo->insertRows = line_count; -} - -static int generateSampleFromCsvForStb(SSuperTable *stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - int getRows = 0; - - FILE *fp = fopen(stbInfo->sampleFile, "r"); - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - return -1; - } - while (1) { - readLen = tgetline(&line, &n, fp); - if (-1 == readLen) { - if (0 != fseek(fp, 0, SEEK_SET)) { - errorPrint("Failed to fseek file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - fclose(fp); - return -1; - } - continue; - } - - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - if (readLen > stbInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%" PRIu64 - "], so discard this row\n", - (int32_t)readLen, stbInfo->lenOfOneRow); - continue; - } - - memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, - readLen); - getRows++; - - if (getRows == MAX_SAMPLES) { - break; - } - } - - fclose(fp); - tmfree(line); - return 0; -} - -int prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { - return -1; - } - } - } - } - - return 0; -} - -static int getRowDataFromSample(char *dataBuf, int64_t maxLen, - int64_t timestamp, SSuperTable *stbInfo, - int64_t *sampleUsePos) { - if ((*sampleUsePos) == MAX_SAMPLES) { - *sampleUsePos = 0; - } - - int dataLen = 0; - if (stbInfo->useSampleTs) { - dataLen += snprintf( - dataBuf + dataLen, maxLen - dataLen, "(%s", - stbInfo->sampleDataBuf + stbInfo->lenOfOneRow * (*sampleUsePos)); - } else { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%" PRId64 
", ", timestamp); - dataLen += snprintf( - dataBuf + dataLen, maxLen - dataLen, "%s", - stbInfo->sampleDataBuf + stbInfo->lenOfOneRow * (*sampleUsePos)); - } - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - (*sampleUsePos)++; - - return dataLen; -} - -int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf, - int64_t remainderBufLen, int64_t timestamp) { - int64_t dataLen = 0; - char * pstr = recBuf; - int64_t maxLen = MAX_DATA_SIZE; - int tmpLen; - - dataLen += - snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 "", timestamp); - - for (int i = 0; i < stbInfo->columnCount; i++) { - tstrncpy(pstr + dataLen, ",", 2); - dataLen += 1; - - if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) || - (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - - if ((stbInfo->columns[i].dataLen + 1) > - /* need count 3 extra chars \', \', and , */ - (remainderBufLen - dataLen - 3)) { - return 0; - } - char *buf = (char *)calloc(stbInfo->columns[i].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += - snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); - tmfree(buf); - - } else { - char *tmp = NULL; - switch (stbInfo->columns[i].data_type) { - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (i == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UINT: - tmp = rand_uint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BIGINT: - tmp = rand_bigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + 
dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - tmp = rand_ubigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (i == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, FLOAT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmp = rand_double_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, DOUBLE_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - tmp = rand_smallint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - tmp = rand_usmallint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TINYINT: - tmp = rand_tinyint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - tmp = rand_utinyint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BOOL: - tmp = rand_bool_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BOOL_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmp = rand_bigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Not support data type: %s\n", - stbInfo->columns[i].dataType); - exit(EXIT_FAILURE); - } - if (tmp) { - dataLen += tmpLen; - } - } - - if (dataLen > (remainderBufLen - (128))) return 0; - } - - dataLen += snprintf(pstr + 
dataLen, 2, ")"); - - verbosePrint("%s() LN%d, dataLen:%" PRId64 "\n", __func__, __LINE__, - dataLen); - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return strlen(recBuf); -} - -static int64_t generateData(char *recBuf, char *data_type, int64_t timestamp, - int lenOfBinary) { - memset(recBuf, 0, MAX_DATA_SIZE); - char *pstr = recBuf; - pstr += sprintf(pstr, "(%" PRId64 "", timestamp); - - int columnCount = g_args.columnCount; - - bool b; - char *s; - for (int i = 0; i < columnCount; i++) { - switch (data_type[i]) { - case TSDB_DATA_TYPE_TINYINT: - pstr += sprintf(pstr, ",%d", rand_tinyint()); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pstr += sprintf(pstr, ",%d", rand_smallint()); - break; - - case TSDB_DATA_TYPE_INT: - pstr += sprintf(pstr, ",%d", rand_int()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, ",%" PRId64 "", rand_bigint()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, ",%" PRId64 "", rand_bigint()); - break; - - case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, ",%10.4f", rand_float()); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, ",%20.8f", rand_double()); - break; - - case TSDB_DATA_TYPE_BOOL: - b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? 
"true" : "false"); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - s = calloc(1, lenOfBinary + 1); - if (NULL == s) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pstr += sprintf(pstr, ",%d", rand_utinyint()); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pstr += sprintf(pstr, ",%d", rand_usmallint()); - break; - - case TSDB_DATA_TYPE_UINT: - pstr += sprintf(pstr, ",%d", rand_uint()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pstr += sprintf(pstr, ",%" PRId64 "", rand_ubigint()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Unknown data type %d\n", data_type[i]); - return -1; - } - - if (strlen(recBuf) > MAX_DATA_SIZE) { - errorPrint("%s", "column length too long, abort\n"); - return -1; - } - } - - pstr += sprintf(pstr, ")"); - - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return (int32_t)strlen(recBuf); -} - -static int generateSampleFromRand(char *sampleDataBuf, uint64_t lenOfOneRow, - int columnCount, StrColumn *columns) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - - char *buff = calloc(lenOfOneRow, 1); - if (NULL == buff) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - for (int i = 0; i < MAX_SAMPLES; i++) { - uint64_t pos = 0; - memset(buff, 0, lenOfOneRow); - - for (int c = 0; c < columnCount; c++) { - char *tmp = NULL; - - uint32_t dataLen; - char data_type = - (columns) ? (columns[c].data_type) : g_args.data_type[c]; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - dataLen = (columns) ? columns[c].dataLen : g_args.binwidth; - rand_string(data, dataLen); - pos += sprintf(buff + pos, "%s,", data); - break; - - case TSDB_DATA_TYPE_NCHAR: - dataLen = (columns) ? 
columns[c].dataLen : g_args.binwidth; - rand_string(data, dataLen - 1); - pos += sprintf(buff + pos, "%s,", data); - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (c == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_UINT: - pos += sprintf(buff + pos, "%s,", rand_uint_str()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pos += sprintf(buff + pos, "%s,", rand_ubigint_str()); - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (c == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pos += sprintf(buff + pos, "%s,", rand_double_str()); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pos += sprintf(buff + pos, "%s,", rand_smallint_str()); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pos += sprintf(buff + pos, "%s,", rand_usmallint_str()); - break; - - case TSDB_DATA_TYPE_TINYINT: - pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pos += sprintf(buff + pos, "%s,", rand_utinyint_str()); - break; - - case TSDB_DATA_TYPE_BOOL: - pos += sprintf(buff + pos, "%s,", rand_bool_str()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint( - "%s() LN%d, Unknown data type %s\n", __func__, __LINE__, - (columns) ? 
(columns[c].dataType) : g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - *(buff + pos - 1) = 0; - memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); - } - - free(buff); - return 0; -} - -static int generateSampleFromRandForNtb() { - return generateSampleFromRand(g_sampleDataBuf, g_args.lenOfOneRow, - g_args.columnCount, NULL); -} - -static int generateSampleFromRandForStb(SSuperTable *stbInfo) { - return generateSampleFromRand(stbInfo->sampleDataBuf, stbInfo->lenOfOneRow, - stbInfo->columnCount, stbInfo->columns); -} - -int prepareSampleForNtb() { - g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == g_sampleDataBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - return generateSampleFromRandForNtb(); -} - -int prepareSampleForStb(SSuperTable *stbInfo) { - stbInfo->sampleDataBuf = calloc(stbInfo->lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == stbInfo->sampleDataBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - int ret; - if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { - if (stbInfo->useSampleTs) { - getAndSetRowsFromCsvFile(stbInfo); - } - ret = generateSampleFromCsvForStb(stbInfo); - } else { - ret = generateSampleFromRandForStb(stbInfo); - } - - if (0 != ret) { - errorPrint("read sample from %s file failed.\n", stbInfo->sampleFile); - tmfree(stbInfo->sampleDataBuf); - stbInfo->sampleDataBuf = NULL; - return -1; - } - - return 0; -} - -int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio, - int disorderRange) { - int64_t randTail = timeStampStep * seq; - if (disorderRatio > 0) { - int rand_num = taosRandom() % 100; - if (rand_num < disorderRatio) { - randTail = (randTail + (taosRandom() % disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %" PRId64 "\n", randTail); - } - } - - return randTail; -} - -static int32_t generateDataTailWithoutStb( - uint32_t batch, char *buffer, int64_t remainderBufLen, int64_t 
insertRows, - uint64_t recordFrom, int64_t startTime, - /* int64_t *pSamplePos, */ int64_t *dataLen) { - uint64_t len = 0; - char * pstr = buffer; - - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - - int32_t k = 0; - for (k = 0; k < batch;) { - char *data = pstr; - memset(data, 0, MAX_DATA_SIZE); - - int64_t retLen = 0; - - char *data_type = g_args.data_type; - int lenOfBinary = g_args.binwidth; - - if (g_args.disorderRatio) { - retLen = - generateData(data, data_type, - startTime + getTSRandTail(g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange), - lenOfBinary); - } else { - retLen = generateData(data, data_type, - startTime + g_args.timestamp_step * k, - lenOfBinary); - } - - if (len > remainderBufLen) break; - - pstr += retLen; - k++; - len += retLen; - remainderBufLen -= retLen; - - verbosePrint("%s() LN%d len=%" PRIu64 " k=%d \nbuffer=%s\n", __func__, - __LINE__, len, k, buffer); - - recordFrom++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - -static int32_t generateStbDataTail(SSuperTable *stbInfo, uint32_t batch, - char *buffer, int64_t remainderBufLen, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *dataLen) { - uint64_t len = 0; - - char *pstr = buffer; - - bool tsRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; - } else { - tsRand = false; - } - verbosePrint("%s() LN%d batch=%u buflen=%" PRId64 "\n", __func__, __LINE__, - batch, remainderBufLen); - - int32_t k; - for (k = 0; k < batch;) { - char *data = pstr; - - int64_t lenOfRow = 0; - - if (tsRand) { - if (stbInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData( - stbInfo, data, remainderBufLen, - startTime + getTSRandTail(stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange)); - } else { - lenOfRow = - generateStbRowData(stbInfo, data, remainderBufLen, - startTime + stbInfo->timeStampStep * k); 
- } - } else { - lenOfRow = getRowDataFromSample( - data, - (remainderBufLen < MAX_DATA_SIZE) ? remainderBufLen - : MAX_DATA_SIZE, - startTime + stbInfo->timeStampStep * k, stbInfo, pSamplePos); - } - - if (lenOfRow == 0) { - data[0] = '\0'; - break; - } - if ((lenOfRow + 1) > remainderBufLen) { - break; - } - - pstr += lenOfRow; - k++; - len += lenOfRow; - remainderBufLen -= lenOfRow; - - verbosePrint("%s() LN%d len=%" PRIu64 " k=%u \nbuffer=%s\n", __func__, - __LINE__, len, k, buffer); - - recordFrom++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - -static int generateSQLHeadWithoutStb(char *tableName, char *dbName, - char *buffer, int remainderBufLen) { - int len; - - char headBuf[HEAD_BUFF_LEN]; - - len = snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName); - - if (len > remainderBufLen) return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -static int generateStbSQLHead(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int remainderBufLen) { - int len; - - char headBuf[HEAD_BUFF_LEN]; - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - if (NULL == tagsValBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, tableSeq, tagsValBuf)) { - tmfree(tagsValBuf); - return -1; - } - } else { - snprintf( - tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * - (tableSeq % stbInfo->tagSampleCount)); - } - - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s using %s.%s TAGS%s values", - dbName, tableName, dbName, stbInfo->stbName, tagsValBuf); - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName); - } else { - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s 
values", dbName, tableName); - } - - if (len > remainderBufLen) return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -int32_t generateStbInterlaceData(threadInfo *pThreadInfo, char *tableName, - uint32_t batchPerTbl, uint64_t i, - uint32_t batchPerTblTimes, uint64_t tableSeq, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) { - char *pstr = buffer; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int headLen = - generateStbSQLHead(stbInfo, tableName, tableSeq, pThreadInfo->db_name, - pstr, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%" PRIu64 " buffer:\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, i, buffer); - - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen = 0; - - verbosePrint("[%d] %s() LN%d i=%" PRIu64 - " batchPerTblTimes=%u batchPerTbl = %u\n", - pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, - batchPerTbl); - - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - - int32_t k = generateStbDataTail(stbInfo, batchPerTbl, pstr, - *pRemainderBufLen, insertRows, 0, startTime, - &(pThreadInfo->samplePos), &dataLen); - - if (k == batchPerTbl) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint( - "%s() LN%d, generated data tail: %u, not equal batch per table: " - "%u\n", - __func__, __LINE__, k, batchPerTbl); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -int64_t generateInterlaceDataWithoutStb(char *tableName, uint32_t batch, - uint64_t tableSeq, char *dbName, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) { - char *pstr = buffer; - - int headLen = generateSQLHeadWithoutStb(tableName, dbName, pstr, - (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - - pstr += headLen; - *pRemainderBufLen -= headLen; - - 
int64_t dataLen = 0; - - int32_t k = generateDataTailWithoutStb(batch, pstr, *pRemainderBufLen, - insertRows, 0, startTime, &dataLen); - - if (k == batch) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint( - "%s() LN%d, generated data tail: %d, not equal batch per table: " - "%u\n", - __func__, __LINE__, k, batch); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, char data_type, - int32_t dataLen, int32_t timePrec, - char *value) { - int32_t * bind_int; - uint32_t *bind_uint; - int64_t * bind_bigint; - uint64_t *bind_ubigint; - float * bind_float; - double * bind_double; - int8_t * bind_bool; - int64_t * bind_ts2; - int16_t * bind_smallint; - uint16_t *bind_usmallint; - int8_t * bind_tinyint; - uint8_t * bind_utinyint; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary; - - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - bind_binary = calloc(1, strlen(value) + 1); - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - bind_binary = calloc(1, dataLen + 1); - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } - - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NCHAR: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar; - - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - bind_nchar = calloc(1, strlen(value) + 1); - strncpy(bind_nchar, value, strlen(value)); - } else { - bind_nchar = calloc(1, dataLen + 1); - rand_string(bind_nchar, dataLen); - } - - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - 
bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_INT: - bind_int = calloc(1, sizeof(int32_t)); - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UINT: - bind_uint = malloc(sizeof(uint32_t)); - - if (value) { - *bind_uint = atoi(value); - } else { - *bind_uint = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_UINT; - bind->buffer_length = sizeof(uint32_t); - bind->buffer = bind_uint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BIGINT: - bind_bigint = malloc(sizeof(int64_t)); - - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UBIGINT: - bind_ubigint = malloc(sizeof(uint64_t)); - - if (value) { - *bind_ubigint = atoll(value); - } else { - *bind_ubigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; - bind->buffer_length = sizeof(uint64_t); - bind->buffer = bind_ubigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_FLOAT: - bind_float = malloc(sizeof(float)); - - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_DOUBLE: - bind_double = malloc(sizeof(double)); - - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - 
bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_SMALLINT: - bind_smallint = malloc(sizeof(int16_t)); - - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_USMALLINT: - bind_usmallint = malloc(sizeof(uint16_t)); - - if (value) { - *bind_usmallint = (uint16_t)atoi(value); - } else { - *bind_usmallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(uint16_t); - bind->buffer = bind_usmallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TINYINT: - bind_tinyint = malloc(sizeof(int8_t)); - - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UTINYINT: - bind_utinyint = malloc(sizeof(uint8_t)); - - if (value) { - *bind_utinyint = (int8_t)atoi(value); - } else { - *bind_utinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; - bind->buffer_length = sizeof(uint8_t); - bind->buffer = bind_utinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BOOL: - bind_bool = malloc(sizeof(int8_t)); - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; - } else { - *bind_bool = false; - } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = 
sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - bind_ts2 = malloc(sizeof(int64_t)); - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while (value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != - taosParseTime(value, &tmpEpoch, (int32_t)strlen(value), - timePrec, 0)) { - free(bind_ts2); - errorPrint("Input %s, time format error!\n", value); - return -1; - } - *bind_ts2 = tmpEpoch; - } else { - *bind_ts2 = atoll(value); - } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Not support data type: %d\n", data_type); - return -1; - } - - return 0; -} - -int32_t prepareStmtWithoutStb(threadInfo *pThreadInfo, char *tableName, - uint32_t batch, int64_t insertRows, - int64_t recordFrom, int64_t startTime) { - TAOS_STMT *stmt = pThreadInfo->stmt; - int ret = taos_stmt_set_tbname(stmt, tableName); - if (ret != 0) { - errorPrint( - "failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: " - "%s\n", - tableName, ret, taos_stmt_errstr(stmt)); - return ret; - } - - char *data_type = g_args.data_type; - - char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); - if (bindArray == NULL) { - errorPrint("Failed to allocate %d bind params\n", - (g_args.columnCount + 1)); - return -1; - } - - int32_t k = 0; - for (k = 0; k < batch;) { - /* columnCount + 1 (ts) */ - - TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - - int64_t *bind_ts = pThreadInfo->bind_ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - - if (g_args.disorderRatio) { - *bind_ts = startTime + getTSRandTail(g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange); - } else { - *bind_ts = startTime + g_args.timestamp_step * k; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - for (int i = 0; i < g_args.columnCount; i++) { - bind = (TAOS_BIND *)((char *)bindArray + - (sizeof(TAOS_BIND) * (i + 1))); - if (-1 == - prepareStmtBindArrayByType(bind, data_type[i], g_args.binwidth, - pThreadInfo->time_precision, NULL)) { - free(bindArray); - return -1; - } - } - if (taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint("taos_stmt_bind_param() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - break; - } - // if msg > 3MB, break - if (taos_stmt_add_batch(stmt)) { - errorPrint("taos_stmt_add_batch() failed! 
reason: %s\n", - taos_stmt_errstr(stmt)); - break; - } - - k++; - recordFrom++; - if (recordFrom >= insertRows) { - break; - } - } - - free(bindArray); - return k; -} - -int32_t prepareStbStmtBindTag(char *bindArray, SSuperTable *stbInfo, - char *tagsVal, int32_t timePrec) { - TAOS_BIND *tag; - - for (int t = 0; t < stbInfo->tagCount; t++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if (prepareStmtBindArrayByType(tag, stbInfo->tags[t].data_type, - stbInfo->tags[t].dataLen, timePrec, - NULL)) { - return -1; - } - } - - return 0; -} - -int parseSamplefileToStmtBatch(SSuperTable *stbInfo) { - int32_t columnCount = (stbInfo) ? stbInfo->columnCount : g_args.columnCount; - char * sampleBindBatchArray = NULL; - - if (stbInfo) { - stbInfo->sampleBindBatchArray = - calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = stbInfo->sampleBindBatchArray; - } else { - g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = g_sampleBindBatchArray; - } - - for (int c = 0; c < columnCount; c++) { - char data_type = - (stbInfo) ? 
stbInfo->columns[c].data_type : g_args.data_type[c]; - - char *tmpP = NULL; - - switch (data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - tmpP = calloc(1, sizeof(int32_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - tmpP = calloc(1, sizeof(char) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BOOL: - tmpP = calloc(1, sizeof(char) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_FLOAT: - tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = calloc( - 1, MAX_SAMPLES * (((stbInfo) ? stbInfo->columns[c].dataLen - : g_args.binwidth) + - 1)); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - default: - errorPrint("Unknown data type: %s\n", - (stbInfo) ? 
stbInfo->columns[c].dataType - : g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - char *sampleDataBuf = (stbInfo) ? stbInfo->sampleDataBuf : g_sampleDataBuf; - int64_t lenOfOneRow = (stbInfo) ? stbInfo->lenOfOneRow : g_args.lenOfOneRow; - - for (int i = 0; i < MAX_SAMPLES; i++) { - int cursor = 0; - - for (int c = 0; c < columnCount; c++) { - char data_type = - (stbInfo) ? stbInfo->columns[c].data_type : g_args.data_type[c]; - char *restStr = sampleDataBuf + lenOfOneRow * i + cursor; - int lengthOfRest = (int)strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index++) { - if (restStr[index] == ',') { - break; - } - } - - char *tmpStr = calloc(1, index + 1); - if (NULL == tmpStr) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - strncpy(tmpStr, restStr, index); - cursor += index + 1; // skip ',' too - char *tmpP; - - switch (data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *((int32_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int32_t) * i)) = atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_FLOAT: - *(float *)(((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(float) * i)) = (float)atof(tmpStr); - break; - - case TSDB_DATA_TYPE_DOUBLE: - *(double *)(((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(double) * i)) = atof(tmpStr); - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *((int8_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int8_t) * i)) = (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *((int16_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int16_t) * i)) = (int16_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *((int64_t *)((uintptr_t) * - (uintptr_t 
*)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int64_t) * i)) = (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BOOL: - *((int8_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int8_t) * i)) = (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - *((int64_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int64_t) * i)) = (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = (char *)(*(uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c)); - strcpy(tmpP + i * (((stbInfo) ? stbInfo->columns[c].dataLen - : g_args.binwidth)), - tmpStr); - break; - - default: - break; - } - - free(tmpStr); - } - } - - return 0; -} - -static int parseSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, - uint32_t timePrec, uint32_t batch) { - uint32_t columnCount = - (stbInfo) ? stbInfo->columnCount : g_args.columnCount; - - pThreadInfo->bind_ts_array = calloc(1, sizeof(int64_t) * batch); - if (NULL == pThreadInfo->bind_ts_array) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - pThreadInfo->bindParams = - calloc(1, sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); - if (NULL == pThreadInfo->bindParams) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - pThreadInfo->is_null = calloc(1, batch); - if (NULL == pThreadInfo->is_null) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - return 0; -} - -int parseStbSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec, - uint32_t batch) { - return parseSampleToStmtBatchForThread(pThreadInfo, stbInfo, timePrec, - batch); -} - -int parseNtbSampleToStmtBatchForThread(threadInfo *pThreadInfo, - uint32_t timePrec, uint32_t batch) { - return parseSampleToStmtBatchForThread(pThreadInfo, NULL, timePrec, batch); -} - -int32_t 
generateStbProgressiveData(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen) { - char *pstr = buffer; - - memset(pstr, 0, *pRemainderBufLen); - - int64_t headLen = generateStbSQLHead(stbInfo, tableName, tableSeq, dbName, - buffer, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateStbDataTail(stbInfo, g_args.reqPerReq, pstr, - *pRemainderBufLen, insertRows, recordFrom, - startTime, pSamplePos, &dataLen); -} - -int32_t generateProgressiveDataWithoutStb( - char *tableName, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pRemainderBufLen) { - char *pstr = buffer; - - memset(buffer, 0, *pRemainderBufLen); - - int64_t headLen = generateSQLHeadWithoutStb( - tableName, pThreadInfo->db_name, buffer, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateDataTailWithoutStb(g_args.reqPerReq, pstr, *pRemainderBufLen, - insertRows, recordFrom, startTime, - /*pSamplePos, */ &dataLen); -} - -int32_t generateSmlConstPart(char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq) { - int64_t dataLen = 0; - uint64_t length = stbInfo->lenOfOneRow; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) { - dataLen += - snprintf(sml + dataLen, length - dataLen, "%s,id=%s%" PRIu64 "", - stbInfo->stbName, stbInfo->childTblPrefix, - tbSeq + pThreadInfo->start_table_from); - } else if (stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - dataLen += snprintf(sml + dataLen, length - dataLen, "id=%s%" PRIu64 "", - stbInfo->childTblPrefix, - tbSeq + pThreadInfo->start_table_from); - } else { - errorPrint("unsupport schemaless protocol (%d)\n", - 
stbInfo->lineProtocol); - return -1; - } - - for (int j = 0; j < stbInfo->tagCount; j++) { - tstrncpy(sml + dataLen, - (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) ? "," : " ", - 2); - dataLen += 1; - switch (stbInfo->tags[j].data_type) { - case TSDB_DATA_TYPE_TIMESTAMP: - errorPrint("Does not support data type %s as tag\n", - stbInfo->tags[j].dataType); - return -1; - case TSDB_DATA_TYPE_BOOL: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_bool_str()); - break; - case TSDB_DATA_TYPE_TINYINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_tinyint_str()); - break; - case TSDB_DATA_TYPE_UTINYINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_utinyint_str()); - break; - case TSDB_DATA_TYPE_SMALLINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_smallint_str()); - break; - case TSDB_DATA_TYPE_USMALLINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_usmallint_str()); - break; - case TSDB_DATA_TYPE_INT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_int_str()); - break; - case TSDB_DATA_TYPE_UINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_uint_str()); - break; - case TSDB_DATA_TYPE_BIGINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_bigint_str()); - break; - case TSDB_DATA_TYPE_UBIGINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_ubigint_str()); - break; - case TSDB_DATA_TYPE_FLOAT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_float_str()); - break; - case TSDB_DATA_TYPE_DOUBLE: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_double_str()); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->tags[j].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - 
(uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->tags[j].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->tags[j].dataLen); - dataLen += - snprintf(sml + dataLen, length - dataLen, "t%d=%s", j, buf); - tmfree(buf); - break; - - default: - errorPrint("Unsupport data type %s\n", - stbInfo->tags[j].dataType); - return -1; - } - } - return 0; -} - -int32_t generateSmlMutablePart(char *line, char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp) { - int dataLen = 0; - uint64_t buffer = stbInfo->lenOfOneRow; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) { - dataLen = snprintf(line, buffer, "%s ", sml); - for (uint32_t c = 0; c < stbInfo->columnCount; c++) { - if (c != 0) { - tstrncpy(line + dataLen, ",", 2); - dataLen += 1; - } - switch (stbInfo->columns[c].data_type) { - case TSDB_DATA_TYPE_TIMESTAMP: - errorPrint("Does not support data type %s as tag\n", - stbInfo->columns[c].dataType); - return -1; - case TSDB_DATA_TYPE_BOOL: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%s", c, rand_bool_str()); - break; - case TSDB_DATA_TYPE_TINYINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si8", c, rand_tinyint_str()); - break; - case TSDB_DATA_TYPE_UTINYINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su8", c, rand_utinyint_str()); - break; - case TSDB_DATA_TYPE_SMALLINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si16", c, rand_smallint_str()); - break; - case TSDB_DATA_TYPE_USMALLINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su16", c, rand_usmallint_str()); - break; - case TSDB_DATA_TYPE_INT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si32", c, rand_int_str()); - break; - case TSDB_DATA_TYPE_UINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su32", c, rand_uint_str()); - break; - case 
TSDB_DATA_TYPE_BIGINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si64", c, rand_bigint_str()); - break; - case TSDB_DATA_TYPE_UBIGINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su64", c, rand_ubigint_str()); - break; - case TSDB_DATA_TYPE_FLOAT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%sf32", c, rand_float_str()); - break; - case TSDB_DATA_TYPE_DOUBLE: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%sf64", c, rand_double_str()); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->columns[c].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( - "binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = - (char *)calloc(stbInfo->columns[c].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[c].dataLen); - if (stbInfo->columns[c].data_type == - TSDB_DATA_TYPE_BINARY) { - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=\"%s\"", c, buf); - } else { - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=L\"%s\"", c, buf); - } - tmfree(buf); - break; - default: - errorPrint("Unsupport data type %s\n", - stbInfo->columns[c].dataType); - return -1; - } - } - dataLen += snprintf(line + dataLen, buffer - dataLen, " %" PRId64 "", - timestamp); - return 0; - } else if (stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - switch (stbInfo->columns[0].data_type) { - case TSDB_DATA_TYPE_BOOL: - snprintf(line, buffer, "%s %" PRId64 " %s %s", stbInfo->stbName, - timestamp, rand_bool_str(), sml); - break; - case TSDB_DATA_TYPE_TINYINT: - snprintf(line, buffer, "%s %" PRId64 " %si8 %s", - stbInfo->stbName, timestamp, rand_tinyint_str(), sml); - break; - case TSDB_DATA_TYPE_UTINYINT: - snprintf(line, buffer, "%s %" PRId64 " %su8 %s", - stbInfo->stbName, timestamp, rand_utinyint_str(), sml); - break; - case 
TSDB_DATA_TYPE_SMALLINT: - snprintf(line, buffer, "%s %" PRId64 " %si16 %s", - stbInfo->stbName, timestamp, rand_smallint_str(), sml); - break; - case TSDB_DATA_TYPE_USMALLINT: - snprintf(line, buffer, "%s %" PRId64 " %su16 %s", - stbInfo->stbName, timestamp, rand_usmallint_str(), - sml); - break; - case TSDB_DATA_TYPE_INT: - snprintf(line, buffer, "%s %" PRId64 " %si32 %s", - stbInfo->stbName, timestamp, rand_int_str(), sml); - break; - case TSDB_DATA_TYPE_UINT: - snprintf(line, buffer, "%s %" PRId64 " %su32 %s", - stbInfo->stbName, timestamp, rand_uint_str(), sml); - break; - case TSDB_DATA_TYPE_BIGINT: - snprintf(line, buffer, "%s %" PRId64 " %si64 %s", - stbInfo->stbName, timestamp, rand_bigint_str(), sml); - break; - case TSDB_DATA_TYPE_UBIGINT: - snprintf(line, buffer, "%s %" PRId64 " %su64 %s", - stbInfo->stbName, timestamp, rand_ubigint_str(), sml); - break; - case TSDB_DATA_TYPE_FLOAT: - snprintf(line, buffer, "%s %" PRId64 " %sf32 %s", - stbInfo->stbName, timestamp, rand_float_str(), sml); - break; - case TSDB_DATA_TYPE_DOUBLE: - snprintf(line, buffer, "%s %" PRId64 " %sf64 %s", - stbInfo->stbName, timestamp, rand_double_str(), sml); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->columns[0].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->columns[0].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[0].dataLen); - if (stbInfo->columns[0].data_type == TSDB_DATA_TYPE_BINARY) { - snprintf(line, buffer, "%s %" PRId64 " \"%s\" %s", - stbInfo->stbName, timestamp, buf, sml); - } else { - snprintf(line, buffer, "%s %" PRId64 " L\"%s\" %s", - stbInfo->stbName, timestamp, buf, sml); - } - tmfree(buf); - break; - default: - errorPrint("Unsupport data type %s\n", - stbInfo->columns[0].dataType); - return -1; - 
} - return 0; - } else { - errorPrint("unsupport schemaless protocol(%d)\n", - stbInfo->lineProtocol); - return -1; - } -} - -int32_t generateSmlJsonTags(cJSON *tagsList, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq) { - cJSON *tags = cJSON_CreateObject(); - char * tbName = calloc(1, TSDB_TABLE_NAME_LEN); - assert(tbName); - snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64 "", - stbInfo->childTblPrefix, tbSeq + pThreadInfo->start_table_from); - cJSON_AddStringToObject(tags, "id", tbName); - char *tagName = calloc(1, TSDB_MAX_TAGS); - assert(tagName); - for (int i = 0; i < stbInfo->tagCount; i++) { - cJSON *tag = cJSON_CreateObject(); - snprintf(tagName, TSDB_MAX_TAGS, "t%d", i); - switch (stbInfo->tags[i].data_type) { - case TSDB_DATA_TYPE_BOOL: - cJSON_AddNumberToObject(tag, "value", rand_bool()); - cJSON_AddStringToObject(tag, "type", "bool"); - break; - case TSDB_DATA_TYPE_TINYINT: - cJSON_AddNumberToObject(tag, "value", rand_tinyint()); - cJSON_AddStringToObject(tag, "type", "tinyint"); - break; - case TSDB_DATA_TYPE_SMALLINT: - cJSON_AddNumberToObject(tag, "value", rand_smallint()); - cJSON_AddStringToObject(tag, "type", "smallint"); - break; - case TSDB_DATA_TYPE_INT: - cJSON_AddNumberToObject(tag, "value", rand_int()); - cJSON_AddStringToObject(tag, "type", "int"); - break; - case TSDB_DATA_TYPE_BIGINT: - cJSON_AddNumberToObject(tag, "value", (double)rand_bigint()); - cJSON_AddStringToObject(tag, "type", "bigint"); - break; - case TSDB_DATA_TYPE_FLOAT: - cJSON_AddNumberToObject(tag, "value", rand_float()); - cJSON_AddStringToObject(tag, "type", "float"); - break; - case TSDB_DATA_TYPE_DOUBLE: - cJSON_AddNumberToObject(tag, "value", rand_double()); - cJSON_AddStringToObject(tag, "type", "double"); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = 
(char *)calloc(stbInfo->tags[i].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->tags[i].dataLen); - if (stbInfo->tags[i].data_type == TSDB_DATA_TYPE_BINARY) { - cJSON_AddStringToObject(tag, "value", buf); - cJSON_AddStringToObject(tag, "type", "binary"); - } else { - cJSON_AddStringToObject(tag, "value", buf); - cJSON_AddStringToObject(tag, "type", "nchar"); - } - tmfree(buf); - break; - default: - errorPrint( - "unsupport data type (%s) for schemaless json protocol\n", - stbInfo->tags[i].dataType); - return -1; - } - cJSON_AddItemToObject(tags, tagName, tag); - } - cJSON_AddItemToArray(tagsList, tags); - tmfree(tagName); - tmfree(tbName); - return 0; -} - -int32_t generateSmlJsonCols(cJSON *array, cJSON *tag, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp) { - cJSON *record = cJSON_CreateObject(); - cJSON *ts = cJSON_CreateObject(); - cJSON_AddNumberToObject(ts, "value", (double)timestamp); - if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_MILLI) { - cJSON_AddStringToObject(ts, "type", "ms"); - } else if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_MICRO) { - cJSON_AddStringToObject(ts, "type", "us"); - } else if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_NANO) { - cJSON_AddStringToObject(ts, "type", "ns"); - } else { - errorPrint("unsupport time precision %d\n", - pThreadInfo->time_precision); - return -1; - } - cJSON *value = cJSON_CreateObject(); - switch (stbInfo->columns[0].data_type) { - case TSDB_DATA_TYPE_BOOL: - cJSON_AddNumberToObject(value, "value", rand_bool()); - cJSON_AddStringToObject(value, "type", "bool"); - break; - case TSDB_DATA_TYPE_TINYINT: - cJSON_AddNumberToObject(value, "value", rand_tinyint()); - cJSON_AddStringToObject(value, "type", "tinyint"); - break; - case TSDB_DATA_TYPE_SMALLINT: - cJSON_AddNumberToObject(value, "value", rand_smallint()); - cJSON_AddStringToObject(value, "type", "smallint"); - break; - 
case TSDB_DATA_TYPE_INT: - cJSON_AddNumberToObject(value, "value", rand_int()); - cJSON_AddStringToObject(value, "type", "int"); - break; - case TSDB_DATA_TYPE_BIGINT: - cJSON_AddNumberToObject(value, "value", (double)rand_bigint()); - cJSON_AddStringToObject(value, "type", "bigint"); - break; - case TSDB_DATA_TYPE_FLOAT: - cJSON_AddNumberToObject(value, "value", rand_float()); - cJSON_AddStringToObject(value, "type", "float"); - break; - case TSDB_DATA_TYPE_DOUBLE: - cJSON_AddNumberToObject(value, "value", rand_double()); - cJSON_AddStringToObject(value, "type", "double"); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->columns[0].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->columns[0].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[0].dataLen); - if (stbInfo->columns[0].data_type == TSDB_DATA_TYPE_BINARY) { - cJSON_AddStringToObject(value, "value", buf); - cJSON_AddStringToObject(value, "type", "binary"); - } else { - cJSON_AddStringToObject(value, "value", buf); - cJSON_AddStringToObject(value, "type", "nchar"); - } - break; - default: - errorPrint( - "unsupport data type (%s) for schemaless json protocol\n", - stbInfo->columns[0].dataType); - return -1; - } - cJSON_AddItemToObject(record, "timestamp", ts); - cJSON_AddItemToObject(record, "value", value); - cJSON_AddItemToObject(record, "tags", tag); - cJSON_AddStringToObject(record, "metric", stbInfo->stbName); - cJSON_AddItemToArray(array, record); - return 0; -} diff --git a/src/kit/taosdemo/src/demoInsert.c b/src/kit/taosdemo/src/demoInsert.c deleted file mode 100644 index 4dac128f33a3a9dc4723254f27378a98c90ebf1a..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoInsert.c +++ /dev/null @@ -1,3481 +0,0 @@ -/* - * Copyright 
(c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "cJSON.h" -#include "demo.h" -#include "demoData.h" - -static int calcRowLen(SSuperTable *superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char *dataType = superTbls->columns[colIndex].dataType; - - switch (superTbls->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint("get error data type : %s\n", dataType); - exit(EXIT_FAILURE); - } - if (superTbls->iface == SML_IFACE) { - lenOfOneRow += 
SML_LINE_SQL_SYNTAX_OFFSET; - } - } - - superTbls->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char *dataType = superTbls->tags[tagIndex].dataType; - switch (superTbls->tags[tagIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_NCHAR: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BOOL: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - break; - case TSDB_DATA_TYPE_FLOAT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_DOUBLE: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - break; - default: - errorPrint("get error tag type : %s\n", dataType); - exit(EXIT_FAILURE); - } - if (superTbls->iface == SML_IFACE) { - lenOfOneRow += SML_LINE_SQL_SYNTAX_OFFSET; - } - } - - if (superTbls->iface == SML_IFACE) { - lenOfTagOfOneRow += - 2 * TSDB_TABLE_NAME_LEN * 2 + SML_LINE_SQL_SYNTAX_OFFSET; - superTbls->lenOfOneRow += lenOfTagOfOneRow; - } - - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - return 0; -} - -static int getSuperTableFromServer(TAOS *taos, char *dbName, - SSuperTable *superTbls) { - char 
command[SQL_BUFF_LEN] = "\0"; - TAOS_RES *res; - TAOS_ROW row = NULL; - int count = 0; - - // get schema use cmd: describe superTblName; - snprintf(command, SQL_BUFF_LEN, "describe %s.%s", dbName, - superTbls->stbName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s, reason: %s\n", command, - taos_errstr(res)); - taos_free_result(res); - return -1; - } - - int tagIndex = 0; - int columnIndex = 0; - TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == count) { - count++; - continue; - } - - if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { - tstrncpy(superTbls->tags[tagIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "DOUBLE", strlen("DOUBLE"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - 
superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", - strlen("TINYINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", - strlen("SMALLINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; - } - 
superTbls->tags[tagIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->tags[tagIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + - 1); - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - tstrncpy(superTbls->tags[tagIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } - tagIndex++; - } else { - tstrncpy(superTbls->columns[columnIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_BIGINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_FLOAT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - 
"DOUBLE", strlen("DOUBLE"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_DOUBLE; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_BINARY; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_NCHAR; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", - strlen("TINYINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", - strlen("SMALLINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) 
{ - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; - } - superTbls->columns[columnIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->columns[columnIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + - 1); - - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - tstrncpy(superTbls->columns[columnIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } - - columnIndex++; - } - count++; - } - - superTbls->columnCount = columnIndex; - superTbls->tagCount = tagIndex; - taos_free_result(res); - - calcRowLen(superTbls); - return 0; -} - -static int createSuperTable(TAOS *taos, char *dbName, SSuperTable *superTbl, - char *command) { - char cols[COL_BUFFER_LEN] = "\0"; - int len = 0; - - int lenOfOneRow = 0; - - if (superTbl->columnCount == 0) { - errorPrint("super table column count is %d\n", superTbl->columnCount); - return -1; - } - - for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - switch (superTbl->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s(%d)", - colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s(%d)", - colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (colIndex == 1)) { - len += 
snprintf(cols + len, COL_BUFFER_LEN - len, - ", VOLTAGE INT"); - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "INT"); - } - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT"); - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TINYINT"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BOOL"); - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (colIndex == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", CURRENT FLOAT"); - } else if (colIndex == 2) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", PHASE FLOAT"); - } - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "FLOAT"); - } - - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "DOUBLE"); - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TIMESTAMP"); - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UTINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TINYINT UNSIGNED"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_USMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT UNSIGNED"); - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UINT: - len += snprintf(cols + len, 
COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "INT UNSIGNED"); - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UBIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT UNSIGNED"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - default: - taos_close(taos); - errorPrint("config error data type : %s\n", - superTbl->columns[colIndex].dataType); - return -1; - } - } - - superTbl->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp - - // save for creating child table - superTbl->colsOfCreateChildTable = - (char *)calloc(len + TIMESTAMP_BUFF_LEN, 1); - if (NULL == superTbl->colsOfCreateChildTable) { - taos_close(taos); - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - snprintf(superTbl->colsOfCreateChildTable, len + TIMESTAMP_BUFF_LEN, - "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - superTbl->colsOfCreateChildTable); - - if (superTbl->tagCount == 0) { - errorPrint("super table tag count is %d\n", superTbl->tagCount); - return -1; - } - - char tags[TSDB_MAX_TAGS_LEN] = "\0"; - int tagIndex; - len = 0; - - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); - for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { - char *dataType = superTbl->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - if ((g_args.demo_mode) && (tagIndex == 1)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "location BINARY(%d),", - superTbl->tags[tagIndex].dataLen); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s(%d),", tagIndex, "BINARY", - superTbl->tags[tagIndex].dataLen); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += - snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s(%d),", - tagIndex, "NCHAR", superTbl->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } 
else if (strcasecmp(dataType, "INT") == 0) { - if ((g_args.demo_mode) && (tagIndex == 0)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "groupId INT, "); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "INT"); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BIGINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "SMALLINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TINYINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BOOL"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "FLOAT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "DOUBLE"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "UTINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TINYINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "USMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - 
tagIndex, "SMALLINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "UINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "INT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "UBIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BIGINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TIMESTAMP"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TIMESTAMP_BUFF_LEN; - } else { - taos_close(taos); - errorPrint("config error tag type : %s\n", dataType); - return -1; - } - } - - len -= 1; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); - - superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; - - snprintf(command, BUFFER_SIZE, - superTbl->escapeChar - ? 
"CREATE TABLE IF NOT EXISTS %s.`%s` (ts TIMESTAMP%s) TAGS %s" - : "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", - dbName, superTbl->stbName, cols, tags); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint("create supertable %s failed!\n\n", superTbl->stbName); - return -1; - } - - debugPrint("create supertable %s success!\n\n", superTbl->stbName); - return 0; -} - -int createDatabasesAndStables(char *command) { - TAOS *taos = NULL; - int ret = 0; - taos = - taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - return -1; - } - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].drop) { - sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - return -1; - } - - int dataLen = 0; - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - "CREATE DATABASE IF NOT EXISTS %s", - g_Dbs.db[i].dbName); - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " BLOCKS %d", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " CACHE %d", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " DAYS %d", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " KEEP %d", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.quorum > 1) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " QUORUM %d", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " REPLICA %d", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update 
> 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " UPDATE %d", g_Dbs.db[i].dbCfg.update); - } - // if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, - // BUFFER_SIZE - dataLen, "tables %d ", - // g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " MINROWS %d", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " MAXROWS %d", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " COMP %d", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " wal %d", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.cacheLast > 0) { - dataLen += - snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " CACHELAST %d", g_Dbs.db[i].dbCfg.cacheLast); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - dataLen += - snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } - - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - errorPrint("\ncreate database %s failed!\n\n", - g_Dbs.db[i].dbName); - return -1; - } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); - } - - debugPrint("%s() LN%d supertbl count:%" PRIu64 "\n", __func__, __LINE__, - g_Dbs.db[i].superTblCount); - - int validStbCount = 0; - - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if 
(g_Dbs.db[i].superTbls[j].iface == SML_IFACE) { - goto skip; - } - - sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].stbName); - ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); - - if ((ret != 0) || (g_Dbs.db[i].drop)) { - char *cmd = calloc(1, BUFFER_SIZE); - if (NULL == cmd) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j], cmd); - tmfree(cmd); - - if (0 != ret) { - errorPrint("create super table %" PRIu64 " failed!\n\n", j); - continue; - } - } else { - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); - if (0 != ret) { - errorPrint("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].stbName); - continue; - } - } - skip: - validStbCount++; - } - g_Dbs.db[i].superTblCount = validStbCount; - } - - taos_close(taos); - return 0; -} - -static void *createTable(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - setThreadName("createTable"); - - uint64_t lastPrintTime = taosGetTimestampMs(); - - int buff_len = BUFFER_SIZE; - - pThreadInfo->buffer = calloc(1, buff_len); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %" PRIu64 " to %" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->start_table_from, - pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - g_args.escapeChar - ? 
"CREATE TABLE IF NOT EXISTS %s.`%s%" PRIu64 "` %s;" - : "CREATE TABLE IF NOT EXISTS %s.%s%" PRIu64 " %s;", - pThreadInfo->db_name, g_args.tb_prefix, i, - pThreadInfo->cols); - batchNum++; - } else { - if (stbInfo == NULL) { - free(pThreadInfo->buffer); - errorPrint( - "%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - exit(EXIT_FAILURE); - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, buff_len - len, - "CREATE TABLE "); - } - - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - if (NULL == tagsValBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, i, tagsValBuf)) { - tmfree(tagsValBuf); - tmfree(pThreadInfo->buffer); - exit(EXIT_FAILURE); - } - } else { - snprintf(tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + - stbInfo->lenOfTagOfOneRow * - (i % stbInfo->tagSampleCount)); - } - len += snprintf( - pThreadInfo->buffer + len, buff_len - len, - stbInfo->escapeChar ? "if not exists %s.`%s%" PRIu64 - "` using %s.`%s` tags %s " - : "if not exists %s.%s%" PRIu64 - " using %s.%s tags %s ", - pThreadInfo->db_name, stbInfo->childTblPrefix, i, - pThreadInfo->db_name, stbInfo->stbName, tagsValBuf); - tmfree(tagsValBuf); - batchNum++; - if ((batchNum < stbInfo->batchCreateTableNum) && - ((buff_len - len) >= - (stbInfo->lenOfTagOfOneRow + EXTRA_SQL_LEN))) { - continue; - } - } - } - - len = 0; - - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec() failed. 
buffer:\n%s\n", - pThreadInfo->buffer); - free(pThreadInfo->buffer); - return NULL; - } - pThreadInfo->tables_created += batchNum; - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > PRINT_STAT_INTERVAL) { - printf("thread[%d] already create %" PRIu64 " - %" PRIu64 - " tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec() failed. buffer:\n%s\n", - pThreadInfo->buffer); - } - pThreadInfo->tables_created += batchNum; - } - free(pThreadInfo->buffer); - return NULL; -} - -int startMultiThreadCreateChildTable(char *cols, int threads, - uint64_t tableFrom, int64_t ntables, - char *db_name, SSuperTable *stbInfo) { - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - if (NULL == pids) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if (NULL == infos) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - return -1; - } - - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = (int)i; - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->stbInfo = stbInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - db_name, g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint("failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - free(pids); - free(infos); - return -1; - } - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? 
a + 1 : a; - pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pThreadInfo->tables_created = 0; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - - g_actualChildTables += pThreadInfo->tables_created; - } - - free(pids); - free(infos); - - return 0; -} - -int createChildTables() { - int32_t code = 0; - fprintf(stderr, "creating %" PRId64 " table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "creating %" PRId64 " table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - } - double start = (double)taosGetTimestampMs(); - char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; - int len; - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - // with super table - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) || - (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - verbosePrint( - "%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - uint64_t startFrom = 0; - - verbosePrint("%s() LN%d: create %" PRId64 - " child tables from %" PRIu64 "\n", - __func__, __LINE__, g_totalChildTables, - startFrom); - - code = startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountForCreateTbl, startFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); - if (code) { - errorPrint( - "%s() LN%d, 
startMultiThreadCreateChildTable() " - "failed for db %d stable %d\n", - __func__, __LINE__, i, j); - return code; - } - } - } - } else { - // normal table - len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.columnCount; j++) { - if ((strncasecmp(g_args.dataType[j], "BINARY", - strlen("BINARY")) == 0) || - (strncasecmp(g_args.dataType[j], "NCHAR", - strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s(%d)", j, g_args.dataType[j], - g_args.binwidth); - } else { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s", j, g_args.dataType[j]); - } - len = (int)strlen(tblColsBuf); - } - - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); - - verbosePrint("%s() LN%d: dbName: %s num of tb: %" PRId64 - " schema: %s\n", - __func__, __LINE__, g_Dbs.db[i].dbName, g_args.ntables, - tblColsBuf); - code = startMultiThreadCreateChildTable( - tblColsBuf, g_Dbs.threadCountForCreateTbl, 0, g_args.ntables, - g_Dbs.db[i].dbName, NULL); - if (code) { - errorPrint( - "%s() LN%d, startMultiThreadCreateChildTable() " - "failed\n", - __func__, __LINE__); - return code; - } - } - } - double end = (double)taosGetTimestampMs(); - fprintf(stderr, - "\nSpent %.4f seconds to create %" PRId64 - " table(s) with %d thread(s), actual %" PRId64 - " table(s) created\n\n", - (end - start) / 1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "\nSpent %.4f seconds to create %" PRId64 - " table(s) with %d thread(s), actual %" PRId64 - " table(s) created\n\n", - (end - start) / 1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - } - return code; -} - -void postFreeResource() { - tmfclose(g_fpOfInsertResult); - tmfree(g_dupstr); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != 
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - - for (int c = 0; c < g_Dbs.db[i].superTbls[j].columnCount; c++) { - if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { - tmfree((char *)((uintptr_t) * - (uintptr_t *)(g_Dbs.db[i] - .superTbls[j] - .sampleBindBatchArray + - sizeof(char *) * c))); - } - } - tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); - - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - tmfree(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - } - tmfree(g_Dbs.db[i].superTbls); - } - tmfree(g_Dbs.db); - tmfree(g_randbool_buff); - tmfree(g_randint_buff); - tmfree(g_rand_voltage_buff); - tmfree(g_randbigint_buff); - tmfree(g_randsmallint_buff); - tmfree(g_randtinyint_buff); - tmfree(g_randfloat_buff); - tmfree(g_rand_current_buff); - tmfree(g_rand_phase_buff); - - tmfree(g_sampleDataBuf); - - for (int l = 0; l < g_args.columnCount; l++) { - if (g_sampleBindBatchArray) { - tmfree((char *)((uintptr_t) * (uintptr_t *)(g_sampleBindBatchArray + - sizeof(char *) * l))); - } - } - tmfree(g_sampleBindBatchArray); -} - -static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { - int32_t affectedRows; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_RES * res; - int32_t code; - uint16_t iface; - if (stbInfo) - iface = stbInfo->iface; - else { - if (g_args.iface == INTERFACE_BUT) - iface = TAOSC_IFACE; - else - iface = g_args.iface; - } - - debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, - (iface == TAOSC_IFACE) ? 
"taosc" - : (iface == REST_IFACE) ? "rest" - : "stmt"); - - switch (iface) { - case TAOSC_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, - __LINE__, pThreadInfo->buffer); - - affectedRows = queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - INSERT_TYPE, false); - break; - - case REST_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, - __LINE__, pThreadInfo->buffer); - - if (0 != postProceSql(g_Dbs.host, g_Dbs.port, pThreadInfo->buffer, - pThreadInfo)) { - affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } else { - affectedRows = k; - } - break; - - case STMT_IFACE: - debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, - pThreadInfo->stmt); - if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint( - "%s() LN%d, failied to execute insert statement. reason: " - "%s\n", - __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); - - fprintf(stderr, - "\n\033[31m === Please reduce batch number if WAL size " - "exceeds limit. ===\033[0m\n\n"); - exit(EXIT_FAILURE); - } - affectedRows = k; - break; - case SML_IFACE: - res = taos_schemaless_insert( - pThreadInfo->taos, pThreadInfo->lines, - stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL ? 0 : k, - stbInfo->lineProtocol, stbInfo->tsPrecision); - code = taos_errno(res); - affectedRows = taos_affected_rows(res); - if (code != TSDB_CODE_SUCCESS) { - errorPrint( - "%s() LN%d, failed to execute schemaless insert. 
reason: " - "%s\n", - __func__, __LINE__, taos_errstr(res)); - exit(EXIT_FAILURE); - } - break; - default: - errorPrint("Unknown insert mode: %d\n", stbInfo->iface); - affectedRows = 0; - } - - return affectedRows; -} - -static void getTableName(char *pTblName, threadInfo *pThreadInfo, - uint64_t tableSeq) { - SSuperTable *stbInfo = pThreadInfo->stbInfo; - if (stbInfo) { - if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { - if (stbInfo->childTblLimit > 0) { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s`" : "%s", - stbInfo->childTblName + - (tableSeq - stbInfo->childTblOffset) * - TSDB_TABLE_NAME_LEN); - } else { - verbosePrint("[%d] %s() LN%d: from=%" PRIu64 " count=%" PRId64 - " seq=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, tableSeq); - snprintf( - pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s`" : "%s", - stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s%" PRIu64 "`" : "%s%" PRIu64 "", - stbInfo->childTblPrefix, tableSeq); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - g_args.escapeChar ? 
"`%s%" PRIu64 "`" : "%s%" PRIu64 "", - g_args.tb_prefix, tableSeq); - } -} - -static int execStbBindParamBatch(threadInfo *pThreadInfo, char *tableName, - int64_t tableSeq, uint32_t batch, - uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos) { - TAOS_STMT *stmt = pThreadInfo->stmt; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - uint32_t columnCount = pThreadInfo->stbInfo->columnCount; - - uint32_t thisBatch = (uint32_t)(MAX_SAMPLES - (*pSamplePos)); - - if (thisBatch > batch) { - thisBatch = batch; - } - verbosePrint("%s() LN%d, batch=%d pos=%" PRId64 " thisBatch=%d\n", __func__, - __LINE__, batch, *pSamplePos, thisBatch); - - memset(pThreadInfo->bindParams, 0, - (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); - memset(pThreadInfo->is_null, 0, thisBatch); - - for (int c = 0; c < columnCount + 1; c++) { - TAOS_MULTI_BIND *param = - (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + - sizeof(TAOS_MULTI_BIND) * c); - - char data_type; - - if (c == 0) { - data_type = TSDB_DATA_TYPE_TIMESTAMP; - param->buffer_length = sizeof(int64_t); - param->buffer = pThreadInfo->bind_ts_array; - - } else { - data_type = stbInfo->columns[c - 1].data_type; - - char *tmpP; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - param->buffer_length = stbInfo->columns[c - 1].dataLen; - - tmpP = - (char *)((uintptr_t) * - (uintptr_t *)(stbInfo->sampleBindBatchArray + - sizeof(char *) * (c - 1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%" PRId64 - " width=%" PRIxPTR " position=%" PRId64 "\n", - __func__, __LINE__, tmpP, *pSamplePos, - param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = - (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_NCHAR: - param->buffer_length = stbInfo->columns[c - 1].dataLen; - - tmpP = - (char *)((uintptr_t) * - (uintptr_t *)(stbInfo->sampleBindBatchArray + - sizeof(char *) * (c - 1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%" PRId64 - " width=%" PRIxPTR " 
position=%" PRId64 "\n", - __func__, __LINE__, tmpP, *pSamplePos, - param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = - (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - param->buffer_length = sizeof(int32_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - param->buffer_length = sizeof(int16_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BOOL: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_FLOAT: - param->buffer_length = sizeof(float); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - param->buffer_length = sizeof(double); - param->buffer = - (void 
*)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - default: - errorPrint("wrong data type: %d\n", data_type); - return -1; - } - } - - param->buffer_type = data_type; - param->length = calloc(1, sizeof(int32_t) * thisBatch); - if (param->length == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - for (int b = 0; b < thisBatch; b++) { - if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { - param->length[b] = (int32_t)strlen( - (char *)param->buffer + b * stbInfo->columns[c].dataLen); - } else { - param->length[b] = (int32_t)param->buffer_length; - } - } - param->is_null = pThreadInfo->is_null; - param->num = thisBatch; - } - - uint32_t k; - for (k = 0; k < thisBatch;) { - /* columnCount + 1 (ts) */ - if (stbInfo->disorderRatio) { - *(pThreadInfo->bind_ts_array + k) = - startTime + getTSRandTail(stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *(pThreadInfo->bind_ts_array + k) = - startTime + stbInfo->timeStampStep * k; - } - - debugPrint("%s() LN%d, k=%d ts=%" PRId64 "\n", __func__, __LINE__, k, - *(pThreadInfo->bind_ts_array + k)); - k++; - recordFrom++; - - (*pSamplePos)++; - if ((*pSamplePos) == MAX_SAMPLES) { - *pSamplePos = 0; - } - - if (recordFrom >= insertRows) { - break; - } - } - - if (taos_stmt_bind_param_batch( - stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams)) { - errorPrint("taos_stmt_bind_param_batch() failed! 
reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - - for (int c = 0; c < stbInfo->columnCount + 1; c++) { - TAOS_MULTI_BIND *param = - (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + - sizeof(TAOS_MULTI_BIND) * c); - free(param->length); - } - - // if msg > 3MB, break - if (taos_stmt_add_batch(stmt)) { - errorPrint("taos_stmt_add_batch() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - return k; -} - -int32_t prepareStbStmt(threadInfo *pThreadInfo, char *tableName, - int64_t tableSeq, uint32_t batch, uint64_t insertRows, - uint64_t recordFrom, int64_t startTime, - int64_t *pSamplePos) { - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_STMT * stmt = pThreadInfo->stmt; - - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, tableSeq, tagsValBuf)) { - tmfree(tagsValBuf); - return -1; - } - } else { - snprintf( - tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * - (tableSeq % stbInfo->tagSampleCount)); - } - - if (prepareStbStmtBindTag(tagsArray, stbInfo, tagsValBuf, - pThreadInfo->time_precision)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } - - if (taos_stmt_set_tbname_tags(stmt, tableName, - (TAOS_BIND *)tagsArray)) { - errorPrint("taos_stmt_set_tbname_tags() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - - } else { - if (taos_stmt_set_tbname(stmt, tableName)) { - errorPrint("taos_stmt_set_tbname() failed! 
reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - } - tmfree(tagsValBuf); - tmfree(tagsArray); - return execStbBindParamBatch(pThreadInfo, tableName, tableSeq, batch, - insertRows, recordFrom, startTime, pSamplePos); -} - -// stmt sync write interlace data -static void *syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, - uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - - int64_t insertRows; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - uint64_t timesInterlace = (insertRows / interlaceRows) + 1; - uint32_t precalcBatch = interlaceRows; - - if (precalcBatch > g_args.reqPerReq) precalcBatch = g_args.reqPerReq; - - if (precalcBatch > MAX_SAMPLES) precalcBatch = MAX_SAMPLES; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime; - - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - pThreadInfo->samplePos = 0; - - for (int64_t interlace = 0; interlace < timesInterlace; interlace++) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - 
int64_t generated = 0; - int64_t samplePos; - - for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; - tableSeq++) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - return NULL; - } - - samplePos = pThreadInfo->samplePos; - startTime = pThreadInfo->start_time + - interlace * interlaceRows * timeStampStep; - uint64_t remainRecPerTbl = insertRows - interlaceRows * interlace; - uint64_t recPerTbl = 0; - - uint64_t remainPerInterlace; - if (remainRecPerTbl > interlaceRows) { - remainPerInterlace = interlaceRows; - } else { - remainPerInterlace = remainRecPerTbl; - } - - while (remainPerInterlace > 0) { - uint32_t batch; - if (remainPerInterlace > precalcBatch) { - batch = precalcBatch; - } else { - batch = (uint32_t)remainPerInterlace; - } - debugPrint( - "[%d] %s() LN%d, tableName:%s, batch:%d startTime:%" PRId64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, tableName, batch, - startTime); - - if (stbInfo) { - generated = - prepareStbStmt(pThreadInfo, tableName, tableSeq, batch, - insertRows, 0, startTime, &samplePos); - } else { - generated = prepareStmtWithoutStb( - pThreadInfo, tableName, batch, insertRows, - interlaceRows * interlace + recPerTbl, startTime); - } - - debugPrint("[%d] %s() LN%d, generated records is %" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generated); - if (generated < 0) { - errorPrint( - "[%d] %s() LN%d, generated records is %" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace_stmt; - } else if (generated == 0) { - break; - } - - recPerTbl += generated; - remainPerInterlace -= generated; - pThreadInfo->totalInsertRows += generated; - - verbosePrint("[%d] %s() LN%d totalInsertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->totalInsertRows); - - 
startTs = taosGetTimestampUs(); - - int64_t affectedRows = - execInsert(pThreadInfo, (uint32_t)generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint( - "%s() LN%d, insert execution time is %10.2f ms\n", __func__, - __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) - pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) - pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (generated != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert() insert %" PRId64 - ", affected rows: %" PRId64 "\n\n", - pThreadInfo->threadID, __func__, __LINE__, - generated, affectedRows); - goto free_of_interlace_stmt; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, - currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - startTime += (generated * timeStampStep); - } - } - pThreadInfo->samplePos = samplePos; - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - - flagSleep = true; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, 
sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - -free_of_interlace_stmt: - printStatPerThread(pThreadInfo); - return NULL; -} - -void *syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, - __func__, __LINE__); - - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > g_args.reqPerReq) interlaceRows = g_args.reqPerReq; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; 
- bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while (pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - // generate data - memset(pThreadInfo->buffer, 0, maxSqlLen); - uint64_t remainderBufLen = maxSqlLen; - - char *pstr = pThreadInfo->buffer; - - int len = - snprintf(pstr, strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; - - uint32_t recOfBatch = 0; - - int32_t generated; - for (uint64_t i = 0; i < batchPerTblTimes; i++) { - char tableName[TSDB_TABLE_NAME_LEN]; - - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } - - uint64_t oldRemainderLen = remainderBufLen; - - if (stbInfo) { - generated = generateStbInterlaceData( - pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, - tableSeq, pstr, insertRows, startTime, &remainderBufLen); - } else { - generated = (int32_t)generateInterlaceDataWithoutStb( - tableName, batchPerTbl, tableSeq, pThreadInfo->db_name, - pstr, insertRows, startTime, &remainderBufLen); - } - - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, - generated); - goto free_of_interlace; - } else if (generated == 0) { - break; - } - - tableSeq++; - recOfBatch += batchPerTbl; - - pstr += (oldRemainderLen - remainderBufLen); - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, - recOfBatch); - - if (tableSeq == 
- pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time + - generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = (uint32_t)remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) - break; - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%" PRId64 - " insertRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) break; - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%" PRIu64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl); - if (batchPerTbl > 0) { - errorPrint( - "\tIf the batch is %d, the length of the SQL to insert a " - "row must be less then %" PRId64 "\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - errorPrint("\tPlease check if the buffer length(%" PRId64 - ") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, affectedRows); - - if (delay > 
pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint( - "[%d] %s() LN%d execInsert insert %d, affected rows: %" PRId64 - "\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - affectedRows, pThreadInfo->buffer); - goto free_of_interlace; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - -free_of_interlace: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} - -static void *syncWriteInterlaceSml(threadInfo *pThreadInfo, - uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### interlace schemaless write\n", - pThreadInfo->threadID, __func__, __LINE__); - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - int32_t code = 0; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) 
{ - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > g_args.reqPerReq) interlaceRows = g_args.reqPerReq; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } - - char **smlList; - cJSON *tagsList; - cJSON *jsonArray; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL || - stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - smlList = (char **)calloc(pThreadInfo->ntables, sizeof(char *)); - if (NULL == smlList) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - for (int t = 0; t < pThreadInfo->ntables; t++) { - char *sml = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == sml) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_interlace_sml; - } - code = generateSmlConstPart(sml, stbInfo, pThreadInfo, t); - if (code) { - goto free_smlheadlist_interlace_sml; - } - smlList[t] = sml; - } - - pThreadInfo->lines = calloc(g_args.reqPerReq, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_interlace_sml; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - pThreadInfo->lines[i] = calloc(1, stbInfo->lenOfOneRow); - if (NULL == pThreadInfo->lines[i]) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_lines_interlace_sml; - } - } - } else { - 
jsonArray = cJSON_CreateArray(); - tagsList = cJSON_CreateArray(); - for (int t = 0; t < pThreadInfo->ntables; t++) { - code = generateSmlJsonTags(tagsList, stbInfo, pThreadInfo, t); - if (code) { - goto free_json_interlace_sml; - } - } - - pThreadInfo->lines = (char **)calloc(1, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_json_interlace_sml; - } - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while (pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - // generate data - - uint32_t recOfBatch = 0; - - for (uint64_t i = 0; i < batchPerTblTimes; i++) { - int64_t timestamp = startTime; - for (int j = recOfBatch; j < recOfBatch + batchPerTbl; j++) { - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - cJSON *tag = cJSON_Duplicate( - cJSON_GetArrayItem( - tagsList, - (int)(tableSeq - pThreadInfo->start_table_from)), - true); - code = generateSmlJsonCols(jsonArray, tag, stbInfo, - pThreadInfo, timestamp); - if (code) { - goto free_json_interlace_sml; - } - } else { - code = generateSmlMutablePart( - pThreadInfo->lines[j], - smlList[tableSeq - pThreadInfo->start_table_from], - stbInfo, pThreadInfo, timestamp); - if (code) { - goto free_lines_interlace_sml; - } - } - - timestamp += timeStampStep; - } - tableSeq++; - recOfBatch += batchPerTbl; - - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] 
%s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, - recOfBatch); - - if (tableSeq == - pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time + - generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) { - break; - } - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) { - batchPerTbl = (uint32_t)remainRows; - } - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) { - break; - } - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%" PRId64 - " insertRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) { - break; - } - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%" PRIu64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - pThreadInfo->lines[0] = cJSON_Print(jsonArray); - } - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint("Failed to insert records of batch %d\n", batchPerTbl); - if (batchPerTbl > 0) { - errorPrint( - "\tIf the batch is %d, the length of the SQL to insert a " - "row must be less then %" PRId64 "\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - errorPrint("\tPlease check if the buffer length(%" PRId64 - ") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_lines_interlace_sml; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - - if (stbInfo->lineProtocol == 
TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines[0]); - cJSON_Delete(jsonArray); - jsonArray = cJSON_CreateArray(); - } - - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint("execInsert insert %d, affected rows: %" PRId64 "\n%s\n", - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_lines_interlace_sml; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - - printStatPerThread(pThreadInfo); - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines); - free_json_interlace_sml: - if (jsonArray != NULL) 
{ - cJSON_Delete(jsonArray); - } - if (tagsList != NULL) { - cJSON_Delete(tagsList); - } - } else { - free_lines_interlace_sml: - for (int index = 0; index < g_args.reqPerReq; index++) { - tmfree(pThreadInfo->lines[index]); - } - tmfree(pThreadInfo->lines); - free_smlheadlist_interlace_sml: - for (int index = 0; index < pThreadInfo->ntables; index++) { - tmfree(smlList[index]); - } - tmfree(smlList); - } - return NULL; -} - -void *syncWriteProgressiveStmt(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int64_t timeStampStep = - stbInfo ? stbInfo->timeStampStep : g_args.timestamp_step; - int64_t insertRows = (stbInfo) ? stbInfo->insertRows : g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; tableSeq++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%" PRId64 " tableName=%s\n", - __func__, __LINE__, pThreadInfo->threadID, tableSeq, - tableName); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - return NULL; - } - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - generated = prepareStbStmt( - pThreadInfo, tableName, tableSeq, - (uint32_t)((g_args.reqPerReq > stbInfo->insertRows) - ? 
stbInfo->insertRows - : g_args.reqPerReq), - insertRows, i, start_time, &(pThreadInfo->samplePos)); - } else { - generated = prepareStmtWithoutStb(pThreadInfo, tableName, - g_args.reqPerReq, insertRows, - i, start_time); - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_stmt_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint("affected rows: %d\n", affectedRows); - goto free_of_stmt_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) break; - } // insertRows - - if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && - (stbInfo) && - (0 == 
- strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%" PRId64 "\n", __func__, - __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - } - -free_of_stmt_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} - -void *syncWriteProgressive(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - uint64_t maxSqlLen = stbInfo ? stbInfo->maxSqlLen : g_args.max_sql_len; - int64_t timeStampStep = - stbInfo ? stbInfo->timeStampStep : g_args.timestamp_step; - int64_t insertRows = (stbInfo) ? stbInfo->insertRows : g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; tableSeq++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%" PRId64 " tableName=%s\n", - __func__, __LINE__, pThreadInfo->threadID, tableSeq, - tableName); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } - - int64_t 
remainderBufLen = maxSqlLen - 2000; - char * pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, strlen(STR_INSERT_INTO) + 1, "%s", - STR_INSERT_INTO); - - pstr += len; - remainderBufLen -= len; - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmt( - pThreadInfo, tableName, tableSeq, - (uint32_t)((g_args.reqPerReq > stbInfo->insertRows) - ? stbInfo->insertRows - : g_args.reqPerReq), - insertRows, i, start_time, &(pThreadInfo->samplePos)); - } else { - generated = generateStbProgressiveData( - stbInfo, tableName, tableSeq, pThreadInfo->db_name, - pstr, insertRows, i, start_time, - &(pThreadInfo->samplePos), &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { - generated = prepareStmtWithoutStb( - pThreadInfo, tableName, g_args.reqPerReq, insertRows, i, - start_time); - } else { - generated = generateProgressiveDataWithoutStb( - tableName, - /* tableSeq, */ - pThreadInfo, pstr, insertRows, i, start_time, - /* &(pThreadInfo->samplePos), */ - &remainderBufLen); - } - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - 
pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint("affected rows: %d\n", affectedRows); - goto free_of_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) break; - } // insertRows - - if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && - (stbInfo) && - (0 == - strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%" PRId64 "\n", __func__, - __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - } - -free_of_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} - -void *syncWriteProgressiveSml(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### sml progressive write\n", __func__, __LINE__); - int32_t code = 0; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int64_t timeStampStep = stbInfo->timeStampStep; - int64_t insertRows = stbInfo->insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - uint64_t lastPrintTime = taosGetTimestampMs(); - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - char **smlList; - cJSON *tagsList; - cJSON *jsonArray; - - if (insertRows < g_args.reqPerReq) { - g_args.reqPerReq = (uint32_t)insertRows; - } - - 
if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL || - stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - smlList = (char **)calloc(pThreadInfo->ntables, sizeof(char *)); - if (NULL == smlList) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - for (int t = 0; t < pThreadInfo->ntables; t++) { - char *sml = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == sml) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_progressive_sml; - } - code = generateSmlConstPart(sml, stbInfo, pThreadInfo, t); - if (code) { - goto free_smlheadlist_progressive_sml; - } - smlList[t] = sml; - } - - pThreadInfo->lines = (char **)calloc(g_args.reqPerReq, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_progressive_sml; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - pThreadInfo->lines[i] = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == pThreadInfo->lines[i]) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_lines_progressive_sml; - } - } - } else { - jsonArray = cJSON_CreateArray(); - tagsList = cJSON_CreateArray(); - for (int t = 0; t < pThreadInfo->ntables; t++) { - code = generateSmlJsonTags(tagsList, stbInfo, pThreadInfo, t); - if (code) { - goto free_json_progressive_sml; - } - } - - pThreadInfo->lines = (char **)calloc(1, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_json_progressive_sml; - } - } - int currentPercent = 0; - int percentComplete = 0; - - for (uint64_t i = 0; i < pThreadInfo->ntables; i++) { - int64_t timestamp = pThreadInfo->start_time; - for (uint64_t j = 0; j < insertRows;) { - for (int k = 0; k < g_args.reqPerReq; k++) { - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - cJSON *tag = cJSON_Duplicate( - cJSON_GetArrayItem(tagsList, (int)i), true); - code = generateSmlJsonCols(jsonArray, tag, stbInfo, - pThreadInfo, 
timestamp); - if (code) { - goto free_json_progressive_sml; - } - } else { - code = generateSmlMutablePart(pThreadInfo->lines[k], - smlList[i], stbInfo, - pThreadInfo, timestamp); - if (code) { - goto free_lines_progressive_sml; - } - } - timestamp += timeStampStep; - j++; - if (j == insertRows) { - break; - } - } - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - pThreadInfo->lines[0] = cJSON_Print(jsonArray); - } - uint64_t startTs = taosGetTimestampUs(); - int32_t affectedRows = execInsert(pThreadInfo, g_args.reqPerReq); - uint64_t endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines[0]); - cJSON_Delete(jsonArray); - jsonArray = cJSON_CreateArray(); - } - - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) { - pThreadInfo->maxDelay = delay; - } - if (delay < pThreadInfo->minDelay) { - pThreadInfo->minDelay = delay; - } - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - pThreadInfo->totalAffectedRows += affectedRows; - pThreadInfo->totalInsertRows += g_args.reqPerReq; - currentPercent = (int)(pThreadInfo->totalAffectedRows * 100 / - (insertRows * pThreadInfo->ntables)); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (j == insertRows) { - break; - } - } - } - - if (stbInfo->lineProtocol == 
TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines); - free_json_progressive_sml: - if (jsonArray != NULL) { - cJSON_Delete(jsonArray); - } - if (tagsList != NULL) { - cJSON_Delete(tagsList); - } - } else { - free_lines_progressive_sml: - for (int index = 0; index < g_args.reqPerReq; index++) { - tmfree(pThreadInfo->lines[index]); - } - tmfree(pThreadInfo->lines); - free_smlheadlist_progressive_sml: - for (int index = 0; index < pThreadInfo->ntables; index++) { - tmfree(smlList[index]); - } - tmfree(smlList); - } - return NULL; -} - -void *syncWrite(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - setThreadName("syncWrite"); - - uint32_t interlaceRows = 0; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - // interlace mode - if (stbInfo) { - if (STMT_IFACE == stbInfo->iface) { - return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); - } else if (SML_IFACE == stbInfo->iface) { - return syncWriteInterlaceSml(pThreadInfo, interlaceRows); - } else { - return syncWriteInterlace(pThreadInfo, interlaceRows); - } - } - } else { - // progressive mode - if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) || - (STMT_IFACE == g_args.iface)) { - return syncWriteProgressiveStmt(pThreadInfo); - } else if (((stbInfo) && (SML_IFACE == stbInfo->iface)) || - (SML_IFACE == g_args.iface)) { - return syncWriteProgressiveSml(pThreadInfo); - } else { - return syncWriteProgressive(pThreadInfo); - } - } - - return NULL; -} - -void callBack(void *param, TAOS_RES *res, int code) { - threadInfo * pThreadInfo = (threadInfo *)param; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - int insert_interval = - (int)(stbInfo ? 
stbInfo->insertInterval : g_args.insert_interval); - if (insert_interval) { - pThreadInfo->et = taosGetTimestampMs(); - if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { - taosMsleep(insert_interval - - (int32_t)(pThreadInfo->et - pThreadInfo->st)); // ms - } - } - - char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); - char data[MAX_DATA_SIZE]; - char *pstr = buffer; - pstr += sprintf(pstr, "INSERT INTO %s.%s%" PRId64 " VALUES", - pThreadInfo->db_name, pThreadInfo->tb_prefix, - pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - if (pThreadInfo->counter >= g_args.reqPerReq) { - pThreadInfo->start_table_from++; - pThreadInfo->counter = 0; - } - if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { - tsem_post(&pThreadInfo->lock_sem); - free(buffer); - taos_free_result(res); - return; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->stbInfo->disorderRatio && - rand_num < pThreadInfo->stbInfo->disorderRatio) { - int64_t d = - pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); - } else { - generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, - pThreadInfo->lastTs += 1000); - } - pstr += sprintf(pstr, "%s", data); - pThreadInfo->counter++; - - if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - break; - } - } - - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); - free(buffer); - - taos_free_result(res); -} - -void *asyncWrite(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - setThreadName("asyncWrite"); - - pThreadInfo->st = 0; - pThreadInfo->et = 0; - pThreadInfo->lastTs = pThreadInfo->start_time; - - int insert_interval = - (int)(stbInfo ? 
stbInfo->insertInterval : g_args.insert_interval); - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - - tsem_wait(&(pThreadInfo->lock_sem)); - - return NULL; -} - -int startMultiThreadInsertData(int threads, char *db_name, char *precision, - SSuperTable *stbInfo) { - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - } - - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - } - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MICRO_SECONDS; - } - } else if (0 == strncasecmp(precision, "ns", 2)) { - timePrec = TSDB_TIME_PRECISION_NANO; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_NANO_SECONDS; - } - } else { - errorPrint("Not support precision: %s\n", precision); - return -1; - } - } - if (stbInfo) { - if (stbInfo->iface == SML_IFACE) { - if (stbInfo->lineProtocol != TSDB_SML_LINE_PROTOCOL) { - if (stbInfo->columnCount != 1) { - errorPrint( - "Schemaless telnet/json protocol can only have 1 " - "column " - "instead of %d\n", - stbInfo->columnCount); - return -1; - } - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_NOT_CONFIGURED; - } - if (stbInfo->lineProtocol != TSDB_SML_JSON_PROTOCOL) { - calcRowLen(stbInfo); - } - } - } - - int64_t startTime; - if (stbInfo) { - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(timePrec); - } else { - if (TSDB_CODE_SUCCESS != - taosParseTime(stbInfo->startTimestamp, &startTime, - (int32_t)strlen(stbInfo->startTimestamp), - timePrec, 0)) { - errorPrint("failed to parse time %s\n", - stbInfo->startTimestamp); - return -1; - } - } - } else { - startTime = DEFAULT_START_TIME; - } 
- debugPrint("%s() LN%d, startTime= %" PRId64 "\n", __func__, __LINE__, - startTime); - - // read sample data from file first - int ret; - if (stbInfo && stbInfo->iface != SML_IFACE) { - ret = prepareSampleForStb(stbInfo); - } else { - ret = prepareSampleForNtb(); - } - - if (ret) { - errorPrint("%s", "prepare sample data for stable failed!\n"); - return -1; - } - - TAOS *taos0 = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, - g_Dbs.port); - if (NULL == taos0) { - errorPrint("connect to taosd fail , reason: %s\n", taos_errstr(NULL)); - return -1; - } - - int64_t ntables = 0; - uint64_t tableFrom = 0; - - if (stbInfo) { - if (stbInfo->iface != SML_IFACE) { - int64_t limit; - uint64_t offset; - - if ((NULL != g_args.sqlFile) && - (stbInfo->childTblExists == TBL_NO_EXISTS) && - ((stbInfo->childTblOffset != 0) || - (stbInfo->childTblLimit >= 0))) { - printf( - "WARNING: offset and limit will not be used since the " - "child tables not exists!\n"); - } - - if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((stbInfo->childTblLimit < 0) || - ((stbInfo->childTblOffset + stbInfo->childTblLimit) > - (stbInfo->childTblCount))) { - if (stbInfo->childTblCount < stbInfo->childTblOffset) { - printf( - "WARNING: offset will not be used since the child " - "tables count is less then offset!\n"); - - stbInfo->childTblOffset = 0; - } - stbInfo->childTblLimit = - stbInfo->childTblCount - stbInfo->childTblOffset; - } - - offset = stbInfo->childTblOffset; - limit = stbInfo->childTblLimit; - } else { - limit = stbInfo->childTblCount; - offset = 0; - } - - ntables = limit; - tableFrom = offset; - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) && - ((stbInfo->childTblOffset + stbInfo->childTblLimit) > - stbInfo->childTblCount)) { - printf( - "WARNING: specified offset + limit > child table count!\n"); - prompt(); - } - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) && - (0 == stbInfo->childTblLimit)) { - printf( - "WARNING: specified limit = 0, which cannot find 
table " - "name to insert or query! \n"); - prompt(); - } - - stbInfo->childTblName = - (char *)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (NULL == stbInfo->childTblName) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - int64_t childTblCount; - getChildNameOfSuperTableWithLimitAndOffset( - taos0, db_name, stbInfo->stbName, &stbInfo->childTblName, - &childTblCount, limit, offset, stbInfo->escapeChar); - ntables = childTblCount; - } else { - ntables = stbInfo->childTblCount; - } - } else { - ntables = g_args.ntables; - tableFrom = 0; - } - - taos_close(taos0); - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { - if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != - 0) { - errorPrint("%s\n", "convert host to server address"); - return -1; - } - } - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - if (pids == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if (infos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - return -1; - } - - char *stmtBuffer = calloc(1, BUFFER_SIZE); - if (stmtBuffer == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - tmfree(infos); - return -1; - } - - uint32_t interlaceRows = 0; - uint32_t batch; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - batch = interlaceRows; - } else { - batch = (uint32_t)((g_args.reqPerReq > g_args.insertRows) - ? 
g_args.insertRows - : g_args.reqPerReq); - } - - if ((g_args.iface == STMT_IFACE) || - ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { - char *pstr = stmtBuffer; - - if ((stbInfo) && (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - stbInfo->stbName); - for (int tag = 0; tag < (stbInfo->tagCount - 1); tag++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - int columnCount = (stbInfo) ? stbInfo->columnCount : g_args.columnCount; - - for (int col = 0; col < columnCount; col++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); - parseSamplefileToStmtBatch(stbInfo); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->time_precision = timePrec; - pThreadInfo->stbInfo = stbInfo; - - pThreadInfo->start_time = startTime; - pThreadInfo->minDelay = UINT64_MAX; - - if ((NULL == stbInfo) || (stbInfo->iface != REST_IFACE)) { - // t_info->taos = taos; - pThreadInfo->taos = taos_connect( - g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == pThreadInfo->taos) { - free(infos); - errorPrint( - "connect to server fail from insert sub " - "thread,reason:%s\n ", - taos_errstr(NULL)); - return -1; - } - - if ((g_args.iface == STMT_IFACE) || - ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { - pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); - if (NULL == pThreadInfo->stmt) { - free(pids); - free(infos); - errorPrint("taos_stmt_init() failed, reason: %s\n", - taos_errstr(NULL)); - return -1; - } - - if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) { - free(pids); - free(infos); - free(stmtBuffer); - errorPrint( - "failed to execute taos_stmt_prepare. 
return 0x%x. " - "reason: %s\n", - ret, taos_stmt_errstr(pThreadInfo->stmt)); - return -1; - } - pThreadInfo->bind_ts = malloc(sizeof(int64_t)); - - if (stbInfo) { - parseStbSampleToStmtBatchForThread(pThreadInfo, stbInfo, - timePrec, batch); - - } else { - parseNtbSampleToStmtBatchForThread(pThreadInfo, timePrec, - batch); - } - } - } else { - pThreadInfo->taos = NULL; - } - - /* if ((NULL == stbInfo) - || (0 == stbInfo->multiThreadWriteOneTbl)) { - */ - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - /* } else { - pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = stbInfo->childTblCount; - pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % - 10000 - rand_tinyint(); - } - */ - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - errorPrint("%s\n", "failed to create socket"); - return -1; - } - - int retConn = connect(sockfd, (struct sockaddr *)&(g_Dbs.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, - retConn); - if (retConn < 0) { - errorPrint("%s\n", "failed to connect"); - return -1; - } - pThreadInfo->sockfd = sockfd; - } - - tsem_init(&(pThreadInfo->lock_sem), 0, 0); - if (ASYNC_MODE == g_Dbs.asyncMode) { - pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); - } else { - pthread_create(pids + i, NULL, syncWrite, pThreadInfo); - } - } - - free(stmtBuffer); - - int64_t start = taosGetTimestampUs(); - - for (int i = 0; i < threads; i++) { - 
pthread_join(pids[i], NULL); - } - - uint64_t totalDelay = 0; - uint64_t maxDelay = 0; - uint64_t minDelay = UINT64_MAX; - uint64_t cntDelay = 0; - double avgDelay = 0; - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); - - if (pThreadInfo->stmt) { - taos_stmt_close(pThreadInfo->stmt); - } - - tmfree((char *)pThreadInfo->bind_ts); - - tmfree((char *)pThreadInfo->bind_ts_array); - tmfree(pThreadInfo->bindParams); - tmfree(pThreadInfo->is_null); - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - - debugPrint("%s() LN%d, [%d] totalInsert=%" PRIu64 - " totalAffected=%" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - if (stbInfo) { - stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; - } else { - g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; - g_args.totalInsertRows += pThreadInfo->totalInsertRows; - } - - totalDelay += pThreadInfo->totalDelay; - cntDelay += pThreadInfo->cntDelay; - if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; - if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; - } - - if (cntDelay == 0) cntDelay = 1; - avgDelay = (double)totalDelay / cntDelay; - - int64_t end = taosGetTimestampUs(); - int64_t t = end - start; - if (0 == t) t = 1; - - double tInMs = (double)t / 1000000.0; - - if (stbInfo) { - fprintf(stderr, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s.%s. 
%.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows / tInMs)); - - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows / tInMs)); - } - } else { - fprintf(stderr, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, - threads, db_name, (double)(g_args.totalInsertRows / tInMs)); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, - threads, db_name, (double)(g_args.totalInsertRows / tInMs)); - } - } - - if (minDelay != UINT64_MAX) { - fprintf(stderr, - "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay / 1000.0, (double)maxDelay / 1000.0, - (double)minDelay / 1000.0); - - if (g_fpOfInsertResult) { - fprintf( - g_fpOfInsertResult, - "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay / 1000.0, (double)maxDelay / 1000.0, - (double)minDelay / 1000.0); - } - } - - // taos_close(taos); - - free(pids); - free(infos); - return 0; -} - -int insertTestProcess() { - int32_t code = -1; - char * cmdBuffer = calloc(1, BUFFER_SIZE); - if (NULL == cmdBuffer) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_insert_process; - } - - printfInsertMeta(); - - debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == 
g_fpOfInsertResult) { - errorPrint("failed to open %s for save result\n", g_Dbs.resultFile); - goto end_insert_process; - } - - if (g_fpOfInsertResult) { - printfInsertMetaToFile(g_fpOfInsertResult); - } - - prompt(); - - if (init_rand_data()) { - goto end_insert_process; - } - - // create database and super tables - - if (createDatabasesAndStables(cmdBuffer)) { - goto end_insert_process; - } - - // pretreatment - if (prepareSampleData()) { - goto end_insert_process; - } - - if (g_args.iface != SML_IFACE && g_totalChildTables > 0) { - if (createChildTables()) { - goto end_insert_process; - } - } - // create sub threads for inserting data - // start = taosGetTimestampMs(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable *stbInfo = &g_Dbs.db[i].superTbls[j]; - - if (stbInfo && (stbInfo->insertRows > 0)) { - if (startMultiThreadInsertData( - g_Dbs.threadCount, g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, stbInfo)) { - goto end_insert_process; - } - } - } - } - } else { - if (SML_IFACE == g_args.iface) { - code = -1; - errorPrint("%s\n", "Schemaless insertion must include stable"); - goto end_insert_process; - } else { - if (startMultiThreadInsertData( - g_Dbs.threadCount, g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, NULL)) { - goto end_insert_process; - } - } - } - } - code = 0; -end_insert_process: - tmfree(cmdBuffer); - return code; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoJsonOpt.c b/src/kit/taosdemo/src/demoJsonOpt.c deleted file mode 100644 index e74d2906c8f3294f0531145c8f13e5ce776e444f..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoJsonOpt.c +++ /dev/null @@ -1,1777 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. 
- * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "cJSON.h" -#include "demo.h" - -int getColumnAndTagTypeFromInsertJsonFile(cJSON * stbInfo, - SSuperTable *superTbls) { - int32_t code = -1; - - // columns - cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); - if (columns && columns->type != cJSON_Array) { - errorPrint("%s", "failed to read json, columns not found\n"); - goto PARSE_OVER; - } else if (NULL == columns) { - superTbls->columnCount = 0; - superTbls->tagCount = 0; - return 0; - } - - int columnSize = cJSON_GetArraySize(columns); - if ((columnSize + 1 /* ts */) > TSDB_MAX_COLUMNS) { - errorPrint( - "failed to read json, column size overflow, max column size is " - "%d\n", - TSDB_MAX_COLUMNS); - goto PARSE_OVER; - } - - int count = 1; - int index = 0; - StrColumn columnCase; - - // superTbls->columnCount = columnSize; - for (int k = 0; k < columnSize; ++k) { - cJSON *column = cJSON_GetArrayItem(columns, k); - if (column == NULL) continue; - - count = 1; - cJSON *countObj = cJSON_GetObjectItem(column, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = (int)countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || - 
dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, column type not found\n"); - goto PARSE_OVER; - } - // tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, - // DATATYPE_BUFF_LEN); - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON *dataLen = cJSON_GetObjectItem(column, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = (uint32_t)dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - columnCase.dataLen = SMALL_BUFF_LEN; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - - superTbls->columns[index].dataLen = columnCase.dataLen; - index++; - } - } - - if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { - errorPrint( - "failed to read json, column size overflow, allowed max column " - "size is %d\n", - MAX_NUM_COLUMNS); - goto PARSE_OVER; - } - - superTbls->columnCount = index; - - for (int c = 0; c < superTbls->columnCount; c++) { - if (0 == - strncasecmp(superTbls->columns[c].dataType, "INT", strlen("INT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "TINYINT", - strlen("TINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "SMALLINT", - strlen("SMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BIGINT", - strlen("BIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "FLOAT", - strlen("FLOAT"))) { - superTbls->columns[c].data_type = 
TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "DOUBLE", - strlen("DOUBLE"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BINARY", - strlen("BINARY"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "NCHAR", - strlen("NCHAR"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BOOL", - strlen("BOOL"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "TIMESTAMP", - strlen("TIMESTAMP"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UTINYINT", - strlen("UTINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "USMALLINT", - strlen("USMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UINT", - strlen("UINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UBIGINT", - strlen("UBIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; - } - } - - count = 1; - index = 0; - // tags - cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); - if (!tags || tags->type != cJSON_Array) { - errorPrint("%s", "failed to read json, tags not found\n"); - goto PARSE_OVER; - } - - int tagSize = cJSON_GetArraySize(tags); - if (tagSize > TSDB_MAX_TAGS) { - errorPrint( - "failed to read json, tags size overflow, max tag size is %d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - // superTbls->tagCount = tagSize; - for (int k = 0; k < tagSize; ++k) { - cJSON 
*tag = cJSON_GetArrayItem(tags, k); - if (tag == NULL) continue; - - count = 1; - cJSON *countObj = cJSON_GetObjectItem(tag, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = (int)countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || - dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, tag type not found\n"); - goto PARSE_OVER; - } - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON *dataLen = cJSON_GetObjectItem(tag, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = (uint32_t)dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column len not found\n"); - goto PARSE_OVER; - } else { - columnCase.dataLen = 0; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - superTbls->tags[index].dataLen = columnCase.dataLen; - index++; - } - } - - if (index > TSDB_MAX_TAGS) { - errorPrint( - "failed to read json, tags size overflow, allowed max tag count is " - "%d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - superTbls->tagCount = index; - - for (int t = 0; t < superTbls->tagCount; t++) { - if (0 == - strncasecmp(superTbls->tags[t].dataType, "INT", strlen("INT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "TINYINT", - strlen("TINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "SMALLINT", - 
strlen("SMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BIGINT", - strlen("BIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "FLOAT", - strlen("FLOAT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "DOUBLE", - strlen("DOUBLE"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BINARY", - strlen("BINARY"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "NCHAR", - strlen("NCHAR"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BOOL", - strlen("BOOL"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "TIMESTAMP", - strlen("TIMESTAMP"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UTINYINT", - strlen("UTINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "USMALLINT", - strlen("USMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UINT", - strlen("UINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UBIGINT", - strlen("UBIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; - } - } - - if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > - TSDB_MAX_COLUMNS) { - errorPrint( - "columns + tags is more than allowed max columns count: %d\n", - TSDB_MAX_COLUMNS); - goto 
PARSE_OVER; - } - code = 0; - -PARSE_OVER: - return code; -} - -int getMetaFromInsertJsonFile(cJSON *root) { - int32_t code = -1; - - cJSON *cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON *host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON *port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_Dbs.port = (uint16_t)port->valueint; - } else if (!port) { - g_Dbs.port = DEFAULT_PORT; - } - - cJSON *user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - } - - cJSON *password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && - password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN); - } - - cJSON *resultfile = cJSON_GetObjectItem(root, "result_file"); - if (resultfile && resultfile->type == cJSON_String && - resultfile->valuestring != NULL) { - tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); - } else if (!resultfile) { - tstrncpy(g_Dbs.resultFile, DEFAULT_OUTPUT, MAX_FILE_NAME_LEN); - } - - cJSON *threads = cJSON_GetObjectItem(root, "thread_count"); - if (threads && threads->type == cJSON_Number) { - g_Dbs.threadCount = (uint32_t)threads->valueint; - } else if 
(!threads) { - g_Dbs.threadCount = DEFAULT_NTHREADS; - } else { - errorPrint("%s", "failed to read json, threads not found\n"); - goto PARSE_OVER; - } - - cJSON *threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); - if (threads2 && threads2->type == cJSON_Number) { - g_Dbs.threadCountForCreateTbl = (uint32_t)threads2->valueint; - } else if (!threads2) { - g_Dbs.threadCountForCreateTbl = DEFAULT_NTHREADS; - } else { - errorPrint("%s", "failed to read json, threads2 not found\n"); - goto PARSE_OVER; - } - - cJSON *gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); - if (gInsertInterval && gInsertInterval->type == cJSON_Number) { - if (gInsertInterval->valueint < 0) { - errorPrint("%s", - "failed to read json, insert interval input mistake\n"); - goto PARSE_OVER; - } - g_args.insert_interval = gInsertInterval->valueint; - } else if (!gInsertInterval) { - g_args.insert_interval = DEFAULT_INSERT_INTERVAL; - } else { - errorPrint("%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - cJSON *interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); - if (interlaceRows && interlaceRows->type == cJSON_Number) { - if (interlaceRows->valueint < 0) { - errorPrint("%s", - "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - } - g_args.interlaceRows = (uint32_t)interlaceRows->valueint; - } else if (!interlaceRows) { - g_args.interlaceRows = - DEFAULT_INTERLACE_ROWS; // 0 means progressive mode, > 0 mean - // interlace mode. 
max value is less or equ - // num_of_records_per_req - } else { - errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - if (maxSqlLen->valueint < 0) { - errorPrint( - "%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.max_sql_len = maxSqlLen->valueint; - } else if (!maxSqlLen) { - g_args.max_sql_len = TSDB_MAX_ALLOWED_SQL_LEN; - } else { - errorPrint( - "%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - if (numRecPerReq->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, num_of_records_per_req input " - "mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { - printf("NOTICE: number of records per request value %" PRIu64 - " > %d\n\n", - numRecPerReq->valueint, MAX_RECORDS_PER_REQ); - printf( - " number of records per request value will be set to " - "%d\n\n", - MAX_RECORDS_PER_REQ); - prompt(); - numRecPerReq->valueint = MAX_RECORDS_PER_REQ; - } - g_args.reqPerReq = (uint32_t)numRecPerReq->valueint; - } else if (!numRecPerReq) { - g_args.reqPerReq = MAX_RECORDS_PER_REQ; - } else { - errorPrint( - "%s() LN%d, failed to read json, num_of_records_per_req not " - "found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *prepareRand = cJSON_GetObjectItem(root, "prepared_rand"); - if (prepareRand && prepareRand->type == cJSON_Number) { - if (prepareRand->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, prepared_rand input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.prepared_rand = prepareRand->valueint; - } else if 
(!prepareRand) { - g_args.prepared_rand = DEFAULT_PREPARED_RAND; - } else { - errorPrint("%s", "failed to read json, prepared_rand not found\n"); - goto PARSE_OVER; - } - - cJSON *answerPrompt = - cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String && - answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = DEFAULT_ANS_YES; - } - } else if (!answerPrompt) { - g_args.answer_yes = true; // default is no, mean answer_yes. - } else { - errorPrint( - "%s", - "failed to read json, confirm_parameter_prompt input mistake\n"); - goto PARSE_OVER; - } - - // rows per table need be less than insert batch - if (g_args.interlaceRows > g_args.reqPerReq) { - printf( - "NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlaceRows, g_args.reqPerReq); - printf( - " interlace rows value will be set to " - "num_of_records_per_req %u\n\n", - g_args.reqPerReq); - prompt(); - g_args.interlaceRows = g_args.reqPerReq; - } - - cJSON *dbs = cJSON_GetObjectItem(root, "databases"); - if (!dbs || dbs->type != cJSON_Array) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - int dbSize = cJSON_GetArraySize(dbs); - if (dbSize > MAX_DB_COUNT) { - errorPrint( - "failed to read json, databases size overflow, max database is " - "%d\n", - MAX_DB_COUNT); - goto PARSE_OVER; - } - g_Dbs.db = calloc(1, sizeof(SDataBase) * dbSize); - assert(g_Dbs.db); - g_Dbs.dbCount = dbSize; - for (int i = 0; i < dbSize; ++i) { - cJSON *dbinfos = cJSON_GetArrayItem(dbs, i); - if (dbinfos == NULL) continue; - - // dbinfo - cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); - if (!dbinfo || dbinfo->type != cJSON_Object) { - errorPrint("%s", "failed to read json, dbinfo not 
found\n"); - goto PARSE_OVER; - } - - cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); - if (!dbName || dbName->type != cJSON_String || - dbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, db name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); - - cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); - if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { - g_Dbs.db[i].drop = true; - } else { - g_Dbs.db[i].drop = false; - } - } else if (!drop) { - g_Dbs.db[i].drop = g_args.drop_database; - } else { - errorPrint("%s", "failed to read json, drop input mistake\n"); - goto PARSE_OVER; - } - - cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); - if (precision && precision->type == cJSON_String && - precision->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - SMALL_BUFF_LEN); - } else if (!precision) { - memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); - } else { - errorPrint("%s", "failed to read json, precision not found\n"); - goto PARSE_OVER; - } - - cJSON *update = cJSON_GetObjectItem(dbinfo, "update"); - if (update && update->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.update = (int)update->valueint; - } else if (!update) { - g_Dbs.db[i].dbCfg.update = -1; - } else { - errorPrint("%s", "failed to read json, update not found\n"); - goto PARSE_OVER; - } - - cJSON *replica = cJSON_GetObjectItem(dbinfo, "replica"); - if (replica && replica->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.replica = (int)replica->valueint; - } else if (!replica) { - g_Dbs.db[i].dbCfg.replica = -1; - } else { - errorPrint("%s", "failed to read json, replica not found\n"); - goto PARSE_OVER; - } - - cJSON *keep = cJSON_GetObjectItem(dbinfo, "keep"); - if (keep && keep->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.keep = (int)keep->valueint; - } else if (!keep) { - 
g_Dbs.db[i].dbCfg.keep = -1; - } else { - errorPrint("%s", "failed to read json, keep not found\n"); - goto PARSE_OVER; - } - - cJSON *days = cJSON_GetObjectItem(dbinfo, "days"); - if (days && days->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.days = (int)days->valueint; - } else if (!days) { - g_Dbs.db[i].dbCfg.days = -1; - } else { - errorPrint("%s", "failed to read json, days not found\n"); - goto PARSE_OVER; - } - - cJSON *cache = cJSON_GetObjectItem(dbinfo, "cache"); - if (cache && cache->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cache = (int)cache->valueint; - } else if (!cache) { - g_Dbs.db[i].dbCfg.cache = -1; - } else { - errorPrint("%s", "failed to read json, cache not found\n"); - goto PARSE_OVER; - } - - cJSON *blocks = cJSON_GetObjectItem(dbinfo, "blocks"); - if (blocks && blocks->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.blocks = (int)blocks->valueint; - } else if (!blocks) { - g_Dbs.db[i].dbCfg.blocks = -1; - } else { - errorPrint("%s", "failed to read json, block not found\n"); - goto PARSE_OVER; - } - - // cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, - // "maxtablesPerVnode"); if (maxtablesPerVnode && - // maxtablesPerVnode->type - // == cJSON_Number) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; - //} else if (!maxtablesPerVnode) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; - //} else { - // printf("failed to read json, maxtablesPerVnode not found"); - // goto PARSE_OVER; - //} - - cJSON *minRows = cJSON_GetObjectItem(dbinfo, "minRows"); - if (minRows && minRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.minRows = (uint32_t)minRows->valueint; - } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, minRows not found\n"); - goto PARSE_OVER; - } - - cJSON *maxRows = cJSON_GetObjectItem(dbinfo, "maxRows"); - if (maxRows && maxRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.maxRows = (uint32_t)maxRows->valueint; - } 
else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, maxRows not found\n"); - goto PARSE_OVER; - } - - cJSON *comp = cJSON_GetObjectItem(dbinfo, "comp"); - if (comp && comp->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.comp = (int)comp->valueint; - } else if (!comp) { - g_Dbs.db[i].dbCfg.comp = -1; - } else { - errorPrint("%s", "failed to read json, comp not found\n"); - goto PARSE_OVER; - } - - cJSON *walLevel = cJSON_GetObjectItem(dbinfo, "walLevel"); - if (walLevel && walLevel->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.walLevel = (int)walLevel->valueint; - } else if (!walLevel) { - g_Dbs.db[i].dbCfg.walLevel = -1; - } else { - errorPrint("%s", "failed to read json, walLevel not found\n"); - goto PARSE_OVER; - } - - cJSON *cacheLast = cJSON_GetObjectItem(dbinfo, "cachelast"); - if (cacheLast && cacheLast->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cacheLast = (int)cacheLast->valueint; - } else if (!cacheLast) { - g_Dbs.db[i].dbCfg.cacheLast = -1; - } else { - errorPrint("%s", "failed to read json, cacheLast not found\n"); - goto PARSE_OVER; - } - - cJSON *quorum = cJSON_GetObjectItem(dbinfo, "quorum"); - if (quorum && quorum->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.quorum = (int)quorum->valueint; - } else if (!quorum) { - g_Dbs.db[i].dbCfg.quorum = 1; - } else { - errorPrint("%s", "failed to read json, quorum input mistake"); - goto PARSE_OVER; - } - - cJSON *fsync = cJSON_GetObjectItem(dbinfo, "fsync"); - if (fsync && fsync->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.fsync = (int)fsync->valueint; - } else if (!fsync) { - g_Dbs.db[i].dbCfg.fsync = -1; - } else { - errorPrint("%s", "failed to read json, fsync input mistake\n"); - goto PARSE_OVER; - } - - // super_tables - cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); - if (!stables || stables->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super_tables not found\n"); - goto PARSE_OVER; - } - - int 
stbSize = cJSON_GetArraySize(stables); - if (stbSize > MAX_SUPER_TABLE_COUNT) { - errorPrint( - "failed to read json, supertable size overflow, max supertable " - "is %d\n", - MAX_SUPER_TABLE_COUNT); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls = calloc(1, stbSize * sizeof(SSuperTable)); - assert(g_Dbs.db[i].superTbls); - g_Dbs.db[i].superTblCount = stbSize; - for (int j = 0; j < stbSize; ++j) { - cJSON *stbInfo = cJSON_GetArrayItem(stables, j); - if (stbInfo == NULL) continue; - - // dbinfo - cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String || - stbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, stb name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, - TSDB_TABLE_NAME_LEN); - - cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (!prefix || prefix->type != cJSON_String || - prefix->valuestring == NULL) { - errorPrint( - "%s", "failed to read json, childtable_prefix not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, - prefix->valuestring, TBNAME_PREFIX_LEN); - - cJSON *escapeChar = - cJSON_GetObjectItem(stbInfo, "escape_character"); - if (escapeChar && escapeChar->type == cJSON_String && - escapeChar->valuestring != NULL) { - if ((0 == strncasecmp(escapeChar->valuestring, "yes", 3))) { - g_Dbs.db[i].superTbls[j].escapeChar = true; - } else if (0 == strncasecmp(escapeChar->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } else { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } - } else if (!escapeChar) { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } else { - errorPrint("%s", - "failed to read json, escape_character not found\n"); - goto PARSE_OVER; - } - - cJSON *autoCreateTbl = - cJSON_GetObjectItem(stbInfo, "auto_create_table"); - if (autoCreateTbl && autoCreateTbl->type == cJSON_String && - autoCreateTbl->valuestring != NULL) { - if ((0 == 
strncasecmp(autoCreateTbl->valuestring, "yes", 3)) && - (TBL_ALREADY_EXISTS != - g_Dbs.db[i].superTbls[j].childTblExists)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = - AUTO_CREATE_SUBTBL; - } else if (0 == - strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = - PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = - PRE_CREATE_SUBTBL; - } - } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - errorPrint( - "%s", "failed to read json, auto_create_table not found\n"); - goto PARSE_OVER; - } - - cJSON *batchCreateTbl = - cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = - batchCreateTbl->valueint; - } else if (!batchCreateTbl) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = - DEFAULT_CREATE_BATCH; - } else { - errorPrint( - "%s", - "failed to read json, batch_create_tbl_num not found\n"); - goto PARSE_OVER; - } - - cJSON *childTblExists = - cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no - if (childTblExists && childTblExists->type == cJSON_String && - childTblExists->valuestring != NULL) { - if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) && - (g_Dbs.db[i].drop == false)) { - g_Dbs.db[i].superTbls[j].childTblExists = - TBL_ALREADY_EXISTS; - } else if ((0 == strncasecmp(childTblExists->valuestring, "no", - 2) || - (g_Dbs.db[i].drop == true))) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } - } else if (!childTblExists) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - errorPrint( - "%s", - "failed to read json, child_table_exists not found\n"); - goto PARSE_OVER; - } - - if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - g_Dbs.db[i].superTbls[j].autoCreateTable = 
PRE_CREATE_SUBTBL; - } - - cJSON *count = cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint( - "%s", - "failed to read json, childtable_count input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - - cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); - if (dataSource && dataSource->type == cJSON_String && - dataSource->valuestring != NULL) { - tstrncpy( - g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, - min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1)); - } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", - min(SMALL_BUFF_LEN, strlen("rand") + 1)); - } else { - errorPrint("%s", - "failed to read json, data_source not found\n"); - goto PARSE_OVER; - } - - cJSON *stbIface = cJSON_GetObjectItem( - stbInfo, "insert_mode"); // taosc , rest, stmt - if (stbIface && stbIface->type == cJSON_String && - stbIface->valuestring != NULL) { - if (0 == strcasecmp(stbIface->valuestring, "taosc")) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { - g_Dbs.db[i].superTbls[j].iface = REST_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { - g_Dbs.db[i].superTbls[j].iface = STMT_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "sml")) { - g_Dbs.db[i].superTbls[j].iface = SML_IFACE; - g_args.iface = SML_IFACE; - } else { - errorPrint( - "failed to read json, insert_mode %s not recognized\n", - stbIface->valuestring); - goto PARSE_OVER; - } - } else if (!stbIface) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else { - errorPrint("%s", - "failed to read json, insert_mode not found\n"); - goto PARSE_OVER; - } - - cJSON *stbLineProtocol = - cJSON_GetObjectItem(stbInfo, "line_protocol"); - if (stbLineProtocol && stbLineProtocol->type 
== cJSON_String && - stbLineProtocol->valuestring != NULL) { - if (0 == strcasecmp(stbLineProtocol->valuestring, "line")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_LINE_PROTOCOL; - } else if (0 == - strcasecmp(stbLineProtocol->valuestring, "telnet")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_TELNET_PROTOCOL; - } else if (0 == - strcasecmp(stbLineProtocol->valuestring, "json")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_JSON_PROTOCOL; - } else { - errorPrint( - "failed to read json, line_protocol %s not " - "recognized\n", - stbLineProtocol->valuestring); - goto PARSE_OVER; - } - } else if (!stbLineProtocol) { - g_Dbs.db[i].superTbls[j].lineProtocol = TSDB_SML_LINE_PROTOCOL; - } else { - errorPrint("%s", - "failed to read json, line_protocol not found\n"); - goto PARSE_OVER; - } - - cJSON *childTbl_limit = - cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if ((childTbl_limit) && (g_Dbs.db[i].drop != true) && - (g_Dbs.db[i].superTbls[j].childTblExists == - TBL_ALREADY_EXISTS)) { - if (childTbl_limit->type != cJSON_Number) { - errorPrint("%s", "failed to read json, childtable_limit\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblLimit = - childTbl_limit->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblLimit = - -1; // select ... limit -1 means all query result, drop = - // yes mean all table need recreate, limit value is - // invalid. 
- } - - cJSON *childTbl_offset = - cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true) && - (g_Dbs.db[i].superTbls[j].childTblExists == - TBL_ALREADY_EXISTS)) { - if ((childTbl_offset->type != cJSON_Number) || - (0 > childTbl_offset->valueint)) { - errorPrint("%s", - "failed to read json, childtable_offset\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblOffset = - childTbl_offset->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblOffset = 0; - } - - cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, TSDB_DB_NAME_LEN); - } else if (!ts) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", - TSDB_DB_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, start_timestamp not found\n"); - goto PARSE_OVER; - } - - cJSON *timestampStep = - cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (timestampStep && timestampStep->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].timeStampStep = - timestampStep->valueint; - } else if (!timestampStep) { - g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; - } else { - errorPrint("%s", - "failed to read json, timestamp_step not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type == cJSON_String && - sampleFormat->valuestring != NULL) { - tstrncpy( - g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, - min(SMALL_BUFF_LEN, strlen(sampleFormat->valuestring) + 1)); - } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", - SMALL_BUFF_LEN); - } else { - errorPrint("%s", - "failed to read json, sample_format not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && 
sampleFile->type == cJSON_String && - sampleFile->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, - sampleFile->valuestring, - min(MAX_FILE_NAME_LEN, - strlen(sampleFile->valuestring) + 1)); - } else if (!sampleFile) { - memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, sample_file not found\n"); - goto PARSE_OVER; - } - - cJSON *useSampleTs = cJSON_GetObjectItem(stbInfo, "use_sample_ts"); - if (useSampleTs && useSampleTs->type == cJSON_String && - useSampleTs->valuestring != NULL) { - if (0 == strncasecmp(useSampleTs->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].useSampleTs = true; - } else if (0 == - strncasecmp(useSampleTs->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } - } else if (!useSampleTs) { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - errorPrint("%s", - "failed to read json, use_sample_ts not found\n"); - goto PARSE_OVER; - } - - cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if ((tagsFile && tagsFile->type == cJSON_String) && - (tagsFile->valuestring != NULL)) { - tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, - tagsFile->valuestring, MAX_FILE_NAME_LEN); - if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - g_Dbs.db[i].superTbls[j].tagSource = 1; - } - } else if (!tagsFile) { - memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - errorPrint("%s", "failed to read json, tags_file not found\n"); - goto PARSE_OVER; - } - - cJSON *stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) { - int32_t len = (int32_t)stbMaxSqlLen->valueint; - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < 5) { - len = 5; - } - 
g_Dbs.db[i].superTbls[j].maxSqlLen = len; - } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; - } else { - errorPrint("%s", - "failed to read json, stbMaxSqlLen input mistake\n"); - goto PARSE_OVER; - } - /* - cJSON *multiThreadWriteOneTbl = - cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no - , yes if (multiThreadWriteOneTbl - && multiThreadWriteOneTbl->type == cJSON_String - && multiThreadWriteOneTbl->valuestring != NULL) { - if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", - 3)) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; } else - { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } - } else if (!multiThreadWriteOneTbl) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } else { - errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not - found\n"); goto PARSE_OVER; - } - */ - cJSON *insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (insertRows && insertRows->type == cJSON_Number) { - if (insertRows->valueint < 0) { - errorPrint( - "%s", - "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; - } else if (!insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } else { - errorPrint("%s", - "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *stbInterlaceRows = - cJSON_GetObjectItem(stbInfo, "interlace_rows"); - if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { - if (stbInterlaceRows->valueint < 0) { - errorPrint( - "%s", - "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].interlaceRows = - (uint32_t)stbInterlaceRows->valueint; - - if (g_Dbs.db[i].superTbls[j].interlaceRows > - g_Dbs.db[i].superTbls[j].insertRows) { - printf( - "NOTICE: db[%d].superTbl[%d]'s interlace rows value %u " - "> insert_rows %" PRId64 "\n\n", - i, j, 
g_Dbs.db[i].superTbls[j].interlaceRows, - g_Dbs.db[i].superTbls[j].insertRows); - printf( - " interlace rows value will be set to " - "insert_rows %" PRId64 "\n\n", - g_Dbs.db[i].superTbls[j].insertRows); - prompt(); - g_Dbs.db[i].superTbls[j].interlaceRows = - (uint32_t)g_Dbs.db[i].superTbls[j].insertRows; - } - } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = - g_args.interlaceRows; // 0 means progressive mode, > 0 mean - // interlace mode. max value is less - // or equ num_of_records_per_req - } else { - errorPrint( - "%s", - "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *disorderRatio = - cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (disorderRatio && disorderRatio->type == cJSON_Number) { - if (disorderRatio->valueint > 50) disorderRatio->valueint = 50; - - if (disorderRatio->valueint < 0) disorderRatio->valueint = 0; - - g_Dbs.db[i].superTbls[j].disorderRatio = - (int)disorderRatio->valueint; - } else if (!disorderRatio) { - g_Dbs.db[i].superTbls[j].disorderRatio = 0; - } else { - errorPrint("%s", - "failed to read json, disorderRatio not found\n"); - goto PARSE_OVER; - } - - cJSON *disorderRange = - cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (disorderRange && disorderRange->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRange = - (int)disorderRange->valueint; - } else if (!disorderRange) { - g_Dbs.db[i].superTbls[j].disorderRange = DEFAULT_DISORDER_RANGE; - } else { - errorPrint("%s", - "failed to read json, disorderRange not found\n"); - goto PARSE_OVER; - } - - cJSON *insertInterval = - cJSON_GetObjectItem(stbInfo, "insert_interval"); - if (insertInterval && insertInterval->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertInterval = - insertInterval->valueint; - if (insertInterval->valueint < 0) { - errorPrint( - "%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - } else if (!insertInterval) { - verbosePrint( - 
"%s() LN%d: stable insert interval be overrode by global " - "%" PRIu64 ".\n", - __func__, __LINE__, g_args.insert_interval); - g_Dbs.db[i].superTbls[j].insertInterval = - g_args.insert_interval; - } else { - errorPrint( - "%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - if (getColumnAndTagTypeFromInsertJsonFile( - stbInfo, &g_Dbs.db[i].superTbls[j])) { - goto PARSE_OVER; - } - } - } - - code = 0; - -PARSE_OVER: - return code; -} -int getMetaFromQueryJsonFile(cJSON *root) { - int32_t code = -1; - - cJSON *cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON *host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_queryInfo.host, DEFAULT_HOST, MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON *port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = (uint16_t)port->valueint; - } else if (!port) { - g_queryInfo.port = DEFAULT_PORT; - } - - cJSON *user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - ; - } - - cJSON *password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && - password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, - SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, - SHELL_MAX_PASSWORD_LEN); - ; - } - - cJSON *answerPrompt = - 
cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String && - answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = false; - } else { - errorPrint("%s", - "failed to read json, confirm_parameter_prompt not found\n"); - goto PARSE_OVER; - } - - cJSON *gQueryTimes = cJSON_GetObjectItem(root, "query_times"); - if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s", - "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - g_args.query_times = gQueryTimes->valueint; - } else if (!gQueryTimes) { - g_args.query_times = DEFAULT_QUERY_TIME; - } else { - errorPrint("%s", "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON *dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); - } else if (!dbs) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON *queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode && queryMode->type == cJSON_String && - queryMode->valuestring != NULL) { - tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, - min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1)); - } else if (!queryMode) { - tstrncpy(g_queryInfo.queryMode, "taosc", - min(SMALL_BUFF_LEN, strlen("taosc") + 1)); - } else { - errorPrint("%s", "failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // specified_table_query - cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!specifiedQuery) { - 
g_queryInfo.specifiedQueryInfo.concurrent = 1; - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, super_table_query not found\n"); - goto PARSE_OVER; - } else { - cJSON *queryInterval = - cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (queryInterval && queryInterval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.queryInterval = - queryInterval->valueint; - } else if (!queryInterval) { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - cJSON *specifiedQueryTimes = - cJSON_GetObjectItem(specifiedQuery, "query_times"); - if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint("failed to read json, query_times: %" PRId64 - ", need be a valid (>0) number\n", - specifiedQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.queryTimes = - specifiedQueryTimes->valueint; - } else if (!specifiedQueryTimes) { - g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint( - "%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - if (concurrent->valueint <= 0) { - errorPrint( - "query sqlCount %d or concurrent %d is not correct.\n", - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); - goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.concurrent = - (uint32_t)concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - } - - cJSON *specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String && - specifiedAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { 
- g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", - "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON *interval = cJSON_GetObjectItem(specifiedQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = - interval->valueint; - } else if (!interval) { - // printf("failed to read json, subscribe interval no found\n"); - // goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.subscribeInterval = - DEFAULT_SUB_INTERVAL; - } - - cJSON *restart = cJSON_GetObjectItem(specifiedQuery, "restart"); - if (restart && restart->type == cJSON_String && - restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", - "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - cJSON *keepProgress = - cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (keepProgress && keepProgress->type == cJSON_String && - keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint( - "%s", - "failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON *specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); - if 
(!specifiedSqls) { - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(specifiedSqls); - if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent > - MAX_QUERY_SQL_COUNT) { - errorPrint( - "failed to read json, query sql(%d) * concurrent(%d) " - "overflow, max is %d\n", - superSqlSize, g_queryInfo.specifiedQueryInfo.concurrent, - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON *sql = cJSON_GetArrayItem(specifiedSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || - sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, BUFFER_SIZE); - - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - cJSON *endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume && endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = - (int)endAfterConsume->valueint; - } - if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - cJSON *resubAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if ((resubAfterConsume) && - (resubAfterConsume->type == cJSON_Number) && - (resubAfterConsume->valueint >= 0)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = - (int)resubAfterConsume->valueint; - } - - if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) - 
g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if ((NULL != result) && (result->type == cJSON_String) && - (result->valuestring != NULL)) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.specifiedQueryInfo.result[j], 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, super query result file " - "not found\n"); - goto PARSE_OVER; - } - } - } - } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 1; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, sub_table_query not found\n"); - code = 0; - goto PARSE_OVER; - } else { - cJSON *subrate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else if (!subrate) { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - cJSON *superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("failed to read json, query_times: %" PRId64 - ", need be a valid (>0) number\n", - superQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else if (!superQueryTimes) { - g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s", - "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON *threads = cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - if (threads->valueint <= 0) { - errorPrint("%s", - "failed to read json, threads input mistake\n"); - goto 
PARSE_OVER; - } - g_queryInfo.superQueryInfo.threadCnt = (uint32_t)threads->valueint; - } else if (!threads) { - g_queryInfo.superQueryInfo.threadCnt = DEFAULT_NTHREADS; - } - - // cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, - // "childtable_count"); if (subTblCnt && subTblCnt->type == - // cJSON_Number) - // { - // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.superQueryInfo.childTblCount = 0; - //} - - cJSON *stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && - stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, - TSDB_TABLE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, super table name input error\n"); - goto PARSE_OVER; - } - - cJSON *superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && superAsyncMode->type == cJSON_String && - superAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", - "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON *superInterval = cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == cJSON_Number) { - if (superInterval->valueint < 0) { - errorPrint("%s", - "failed to read json, interval input mistake\n"); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.subscribeInterval = - superInterval->valueint; - } else if (!superInterval) { - // printf("failed to read json, subscribe interval no found\n"); - // goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = - DEFAULT_QUERY_INTERVAL; - } - - cJSON *subrestart = 
cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && - subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", - "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - - cJSON *superkeepProgress = - cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && superkeepProgress->type == cJSON_String && - superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint("%s", - "failed to read json, subscribe super table " - "keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - cJSON *superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume && - superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - (int)superEndAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.endAfterConsume < -1) - g_queryInfo.superQueryInfo.endAfterConsume = -1; - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - cJSON *superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) && - (superResubAfterConsume->type == cJSON_Number) && - (superResubAfterConsume->valueint >= 0)) { - 
g_queryInfo.superQueryInfo.resubAfterConsume = - (int)superResubAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - - // supert table sqls - cJSON *superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint( - "failed to read json, query sql size overflow, max is %d\n", - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON *sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || - sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - BUFFER_SIZE); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && - result->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, sub query result file not " - "found\n"); - goto PARSE_OVER; - } - } - } - } - - code = 0; - -PARSE_OVER: - return code; -} - -int getInfoFromJsonFile(char *file) { - debugPrint("%s %d %s\n", __func__, __LINE__, file); - int32_t code = -1; - FILE * fp = fopen(file, "r"); - if (!fp) { - errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); - return code; - } - - int maxLen = MAX_JSON_BUFF; - 
char *content = calloc(1, maxLen + 1); - int len = (int)fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - errorPrint("failed to read %s, content is null", file); - return code; - } - - content[len] = 0; - cJSON *root = cJSON_Parse(content); - if (root == NULL) { - errorPrint("failed to cjson parse %s, invalid json format\n", file); - goto PARSE_OVER; - } - - cJSON *filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && - filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_args.test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_args.test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_args.test_mode = SUBSCRIBE_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not support\n"); - goto PARSE_OVER; - } - } else if (!filetype) { - g_args.test_mode = INSERT_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - - if (INSERT_TEST == g_args.test_mode) { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.use_metric = g_args.use_metric; - code = getMetaFromInsertJsonFile(root); - } else if ((QUERY_TEST == g_args.test_mode) || - (SUBSCRIBE_TEST == g_args.test_mode)) { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - code = getMetaFromQueryJsonFile(root); - } else { - errorPrint("%s", - "input json file type error! 
please input correct file " - "type: insert or query or subscribe\n"); - goto PARSE_OVER; - } -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return code; -} - -int testMetaFile() { - if (INSERT_TEST == g_args.test_mode) { - if (g_Dbs.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - } - return insertTestProcess(); - - } else if (QUERY_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - } - return queryTestProcess(); - - } else if (SUBSCRIBE_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - } - return subscribeTestProcess(); - } else { - errorPrint("unsupport test mode (%d)\n", g_args.test_mode); - return -1; - } - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoMain.c b/src/kit/taosdemo/src/demoMain.c deleted file mode 100644 index 4940d7188c999f03da0d021327c58d8a7c1a1b9f..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoMain.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" -int64_t g_totalChildTables = DEFAULT_CHILDTABLES; -int64_t g_actualChildTables = 0; -FILE * g_fpOfInsertResult = NULL; -char * g_dupstr = NULL; -SDbs g_Dbs; -SQueryMetaInfo g_queryInfo; - -SArguments g_args = { - DEFAULT_METAFILE, // metaFile - DEFAULT_TEST_MODE, // test_mode - DEFAULT_HOST, // host - DEFAULT_PORT, // port - DEFAULT_IFACE, // iface - TSDB_DEFAULT_USER, // user - TSDB_DEFAULT_PASS, // password - DEFAULT_DATABASE, // database - DEFAULT_REPLICA, // replica - DEFAULT_TB_PREFIX, // tb_prefix - DEFAULT_ESCAPE_CHAR, // escapeChar - DEFAULT_SQLFILE, // sqlFile - DEFAULT_USE_METRIC, // use_metric - DEFAULT_DROP_DB, // drop_database - DEFAULT_AGGR_FUNC, // aggr_func - DEFAULT_DEBUG, // debug_print - DEFAULT_VERBOSE, // verbose_print - DEFAULT_PERF_STAT, // performance statistic print - DEFAULT_ANS_YES, // answer_yes; - DEFAULT_OUTPUT, // output_file - DEFAULT_SYNC_MODE, // mode : sync or async - DEFAULT_DATA_TYPE, // data_type - DEFAULT_DATATYPE, // dataType - DEFAULT_BINWIDTH, // binwidth - DEFAULT_COL_COUNT, // columnCount, timestamp + float + int + float - DEFAULT_LEN_ONE_ROW, // lenOfOneRow - DEFAULT_NTHREADS, // nthreads - DEFAULT_INSERT_INTERVAL, // insert_interval - DEFAULT_TIMESTAMP_STEP, // timestamp_step - DEFAULT_QUERY_TIME, // query_times - DEFAULT_PREPARED_RAND, // prepared_rand - DEFAULT_INTERLACE_ROWS, // interlaceRows; - DEFAULT_REQ_PER_REQ, // reqPerReq - TSDB_MAX_ALLOWED_SQL_LEN, // max_sql_len - DEFAULT_CHILDTABLES, // ntables - DEFAULT_INSERT_ROWS, // insertRows - DEFAULT_ABORT, // abort - DEFAULT_RATIO, // disorderRatio - DEFAULT_DISORDER_RANGE, // disorderRange - DEFAULT_METHOD_DEL, // method_of_delete - DEFAULT_TOTAL_INSERT, // totalInsertRows; - DEFAULT_TOTAL_AFFECT, // totalAffectedRows; - DEFAULT_DEMO_MODE, // demo_mode; -}; - -int main(int argc, char *argv[]) { - if (parse_args(argc, argv)) { - exit(EXIT_FAILURE); - } - debugPrint("meta file: %s\n", g_args.metaFile); - - if (g_args.metaFile) { - 
g_totalChildTables = 0; - if (getInfoFromJsonFile(g_args.metaFile)) { - exit(EXIT_FAILURE); - } - if (testMetaFile()) { - exit(EXIT_FAILURE); - } - } else { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.db = calloc(1, sizeof(SDataBase)); - if (NULL == g_Dbs.db) { - errorPrint("%s", "failed to allocate memory\n"); - } - - g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable)); - if (NULL == g_Dbs.db[0].superTbls) { - errorPrint("%s", "failed to allocate memory\n"); - } - - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS *qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - g_Dbs.db[0].dbName, g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); - } else { - testCmdLine(); - } - } - postFreeResource(); - - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoOutput.c b/src/kit/taosdemo/src/demoOutput.c deleted file mode 100644 index 026673ca86edb67d752f3cee58de8ea5a6769247..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoOutput.c +++ /dev/null @@ -1,1052 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" - -#define SHOW_PARSE_RESULT_START() \ - do { \ - if (g_args.metaFile) \ - printf( \ - "\033[1m\033[40;32m================ %s parse result START " \ - "================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_END() \ - do { \ - if (g_args.metaFile) \ - printf( \ - "\033[1m\033[40;32m================ %s parse result " \ - "END================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \ - do { \ - if (g_args.metaFile) \ - fprintf(fp, \ - "\033[1m\033[40;32m================ %s parse result " \ - "START ================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \ - do { \ - if (g_args.metaFile) \ - fprintf(fp, \ - "\033[1m\033[40;32m================ %s parse result " \ - "END================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -int getDbFromServer(TAOS *taos, SDbInfo **dbInfos) { - TAOS_RES *res; - TAOS_ROW row = NULL; - int count = 0; - - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); - - if (code != 0) { - errorPrint("failed to run , reason: %s\n", - taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while ((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { - continue; - } - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - errorPrint("failed to allocate memory for some dbInfo[%d]\n", - count); - return -1; - } - - tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - formatTimestamp(dbInfos[count]->create_time, - *(int64_t *)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], - TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = 
*((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = - *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - errorPrint("The database count overflow than %d\n", - MAX_DATABASE_COUNT); - break; - } - } - - return count; -} - -void xDumpFieldToFile(FILE *fp, const char *val, TAOS_FIELD *field, - int32_t length, int precision) { - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((int8_t *)val))) == 1) ? 
1 : 0)); - break; - - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - fprintf(fp, "%d", *((uint8_t *)val)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - fprintf(fp, "%d", *((uint16_t *)val)); - break; - - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - - case TSDB_DATA_TYPE_UINT: - fprintf(fp, "%d", *((uint32_t *)val)); - break; - - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64 "", *((int64_t *)val)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - fprintf(fp, "%" PRId64 "", *((uint64_t *)val)); - break; - - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - formatTimestamp(buf, *(int64_t *)val, precision); - fprintf(fp, "'%s'", buf); - break; - - default: - break; - } -} - -int xDumpResultToFile(const char *fname, TAOS_RES *tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE *fp = fopen(fname, "at"); - if (fp == NULL) { - errorPrint("failed to open file: %s\n", fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); - - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); - } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - - int numOfRows = 0; - do { - int32_t *length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, (const char *)row[i], fields + i, length[i], - precision); - } - fputc('\n', fp); - - numOfRows++; - row = 
taos_fetch_row(tres); - } while (row != NULL); - - fclose(fp); - - return numOfRows; -} - -#ifndef TAOSDEMO_COMMIT_SHA1 -#define TAOSDEMO_COMMIT_SHA1 "unknown" -#endif - -#ifndef TD_VERNUMBER -#define TD_VERNUMBER "unknown" -#endif - -#ifndef TAOSDEMO_STATUS -#define TAOSDEMO_STATUS "unknown" -#endif - -void printVersion() { - char tdengine_ver[] = TD_VERNUMBER; - char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1; - char taosdemo_status[] = TAOSDEMO_STATUS; - - if (strlen(taosdemo_status) == 0) { - printf("taosdemo version %s-%s\n", tdengine_ver, taosdemo_ver); - } else { - printf("taosdemo version %s-%s, status:%s\n", tdengine_ver, - taosdemo_ver, taosdemo_status); - } - exit(EXIT_SUCCESS); -} - -void printHelp() { - char indent[10] = " "; - printf("%s\n\n", "Usage: taosdemo [OPTION...]"); - printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", - "The meta file to the execution procedure."); - printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", - "The user name to use when connecting to the server."); -#ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'powerdb'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. By default is '/etc/power/'."); -#elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'tqueue'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. By default is '/etc/tq/'."); -#elif (_TD_PRO_ == true) - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'prodb'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. 
By default is '/etc/ProDB/'."); -#else - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server."); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory."); -#endif - printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t", - "TDengine server FQDN to connect. The default host is localhost."); - printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", - "The TCP/IP port number to use for the connection."); - printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", - "The interface (taosc, rest, stmt, and sml(line protocol)) taosdemo " - "uses. By default " - "use 'taosc'."); - printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", - "Destination database. By default is 'test'."); - printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", - "Set the replica parameters of the database, By default use 1, min: " - "1, max: 3."); - printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", - "Table prefix name. By default use 'd'."); - printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t", - "Use escape character for Both Stable and normmal table name"); - printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", - "The select sql file."); - printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", - "Use normal table flag."); - printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", - "Direct output to the named file. By default use './output.txt'."); - printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", - "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); - printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", - "The data_type of columns, By default use: FLOAT,INT,FLOAT. NCHAR " - "and BINARY can also use custom length. Eg: NCHAR(16),BINARY(8)"); - printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", - "The width of data_type 'BINARY' or 'NCHAR'. 
By default use ", - g_args.binwidth); - printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", - "The number of columns per record. Demo mode by default is ", - DEFAULT_DATATYPE_NUM, " (float, int, float). Max values is ", - MAX_NUM_COLUMNS); - printf("%s%s%s%s\n", indent, indent, indent, - "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify " - "column type, -l will be ignored."); - printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", - "The number of threads. By default use ", DEFAULT_NTHREADS); - printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", - "The sleep time (ms) between insertion. By default is 0."); - printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", - "The timestamp step between insertion. By default is ", - DEFAULT_TIMESTAMP_STEP); - printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", - "The interlace rows of insertion. By default is ", - DEFAULT_INTERLACE_ROWS); - printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", - "The number of records per request. By default is 30000."); - printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", - "The number of tables. By default is 10000."); - printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", - "The number of records per table. By default is 10000."); - printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", - "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); - printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", - "Test aggregation functions after insertion."); - printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", - "Input yes for prompt."); - printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", - "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default " - "is in order."); - printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", - "Out of order data's range. Unit is ms. 
By default is 1000."); - printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", "Print debug info."); - printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", "Give this help list"); - printf("%s%s%s%s\n", indent, " --usage\t", "\t\t", - "Give a short usage message"); - printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version."); - /* printf("%s%s%s%s\n", indent, "-D", indent, - "Delete database if exists. 0: no, 1: yes, default is 1"); - */ - printf( - "\nMandatory or optional arguments to long options are also mandatory or optional\n\ -for any corresponding short options.\n\ -\n\ -Report bugs to .\n"); - exit(EXIT_SUCCESS); -} - -void printfInsertMeta() { - setupForAnsiEscape(); - SHOW_PARSE_RESULT_START(); - - if (g_args.demo_mode) { - printf( - "\ntaosdemo is simulating data generated by power equipment " - "monitoring...\n\n"); - } else { - printf("\ntaosdemo is simulating random data as you request..\n\n"); - } - - if (g_args.iface != INTERFACE_BUT) { - // first time if no iface specified - printf("interface: \033[33m%s\033[0m\n", - (g_args.iface == TAOSC_IFACE) ? "taosc" - : (g_args.iface == REST_IFACE) ? "rest" - : (g_args.iface == STMT_IFACE) ? 
"stmt" - : "sml"); - } - - printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, - g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("configDir: \033[33m%s\033[0m\n", configDir); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread num of insert data: \033[33m%d\033[0m\n", - g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", - g_Dbs.threadCountForCreateTbl); - printf("top insert interval: \033[33m%" PRIu64 "\033[0m\n", - g_args.insert_interval); - printf("number of records per req: \033[33m%u\033[0m\n", g_args.reqPerReq); - printf("max sql length: \033[33m%" PRIu64 "\033[0m\n", - g_args.max_sql_len); - - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database[%d] name: \033[33m%s\033[0m\n", i, - g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33m no\033[0m\n"); - } else { - printf(" drop: \033[33m yes\033[0m\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" update: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.maxRows); - 
} - if (g_Dbs.db[i].dbCfg.comp > 0) { - printf(" comp: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { - printf(" precision: \033[33m%s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } else { - printf("\033[1m\033[40;31m precision error: %s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - if (g_args.use_metric) { - printf(" super table count: \033[33m%" PRIu64 "\033[0m\n", - g_Dbs.db[i].superTblCount); - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%" PRIu64 "\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "no"); - } else if (AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "yes"); - } else { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", - "no"); - } else if (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", - "yes"); - } else { - printf(" childTblExists: \033[33m%s\033[0m\n", - "error"); - } - - printf(" childTblCount: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblCount); - printf(" 
childTblPrefix: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].dataSource); - printf(" iface: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].iface == TAOSC_IFACE) ? "taosc" - : (g_Dbs.db[i].superTbls[j].iface == REST_IFACE) ? "rest" - : (g_Dbs.db[i].superTbls[j].iface == STMT_IFACE) - ? "stmt" - : "sml"); - if (g_Dbs.db[i].superTbls[j].iface == SML_IFACE) { - printf(" lineProtocol: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].lineProtocol == - TSDB_SML_LINE_PROTOCOL) - ? "line" - : (g_Dbs.db[i].superTbls[j].lineProtocol == - TSDB_SML_TELNET_PROTOCOL) - ? "telnet" - : "json"); - } - - if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%" PRId64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblLimit); - } - if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { - printf(" childTblOffset: \033[33m%" PRIu64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblOffset); - } - printf(" insertRows: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].insertRows); - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); - }else { - printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); - } - */ - printf(" interlaceRows: \033[33m%u\033[0m\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%" PRIu64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - - printf(" disorderRange: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRange); - printf(" disorderRatio: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%" PRIu64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - printf(" timeStampStep: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - printf(" startTimestamp: \033[33m%s\033[0m\n", - 
g_Dbs.db[i].superTbls[j].startTimestamp); - printf(" sampleFormat: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - printf(" sampleFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFile); - printf(" useSampleTs: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].useSampleTs - ? "yes (warning: disorderRange/disorderRatio is " - "disabled)" - : "no"); - printf(" tagsFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].columns[k].dataType, - // g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", 6)) || - (0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 5))) { - printf("column[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - printf("column[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - printf("\n"); - - printf(" tagCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].tags[k].dataType, - // g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == - strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) || - (0 == - strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - printf("tag[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - printf("\n"); - } - } else { - printf(" childTblCount: 
\033[33m%" PRId64 "\033[0m\n", - g_args.ntables); - printf(" insertRows: \033[33m%" PRId64 "\033[0m\n", - g_args.insertRows); - } - printf("\n"); - } - - SHOW_PARSE_RESULT_END(); - resetAfterAnsiEscape(); -} - -void printfInsertMetaToFile(FILE *fp) { - SHOW_PARSE_RESULT_START_TO_FILE(fp); - - fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "configDir: %s\n", configDir); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); - fprintf(fp, "thread num of create table: %d\n", - g_Dbs.threadCountForCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); - fprintf(fp, "max sql length: %" PRIu64 "\n", - g_args.max_sql_len); - fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - fprintf(fp, "database[%d]:\n", i); - fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); - } else { - fprintf(fp, " drop: yes\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - fprintf(fp, " blocks: %d\n", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - fprintf(fp, " cache: %d\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - fprintf(fp, " days: %d\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - fprintf(fp, " keep: %d\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - fprintf(fp, " replica: %d\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - fprintf(fp, " update: %d\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - fprintf(fp, " minRows: %d\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - fprintf(fp, " maxRows: %d\n", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - fprintf(fp, " comp: %d\n", - g_Dbs.db[i].dbCfg.comp); - } - 
if (g_Dbs.db[i].dbCfg.walLevel > 0) { - fprintf(fp, " walLevel: %d\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - fprintf(fp, " fsync: %d\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - fprintf(fp, " quorum: %d\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", - g_Dbs.db[i].dbCfg.precision); - } else { - fprintf(fp, " precision error: %s\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - fprintf(fp, " super table count: %" PRIu64 "\n", - g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if (AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "yes"); - } else { - fprintf(fp, " autoCreateTable: %s\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "yes"); - } else { - fprintf(fp, " childTblExists: %s\n", "error"); - } - - fprintf(fp, " childTblCount: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", - g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " iface: %s\n", - (g_Dbs.db[i].superTbls[j].iface == TAOSC_IFACE) ? "taosc" - : (g_Dbs.db[i].superTbls[j].iface == REST_IFACE) ? 
"rest" - : (g_Dbs.db[i].superTbls[j].iface == STMT_IFACE) ? "stmt" - : "sml"); - fprintf(fp, " insertRows: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %" PRIu64 "\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); - }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); - } - */ - fprintf(fp, " interlaceRows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - fprintf(fp, " disorderRange: %d\n", - g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %" PRIu64 "\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", - g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", - g_Dbs.db[i].superTbls[j].tagsFile); - - fprintf(fp, " columnCount: %d\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].columns[k].dataType, - // g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == - strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", strlen("binary"))) || - (0 == - strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "column[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - fprintf(fp, "column[%d]:%s ", k, - 
g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - fprintf(fp, "\n"); - - fprintf(fp, " tagCount: %d\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].tags[k].dataType, - // g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) || - (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "tag[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - fprintf(fp, "tag[%d]:%s ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - fprintf(fp, "\n"); - } - fprintf(fp, "\n"); - } - - SHOW_PARSE_RESULT_END_TO_FILE(fp); -} - -void printfQueryMeta() { - setupForAnsiEscape(); - SHOW_PARSE_RESULT_START(); - - printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, - g_queryInfo.port); - printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); - - printf("\n"); - - if ((SUBSCRIBE_TEST == g_args.test_mode) || - (QUERY_TEST == g_args.test_mode)) { - printf("specified table query info: \n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { - printf("specified tbl query times:\n"); - printf(" \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryTimes); - printf("query interval: \033[33m%" PRIu64 " ms\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%" PRIu64 "\033[0m\n", - g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); - printf( - "mod: \033[33m%s\033[0m\n", - (g_queryInfo.specifiedQueryInfo.asyncMode) ? 
"async" : "sync"); - printf("interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, - g_queryInfo.specifiedQueryInfo.sql[i]); - } - printf("\n"); - } - - printf("super table query info:\n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.sqlCount); - - if (g_queryInfo.superQueryInfo.sqlCount > 0) { - printf("query interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); - printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%" PRId64 "\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.stbName); - printf("stb query times:\033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); - - printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.superQueryInfo.asyncMode) ? 
"async" : "sync"); - printf("interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, - g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - } - } - - SHOW_PARSE_RESULT_END(); -} - -void printfDbInfoForQueryToFile(char *filename, SDbInfo *dbInfos, int index) { - if (filename[0] == 0) return; - - FILE *fp = fopen(filename, "at"); - if (fp == NULL) { - errorPrint("failed to open file: %s\n", filename); - return; - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %" PRId64 "\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); -} - -void printfQuerySystemInfo(TAOS *taos) { - char filename[MAX_FILE_NAME_LEN] = {0}; - char buffer[SQL_BUFF_LEN] = {0}; - TAOS_RES *res; - - time_t t; - struct tm 
*lt; - time(&t); - lt = localtime(&t); - snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", - lt->tm_year + 1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, - lt->tm_min, lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - // fetchResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - // fetchResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo **dbInfos = - (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) { - tmfree(dbInfos); - return; - } - - for (int i = 0; i < dbCount; i++) { - // printf database info - printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, SQL_BUFF_LEN, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - // show db.stables - snprintf(buffer, SQL_BUFF_LEN, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - free(dbInfos[i]); - } - - free(dbInfos); - resetAfterAnsiEscape(); -} - -void printStatPerThread(threadInfo *pThreadInfo) { - if (0 == pThreadInfo->totalDelay) pThreadInfo->totalDelay = 1; - - fprintf(stderr, - "====thread[%d] completed total inserted rows: %" PRIu64 - ", total affected rows: %" PRIu64 ". 
%.2f records/second====\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (double)(pThreadInfo->totalAffectedRows / - ((double)pThreadInfo->totalDelay / 1000000.0))); -} - -void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) { - pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); - if (pThreadInfo->fp == NULL) { - errorPrint( - "%s() LN%d, failed to open result file: %s, result will not save " - "to file\n", - __func__, __LINE__, pThreadInfo->filePath); - return; - } - - fprintf(pThreadInfo->fp, "%s", resultBuf); - tmfclose(pThreadInfo->fp); - pThreadInfo->fp = NULL; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoQuery.c b/src/kit/taosdemo/src/demoQuery.c deleted file mode 100644 index d8e8438fa7177db993c6da1cc5ac5cad98ef3010..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoQuery.c +++ /dev/null @@ -1,446 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" - -void selectAndGetResult(threadInfo *pThreadInfo, char *command) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint("failed to execute sql:%s, reason:%s\n", command, - taos_errstr(res)); - taos_free_result(res); - return; - } - - fetchResult(res, pThreadInfo); - taos_free_result(res); - - } else if (0 == - strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int retCode = postProceSql(g_queryInfo.host, g_queryInfo.port, command, - pThreadInfo); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } - - } else { - errorPrint("unknown query mode: %s\n", g_queryInfo.queryMode); - } -} - -void *specifiedTableQuery(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - - setThreadName("specTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } else { - pThreadInfo->taos = taos; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint("use database %s failed!\n\n", g_queryInfo.dbName); - return NULL; - } - - uint64_t st = 0; - uint64_t et = 0; - - uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; - - uint64_t totalQueried = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != - '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - 
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - while (queryTimes--) { - if (g_queryInfo.specifiedQueryInfo.queryInterval && - (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { - taosMsleep((int32_t)(g_queryInfo.specifiedQueryInfo.queryInterval - - (et - st))); // ms - } - - st = taosGetTimestampMs(); - - selectAndGetResult( - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - - et = taosGetTimestampMs(); - printf("=thread[%" PRId64 "] use %s complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), g_queryInfo.queryMode, - (et - st) / 1000.0); - - totalQueried++; - g_queryInfo.specifiedQueryInfo.totalQueried++; - - uint64_t currentPrintTime = taosGetTimestampMs(); - uint64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - debugPrint("%s() LN%d, endTs=%" PRIu64 " ms, startTs=%" PRIu64 - " ms\n", - __func__, __LINE__, endTs, startTs); - printf("thread[%d] has currently completed queries: %" PRIu64 - ", QPS: %10.6f\n", - pThreadInfo->threadID, totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - lastPrintTime = currentPrintTime; - } - } - return NULL; -} - -void *superTableQuery(void *sarg) { - char *sqlstr = calloc(1, BUFFER_SIZE); - if (NULL == sqlstr) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - threadInfo *pThreadInfo = (threadInfo *)sarg; - - setThreadName("superTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - free(sqlstr); - return NULL; - } else { - pThreadInfo->taos = taos; - } - } - - uint64_t st = 0; - uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; - - uint64_t queryTimes = 
g_queryInfo.superQueryInfo.queryTimes; - uint64_t totalQueried = 0; - uint64_t startTs = taosGetTimestampMs(); - - uint64_t lastPrintTime = taosGetTimestampMs(); - while (queryTimes--) { - if (g_queryInfo.superQueryInfo.queryInterval && - (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { - taosMsleep((int32_t)(g_queryInfo.superQueryInfo.queryInterval - - (et - st))); // ms - // printf("========sleep duration:%"PRId64 "========inserted - // rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, - // pThreadInfo->start_table_from, pThreadInfo->end_table_to); - } - - st = taosGetTimestampMs(); - for (int i = (int)pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr, 0, BUFFER_SIZE); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, - i); - if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[j], - pThreadInfo->threadID); - } - selectAndGetResult(pThreadInfo, sqlstr); - - totalQueried++; - g_queryInfo.superQueryInfo.totalQueried++; - - int64_t currentPrintTime = taosGetTimestampMs(); - int64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf( - "thread[%d] has currently completed queries: %" PRIu64 - ", QPS: %10.3f\n", - pThreadInfo->threadID, totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - lastPrintTime = currentPrintTime; - } - } - } - et = taosGetTimestampMs(); - printf("####thread[%" PRId64 - "] complete all sqls to allocate all sub-tables[%" PRIu64 - " - %" PRIu64 "] once queries duration:%.4fs\n\n", - taosGetSelfPthreadId(), pThreadInfo->start_table_from, - pThreadInfo->end_table_to, (double)(et - st) / 1000.0); - } - - free(sqlstr); - return NULL; -} - -int queryTestProcess() { - printfQueryMeta(); - - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - 
g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - prompt(); - - if (g_args.debug_print || g_args.verbose_print) { - printfQuerySystemInfo(taos); - } - - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - if (convertHostToServAddr(g_queryInfo.host, g_queryInfo.port, - &g_queryInfo.serv_addr) != 0) - ERROR_EXIT("convert host to server address"); - } - - pthread_t * pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from specify table - int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; - uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - - uint64_t startTs = taosGetTimestampMs(); - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t)); - infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - taos_close(taos); - ERROR_EXIT("memory allocation failed for create threads\n"); - } - - for (uint64_t i = 0; i < nSqlCount; i++) { - for (int j = 0; j < nConcurrent; j++) { - uint64_t seq = i * nConcurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(taos); - free(infos); - free(pids); - errorPrint("use database %s failed!\n\n", - g_queryInfo.dbName); - return -1; - } - } - - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { -#ifdef WINDOWS 
- WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", - WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = connect( - sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, - __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - - pthread_create(pids + seq, NULL, specifiedTableQuery, - pThreadInfo); - } - } - } else { - g_queryInfo.specifiedQueryInfo.concurrent = 0; - } - - taos_close(taos); - - pthread_t * pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - //==== create sub threads for query from all sub table of the super table - if ((g_queryInfo.superQueryInfo.sqlCount > 0) && - (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfSub = - calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = calloc( - 1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - free(infos); - free(pids); - - ERROR_EXIT("memory allocation failed for create threads\n"); - } - - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - uint64_t tableFrom = 0; - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infosOfSub + i; - pThreadInfo->threadID = i; - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? 
a + 1 : a; - pThreadInfo->end_table_to = - i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", - WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = - connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, - __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); - } - - g_queryInfo.superQueryInfo.threadCnt = threads; - } else { - g_queryInfo.superQueryInfo.threadCnt = 0; - } - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - for (int i = 0; i < nConcurrent; i++) { - for (int j = 0; j < nSqlCount; j++) { - pthread_join(pids[i * nSqlCount + j], NULL); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infos + i * nSqlCount + j; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - } - } - - tmfree((char *)pids); - tmfree((char *)infos); - - for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infosOfSub + i; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - - tmfree((char *)pidsOfSub); - tmfree((char *)infosOfSub); - - // 
taos_close(taos);// workaround to use separate taos connection; - uint64_t endTs = taosGetTimestampMs(); - - uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + - g_queryInfo.superQueryInfo.totalQueried; - - fprintf(stderr, - "==== completed total queries: %" PRIu64 - ", the QPS of all threads: %10.3f====\n", - totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoSubscribe.c b/src/kit/taosdemo/src/demoSubscribe.c deleted file mode 100644 index 1386193f4059ad60eb66c08e7078173a99c29da2..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoSubscribe.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "demo.h" - -void stable_sub_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint( - "%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. 
-} - -void specified_sub_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, - int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint( - "%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. -} - -TAOS_SUB *subscribeImpl(QUERY_CLASS class, threadInfo *pThreadInfo, char *sql, - char *topic, bool restart, uint64_t interval) { - TAOS_SUB *tsub = NULL; - - if ((SPECIFIED_CLASS == class) && - (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, restart, topic, sql, specified_sub_callback, - (void *)pThreadInfo, - (int)g_queryInfo.specifiedQueryInfo.subscribeInterval); - } else if ((STABLE_CLASS == class) && - (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { - tsub = - taos_subscribe(pThreadInfo->taos, restart, topic, sql, - stable_sub_callback, (void *)pThreadInfo, - (int)g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe(pThreadInfo->taos, restart, topic, sql, NULL, - NULL, (int)interval); - } - - if (tsub == NULL) { - errorPrint("failed to create subscription. 
topic:%s, sql:%s\n", topic, - sql); - return NULL; - } - - return tsub; -} - -void *specifiedSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - // TAOS_SUB* tsub = NULL; - - setThreadName("specSub"); - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - return NULL; - } - - sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - "taosdemo-subscribe-%" PRIu64 "-%d", pThreadInfo->querySeq, - pThreadInfo->threadID); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != - '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - - // start loop to consume result - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while ((g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq] == -1) || - (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < - g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq])) { - printf("consumed[%d]: 
%d, endAfterConsum[%" PRId64 "]: %d\n", - pThreadInfo->threadID, - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID], - pThreadInfo->querySeq, - g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq]); - if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { - continue; - } - - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = - taos_consume( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); - if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { - if (g_queryInfo.specifiedQueryInfo - .result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo - .result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID]++; - if ((g_queryInfo.specifiedQueryInfo - .resubAfterConsume[pThreadInfo->querySeq] != -1) && - (g_queryInfo.specifiedQueryInfo - .consumed[pThreadInfo->threadID] >= - g_queryInfo.specifiedQueryInfo - .resubAfterConsume[pThreadInfo->querySeq])) { - printf("keepProgress:%d, resub specified query: %" PRIu64 "\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = - 0; - taos_unsubscribe( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - g_queryInfo.specifiedQueryInfo - .tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo - .tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - } - } - } - 
taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); - taos_close(pThreadInfo->taos); - - return NULL; -} - -static void *superSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - char * subSqlStr = calloc(1, BUFFER_SIZE); - if (NULL == subSqlStr) { - errorPrint("%s", "failed to allocate memory\n"); - } - - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT] = {0}; - uint64_t tsubSeq; - - setThreadName("superSub"); - - if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { - free(subSqlStr); - errorPrint("The table number(%" PRId64 - ") of the thread is more than max query sql count: %d\n", - pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - exit(EXIT_FAILURE); - } - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - free(subSqlStr); - return NULL; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint("use database %s failed!\n\n", g_queryInfo.dbName); - free(subSqlStr); - return NULL; - } - - char topic[32] = {0}; - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - verbosePrint("%s() LN%d, [%d], start=%" PRId64 " end=%" PRId64 - " i=%" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->threadID, - pThreadInfo->start_table_from, pThreadInfo->end_table_to, - i); - sprintf(topic, "taosdemo-subscribe-%" PRIu64 "-%" PRIu64 "", i, - pThreadInfo->querySeq); - memset(subSqlStr, 0, BUFFER_SIZE); - replaceChildTblName( - g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], subSqlStr, - (int)i); - if 
(g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", __func__, __LINE__, - pThreadInfo->threadID, subSqlStr); - tsub[tsubSeq] = - subscribeImpl(STABLE_CLASS, pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; - } - } - - // start loop to consume result - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { - consumed[i] = 0; - } - TAOS_RES *res = NULL; - - uint64_t st = 0, et = 0; - - while ( - (g_queryInfo.superQueryInfo.endAfterConsume == -1) || - (g_queryInfo.superQueryInfo.endAfterConsume > - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { - verbosePrint("super endAfterConsume: %d, consumed: %d\n", - g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - - pThreadInfo->start_table_from]); - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { - continue; - } - - st = taosGetTimestampMs(); - performancePrint("st: %" PRIu64 " et: %" PRIu64 " st-et: %" PRIu64 - "\n", - st, et, (st - et)); - res = taos_consume(tsub[tsubSeq]); - et = taosGetTimestampMs(); - performancePrint("st: %" PRIu64 " et: %" PRIu64 " delta: %" PRIu64 - "\n", - st, et, (et - st)); - - if (res) { - if (g_queryInfo.superQueryInfo - .result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo - .result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } - consumed[tsubSeq]++; - - if 
((g_queryInfo.superQueryInfo.resubAfterConsume != -1) && - (consumed[tsubSeq] >= - g_queryInfo.superQueryInfo.resubAfterConsume)) { - verbosePrint( - "%s() LN%d, keepProgress:%d, resub super table query: " - "%" PRIu64 "\n", - __func__, __LINE__, - g_queryInfo.superQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - taos_unsubscribe( - tsub[tsubSeq], - g_queryInfo.superQueryInfo.subscribeKeepProgress); - consumed[tsubSeq] = 0; - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; - } - } - } - } - } - verbosePrint( - "%s() LN%d, super endAfterConsume: %d, consumed: %d\n", __func__, - __LINE__, g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]); - taos_free_result(res); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - taos_unsubscribe(tsub[tsubSeq], 0); - } - - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; -} - -int subscribeTestProcess() { - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - prompt(); - - TAOS *taos = NULL; - taos = - taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - taos_close(taos); // workaround to use separate taos connection; - - pthread_t * pids = NULL; - threadInfo *infos = NULL; - - pthread_t * 
pidsOfStable = NULL; - threadInfo *infosOfStable = NULL; - - //==== create threads for query for specified table - if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, specified query sqlCount %d.\n", __func__, - __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - } else { - if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("specified query sqlCount %d.\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - exit(EXIT_FAILURE); - } - - pids = calloc(1, g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(pthread_t)); - if (pids == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - - infos = calloc(1, g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(threadInfo)); - - if (infos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; - j++) { - uint64_t seq = - i * g_queryInfo.specifiedQueryInfo.concurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - pthread_create(pids + seq, NULL, specifiedSubscribe, - pThreadInfo); - } - } - } - - //==== create threads for super table query - if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table query sqlCount %d.\n", __func__, - __LINE__, g_queryInfo.superQueryInfo.sqlCount); - } else { - if ((g_queryInfo.superQueryInfo.sqlCount > 0) && - (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfStable = calloc(1, g_queryInfo.superQueryInfo.sqlCount * - g_queryInfo.superQueryInfo.threadCnt * - sizeof(pthread_t)); - - if (pidsOfStable) { - errorPrint("%s", "failed to allocate memory\n"); - } - - infosOfStable = calloc(1, g_queryInfo.superQueryInfo.sqlCount * - 
g_queryInfo.superQueryInfo.threadCnt * - sizeof(threadInfo)); - - if (infosOfStable) { - errorPrint("%s", "failed to allocate memmory\n"); - } - - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - uint64_t tableFrom = 0; - for (int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - threadInfo *pThreadInfo = infosOfStable + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = j < b ? a + 1 : a; - pThreadInfo->end_table_to = - j < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - pthread_create(pidsOfStable + seq, NULL, superSubscribe, - pThreadInfo); - } - } - - g_queryInfo.superQueryInfo.threadCnt = threads; - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - for (int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - pthread_join(pidsOfStable[seq], NULL); - } - } - } - } - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { - uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; - pthread_join(pids[seq], NULL); - } - } - - tmfree((char *)pids); - tmfree((char *)infos); - - tmfree((char *)pidsOfStable); - tmfree((char *)infosOfStable); - // taos_close(taos); - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoUtil.c b/src/kit/taosdemo/src/demoUtil.c deleted file mode 100644 index bae2e30f53db95df6024eee4f7c48d601b5240e3..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoUtil.c +++ 
/dev/null @@ -1,594 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "demo.h" - -void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) { - fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, - wrong_value); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorUnrecognized(char *program, char *wrong_arg) { - fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option requires an argument -- '%s'\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg2(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option requires a number argument '-%s'\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg3(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option '%s' requires an argument\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void tmfclose(FILE *fp) { - if (NULL != fp) { - fclose(fp); - } -} - -void tmfree(void *buf) { - if (NULL != buf) { - 
free(buf); - buf = NULL; - } -} - -void ERROR_EXIT(const char *msg) { - errorPrint("%s", msg); - exit(EXIT_FAILURE); -} - -#ifdef WINDOWS -#define _CRT_RAND_S -#include -#include - -typedef unsigned __int32 uint32_t; - -#pragma comment(lib, "ws2_32.lib") -// Some old MinGW/CYGWIN distributions don't define this: -#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING -#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 -#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING - -HANDLE g_stdoutHandle; -DWORD g_consoleMode; - -void setupForAnsiEscape(void) { - DWORD mode = 0; - g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); - - if (g_stdoutHandle == INVALID_HANDLE_VALUE) { - exit(GetLastError()); - } - - if (!GetConsoleMode(g_stdoutHandle, &mode)) { - exit(GetLastError()); - } - - g_consoleMode = mode; - - // Enable ANSI escape codes - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; - - if (!SetConsoleMode(g_stdoutHandle, mode)) { - exit(GetLastError()); - } -} - -void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); - - // Reset console mode - if (!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { - exit(GetLastError()); - } -} - -int taosRandom() { - int number; - rand_s(&number); - - return number; -} -#else // Not windows -void setupForAnsiEscape(void) {} - -void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); -} - -#include - -int taosRandom() { return rand(); } - -#endif - -bool isStringNumber(char *input) { - int len = (int)strlen(input); - if (0 == len) { - return false; - } - - for (int i = 0; i < len; i++) { - if (!isdigit(input[i])) return false; - } - - return true; -} - -char *formatTimestamp(char *buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } - if (precision == TSDB_TIME_PRECISION_NANO) { - tt = (time_t)(val / 1000000000); - } else { - tt = (time_t)(val / 1000); - } - - /* comment out as it make testcases like select_with_tags.sim fail. 
- but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. - if (tt < 0) { - tt = 0; - } - */ - -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif - - struct tm *ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); - - if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else if (precision == TSDB_TIME_PRECISION_NANO) { - sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } - - return buf; -} - -int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName, - char * stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl, - int64_t limit, uint64_t offset, - bool escapChar) { - char command[SQL_BUFF_LEN] = "\0"; - char limitBuf[100] = "\0"; - - TAOS_RES *res; - TAOS_ROW row = NULL; - int64_t childTblCount = (limit < 0) ? DEFAULT_CHILDTABLES : limit; - int64_t count = 0; - char * childTblName = *childTblNameOfSuperTbl; - - if (childTblName == NULL) { - childTblName = (char *)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - if (childTblName == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - } - char *pTblName = childTblName; - - snprintf(limitBuf, 100, " limit %" PRId64 " offset %" PRIu64 "", limit, - offset); - - // get all child table name use cmd: select tbname from superTblName; - snprintf(command, SQL_BUFF_LEN, - escapChar ? 
"select tbname from %s.`%s` %s" - : "select tbname from %s.%s %s", - dbName, stbName, limitBuf); - - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - taos_free_result(res); - taos_close(taos); - errorPrint("failed to run command %s, reason: %s\n", command, - taos_errstr(res)); - exit(EXIT_FAILURE); - } - - while ((row = taos_fetch_row(res)) != NULL) { - int32_t *len = taos_fetch_lengths(res); - - if (0 == strlen((char *)row[0])) { - errorPrint("No.%" PRId64 " table return empty name\n", count); - exit(EXIT_FAILURE); - } - - tstrncpy(pTblName, (char *)row[0], len[0] + 1); - // printf("==== sub table name: %s\n", pTblName); - count++; - if (count >= childTblCount - 1) { - char *tmp = realloc( - childTblName, - (size_t)(childTblCount * 1.5 * TSDB_TABLE_NAME_LEN + 1)); - if (tmp != NULL) { - childTblName = tmp; - childTblCount = (int)(childTblCount * 1.5); - memset(childTblName + count * TSDB_TABLE_NAME_LEN, 0, - (size_t)((childTblCount - count) * TSDB_TABLE_NAME_LEN)); - } else { - // exit, if allocate more memory failed - tmfree(childTblName); - taos_free_result(res); - taos_close(taos); - errorPrint( - "%s() LN%d, realloc fail for save child table name of " - "%s.%s\n", - __func__, __LINE__, dbName, stbName); - exit(EXIT_FAILURE); - } - } - pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; - } - - *childTblCountOfSuperTbl = count; - *childTblNameOfSuperTbl = childTblName; - - taos_free_result(res); - return 0; -} - -int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl) { - return getChildNameOfSuperTableWithLimitAndOffset( - taos, dbName, stbName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, - -1, 0, false); -} - -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr) { - uint16_t rest_port = port + TSDB_PORT_HTTP; - struct hostent *server = gethostbyname(host); - if ((server == NULL) || 
(server->h_addr == NULL)) { - errorPrint("%s", "no such host"); - return -1; - } - - debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", - server->h_name, server->h_addr, - (server->h_addrtype == AF_INET) ? "ipv4" : "ipv6", - server->h_length); - - memset(serv_addr, 0, sizeof(struct sockaddr_in)); - serv_addr->sin_family = AF_INET; - serv_addr->sin_port = htons(rest_port); -#ifdef WINDOWS - serv_addr->sin_addr.s_addr = inet_addr(host); -#else - memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); -#endif - return 0; -} - -void prompt() { - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } -} - -void replaceChildTblName(char *inSql, char *outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[TSDB_TABLE_NAME_LEN]; - sprintf(subTblName, "%s.%s", g_queryInfo.dbName, - g_queryInfo.superQueryInfo.childTblName + - tblIndex * TSDB_TABLE_NAME_LEN); - - // printf("inSql: %s\n", inSql); - - char *pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - tstrncpy(outSql, inSql, pos - inSql + 1); - // printf("1: %s\n", outSql); - strncat(outSql, subTblName, BUFFER_SIZE - 1); - // printf("2: %s\n", outSql); - strncat(outSql, pos + strlen(sourceString), BUFFER_SIZE - 1); - // printf("3: %s\n", outSql); -} - -int isCommentLine(char *line) { - if (line == NULL) return 1; - - return regexMatch(line, "^\\s*#.*", REG_EXTENDED); -} - -int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; - - /* Compile regular expression */ - if (regcomp(®ex, reg, cflags) != 0) { - ERROR_EXIT("Fail to compile regex\n"); - } - - /* Execute regular expression */ - int reti = regexec(®ex, s, 0, NULL, 0); - if (!reti) { - regfree(®ex); - return 1; - } else if (reti == REG_NOMATCH) { - regfree(®ex); - return 0; - } else { - regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - regfree(®ex); - printf("Regex match failed: %s\n", 
msgbuf); - exit(EXIT_FAILURE); - } - return 0; -} - -int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - - if (code != 0) { - if (!quiet) { - errorPrint("Failed to execute <%s>, reason: %s\n", command, - taos_errstr(res)); - } - taos_free_result(res); - // taos_close(taos); - return -1; - } - - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; -} - -int postProceSql(char *host, uint16_t port, char *sqlstr, - threadInfo *pThreadInfo) { - char *req_fmt = - "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: " - "Basic %s\r\nContent-Length: %d\r\nContent-Type: " - "application/x-www-form-urlencoded\r\n\r\n%s"; - - char *url = "/rest/sql"; - - int bytes, sent, received, req_str_len, resp_len; - char * request_buf; - char response_buf[RESP_BUF_LEN]; - uint16_t rest_port = port + TSDB_PORT_HTTP; - - int req_buf_len = (int)strlen(sqlstr) + REQ_EXTRA_BUF_LEN; - - request_buf = malloc(req_buf_len); - if (NULL == request_buf) { - errorPrint("%s", "cannot allocate memory.\n"); - exit(EXIT_FAILURE); - } - - char userpass_buf[INPUT_BUF_LEN]; - int mod_table[] = {0, 2, 1}; - - static char base64[] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', - 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'}; - - if (g_args.test_mode == INSERT_TEST) { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", g_Dbs.user, - g_Dbs.password); - } else { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", g_queryInfo.user, - g_queryInfo.password); - } - - size_t 
userpass_buf_len = strlen(userpass_buf); - size_t encoded_len = 4 * ((userpass_buf_len + 2) / 3); - - char base64_buf[INPUT_BUF_LEN]; - - memset(base64_buf, 0, INPUT_BUF_LEN); - - for (int n = 0, m = 0; n < userpass_buf_len;) { - uint32_t oct_a = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t oct_b = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t oct_c = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; - - base64_buf[m++] = base64[(triple >> 3 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 2 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 1 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 0 * 6) & 0x3f]; - } - - for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) - base64_buf[encoded_len - 1 - l] = '='; - - debugPrint("%s() LN%d: auth string base64 encoded: %s\n", __func__, - __LINE__, base64_buf); - char *auth = base64_buf; - - int r = snprintf(request_buf, req_buf_len, req_fmt, url, host, rest_port, - auth, strlen(sqlstr), sqlstr); - if (r >= req_buf_len) { - free(request_buf); - ERROR_EXIT("too long request"); - } - verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); - - req_str_len = (int)strlen(request_buf); - sent = 0; - do { -#ifdef WINDOWS - bytes = send(pThreadInfo->sockfd, request_buf + sent, - req_str_len - sent, 0); -#else - bytes = - write(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent); -#endif - if (bytes < 0) ERROR_EXIT("writing message to socket"); - if (bytes == 0) break; - sent += bytes; - } while (sent < req_str_len); - - memset(response_buf, 0, RESP_BUF_LEN); - resp_len = sizeof(response_buf) - 1; - received = 0; - - char resEncodingChunk[] = "Encoding: chunked"; - char resHttp[] = "HTTP/1.1 "; - char resHttpOk[] = "HTTP/1.1 200 OK"; - - do { -#ifdef WINDOWS - bytes = recv(pThreadInfo->sockfd, response_buf + received, - resp_len - received, 0); -#else - bytes = 
read(pThreadInfo->sockfd, response_buf + received, - resp_len - received); -#endif - verbosePrint("%s() LN%d: bytes:%d\n", __func__, __LINE__, bytes); - if (bytes < 0) { - free(request_buf); - ERROR_EXIT("reading response from socket"); - } - if (bytes == 0) break; - received += bytes; - - verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - - response_buf[RESP_BUF_LEN - 1] = '\0'; - if (strlen(response_buf)) { - verbosePrint( - "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - - if (((NULL != strstr(response_buf, resEncodingChunk)) && - (NULL != strstr(response_buf, resHttp))) || - ((NULL != strstr(response_buf, resHttpOk)) && - (NULL != strstr(response_buf, "\"status\":")))) { - debugPrint( - "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - break; - } - } - } while (received < resp_len); - - if (received == resp_len) { - free(request_buf); - ERROR_EXIT("storing complete response from socket"); - } - - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(response_buf, pThreadInfo); - } - - free(request_buf); - - if (NULL == strstr(response_buf, resHttpOk)) { - errorPrint("Response:\n%s\n", response_buf); - return -1; - } - return 0; -} - -void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo) { - TAOS_ROW row = NULL; - int num_rows = 0; - int num_fields = taos_field_count(res); - TAOS_FIELD *fields = taos_fetch_fields(res); - - char *databuf = (char *)calloc(1, FETCH_BUFFER_SIZE); - if (databuf == NULL) { - errorPrint( - "%s() LN%d, failed to malloc, warning: save result to file " - "slowly!\n", - __func__, __LINE__); - return; - } - - int64_t totalLen = 0; - - // fetch the records row by row - while ((row = taos_fetch_row(res))) { - if (totalLen >= (FETCH_BUFFER_SIZE - HEAD_BUFF_LEN * 2)) { - if (strlen(pThreadInfo->filePath) > 0) - 
appendResultBufToFile(databuf, pThreadInfo); - totalLen = 0; - memset(databuf, 0, FETCH_BUFFER_SIZE); - } - num_rows++; - char temp[HEAD_BUFF_LEN] = {0}; - int len = taos_print_row(temp, row, fields, num_fields); - len += sprintf(temp + len, "\n"); - // printf("query result:%s\n", temp); - memcpy(databuf + totalLen, temp, len); - totalLen += len; - verbosePrint("%s() LN%d, totalLen: %" PRId64 "\n", __func__, __LINE__, - totalLen); - } - - verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", __func__, __LINE__, - databuf, pThreadInfo->filePath); - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(databuf, pThreadInfo); - } - free(databuf); -} \ No newline at end of file diff --git a/src/kit/taosdemo/subscribe.json b/src/kit/taosdemo/subscribe.json deleted file mode 100644 index 9faf03a03d03b8baeffeb6a4397d1727dde0c594..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/subscribe.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "filetype": "subscribe", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "test", - "specified_table_query": { - "concurrent": 1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "resubAfterConsume": 10, - "sqls": [ - { - "sql": "select avg(col1) from meters where col1 > 1;", - "result": "./subscribe_res0.txt" - } - ] - }, - "super_table_query": { - "stblname": "meters", - "threads": 1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "sqls": [ - { - "sql": "select col1 from xxxx where col1 > 10;", - "result": "./subscribe_res1.txt" - } - ] - } -} diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt deleted file mode 100644 index 1daff0c75956072e02f8439acac2850b9315235a..0000000000000000000000000000000000000000 --- a/src/kit/taosdump/CMakeLists.txt +++ /dev/null @@ -1,92 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) -PROJECT(TDengine) - 
-INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) -INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) -INCLUDE_DIRECTORIES(inc) -AUX_SOURCE_DIRECTORY(. SRC) - -FIND_PACKAGE(Git) -IF(GIT_FOUND) - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdump.c - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDUMP_COMMIT_SHA1 - ) - IF ("${TAOSDUMP_COMMIT_SHA1}" STREQUAL "") - SET(TAOSDUMP_COMMIT_SHA1 "unknown") - ELSE () - STRING(SUBSTRING "${TAOSDUMP_COMMIT_SHA1}" 0 7 TAOSDUMP_COMMIT_SHA1) - STRING(STRIP "${TAOSDUMP_COMMIT_SHA1}" TAOSDUMP_COMMIT_SHA1) - ENDIF () - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdump.c - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDUMP_STATUS - ) - IF (TD_LINUX) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDUMP_STATUS}' | awk '{print $1}'" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDUMP_STATUS - ) - ENDIF (TD_LINUX) -ELSE() - MESSAGE("Git not found") - SET(TAOSDUMP_COMMIT_SHA1 "unknown") - SET(TAOSDUMP_STATUS "unknown") -ENDIF (GIT_FOUND) - -MESSAGE("taosdump's latest commit in short is:" ${TAOSDUMP_COMMIT_SHA1}) -STRING(STRIP "${TAOSDUMP_STATUS}" TAOSDUMP_STATUS) - -IF (TAOSDUMP_STATUS MATCHES "M") - SET(TAOSDUMP_STATUS "modified") -ELSE() - SET(TAOSDUMP_STATUS "") -ENDIF () - -MESSAGE("taosdump's status is:" ${TAOSDUMP_STATUS}) - -ADD_DEFINITIONS(-DTAOSDUMP_COMMIT_SHA1="${TAOSDUMP_COMMIT_SHA1}") -ADD_DEFINITIONS(-DTAOSDUMP_STATUS="${TAOSDUMP_STATUS}") - -MESSAGE("TD_VER_NUMBER is:" ${TD_VER_NUMBER}) -IF ("${TD_VER_NUMBER}" STREQUAL "") - SET(TD_VERSION_NUMBER "TDengine-version-unknown") -ELSE() - SET(TD_VERSION_NUMBER ${TD_VER_NUMBER}) -ENDIF () -MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) -ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") - -LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib 
${CMAKE_BINARY_DIR}/build/lib64) - -IF (TD_LINUX) - ADD_EXECUTABLE(taosdump ${SRC}) - IF (TD_SOMODE_STATIC) - IF (TD_AVRO_SUPPORT) - TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson) - ELSE () - TARGET_LINK_LIBRARIES(taosdump taos_static) - ENDIF() - ELSE () - IF (TD_AVRO_SUPPORT) - TARGET_LINK_LIBRARIES(taosdump taos avro jansson) - ELSE () - TARGET_LINK_LIBRARIES(taosdump taos) - ENDIF () - ENDIF () -ENDIF () - -IF (TD_DARWIN) - # missing for macosx - # ADD_EXECUTABLE(taosdump ${SRC}) - # IF (TD_SOMODE_STATIC) - # TARGET_LINK_LIBRARIES(taosdump taos_static jansson) - # ELSE () - # TARGET_LINK_LIBRARIES(taosdump taos jansson) - # ENDIF () -ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c deleted file mode 100644 index d552e6123fd6d3e496006a0cb79f662d5c139cc1..0000000000000000000000000000000000000000 --- a/src/kit/taosdump/taosdump.c +++ /dev/null @@ -1,4050 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include -#include -#include - -#include "os.h" -#include "taos.h" -#include "taosdef.h" -#include "taosmsg.h" -#include "tglobal.h" -#include "tsclient.h" -#include "tsdb.h" -#include "tutil.h" - - -static char **g_tsDumpInSqlFiles = NULL; -static char g_tsCharset[63] = {0}; - -#ifdef AVRO_SUPPORT -#include -#include - -static char **g_tsDumpInAvroFiles = NULL; - -static void print_json_aux(json_t *element, int indent); - -#endif /* AVRO_SUPPORT */ - -#define TSDB_SUPPORT_NANOSECOND 1 - -#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255 -#define MAX_PATH_LEN 4096 // max path length on linux is 4095 -#define COMMAND_SIZE 65536 -#define MAX_RECORDS_PER_REQ 32766 -//#define DEFAULT_DUMP_FILE "taosdump.sql" - -// for strncpy buffer overflow -#define min(a, b) (((a) < (b)) ? (a) : (b)) - -static int converStringToReadable(char *str, int size, char *buf, int bufsize); -static int convertNCharToReadable(char *str, int size, char *buf, int bufsize); - -typedef struct { - short bytes; - int8_t type; -} SOColInfo; - -#define debugPrint(fmt, ...) \ - do { if (g_args.debug_print || g_args.verbose_print) \ - fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) - -#define verbosePrint(fmt, ...) \ - do { if (g_args.verbose_print) \ - fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) - -#define performancePrint(fmt, ...) \ - do { if (g_args.performance_print) \ - fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) - -#define warnPrint(fmt, ...) \ - do { fprintf(stderr, "\033[33m"); \ - fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); } while(0) - -#define errorPrint(fmt, ...) \ - do { fprintf(stderr, "\033[31m"); \ - fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); } while(0) - -#define okPrint(fmt, ...) 
\ - do { fprintf(stderr, "\033[32m"); \ - fprintf(stderr, "OK: "fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); } while(0) - -static bool isStringNumber(char *input) -{ - int len = strlen(input); - if (0 == len) { - return false; - } - - for (int i = 0; i < len; i++) { - if (!isdigit(input[i])) - return false; - } - - return true; -} - -// -------------------------- SHOW DATABASE INTERFACE----------------------- -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- -enum _show_tables_index { - TSDB_SHOW_TABLES_NAME_INDEX, - TSDB_SHOW_TABLES_CREATED_TIME_INDEX, - TSDB_SHOW_TABLES_COLUMNS_INDEX, - TSDB_SHOW_TABLES_METRIC_INDEX, - TSDB_SHOW_TABLES_UID_INDEX, - TSDB_SHOW_TABLES_TID_INDEX, - TSDB_SHOW_TABLES_VGID_INDEX, - TSDB_MAX_SHOW_TABLES -}; - -// ---------------------------------- DESCRIBE STABLE CONFIGURE ------------------------------ -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -#define COL_NOTE_LEN 4 -#define COL_TYPEBUF_LEN 16 -#define COL_VALUEBUF_LEN 32 - -typedef struct { - char field[TSDB_COL_NAME_LEN]; - char type[COL_TYPEBUF_LEN]; - int length; - char note[COL_NOTE_LEN]; - char value[COL_VALUEBUF_LEN]; - char *var_value; -} ColDes; - -typedef 
struct { - char name[TSDB_TABLE_NAME_LEN]; - ColDes cols[]; -} TableDef; - -extern char version[]; - -#define DB_PRECISION_LEN 8 -#define DB_STATUS_LEN 16 - -typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - bool belongStb; - char stable[TSDB_TABLE_NAME_LEN]; -} TableInfo; - -typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - char stable[TSDB_TABLE_NAME_LEN]; -} TableRecord; - -typedef struct { - bool isStb; - bool belongStb; - int64_t dumpNtbCount; - TableRecord **dumpNtbInfos; - TableRecord tableRecord; -} TableRecordInfo; - -typedef struct { - char name[TSDB_DB_NAME_LEN]; - char create_time[32]; - int64_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[32]; - //int16_t daysToKeep; - //int16_t daysToKeep1; - //int16_t daysToKeep2; - int32_t cache; //MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[DB_PRECISION_LEN]; // time resolution - int8_t update; - char status[DB_STATUS_LEN]; - int64_t dumpTbCount; - TableRecordInfo **dumpTbInfos; -} SDbInfo; - -typedef struct { - pthread_t threadID; - int32_t threadIndex; - char dbName[TSDB_DB_NAME_LEN]; - char stbName[TSDB_TABLE_NAME_LEN]; - int precision; - TAOS *taos; - int64_t rowsOfDumpOut; - int64_t count; - int64_t from; -} threadInfo; - -typedef struct { - int64_t totalRowsOfDumpOut; - int64_t totalChildTblsOfDumpOut; - int32_t totalSuperTblsOfDumpOut; - int32_t totalDatabasesOfDumpOut; -} resultStatistics; - -#ifdef AVRO_SUPPORT - -enum enAvro_Codec { - AVRO_CODEC_START = 0, - AVRO_CODEC_NULL = AVRO_CODEC_START, - AVRO_CODEC_DEFLATE, - AVRO_CODEC_SNAPPY, - AVRO_CODEC_LZMA, - AVRO_CODEC_UNKNOWN = 255 -}; - -char *g_avro_codec[] = { - "null", - "deflate", - "snappy", - "lzma", - "unknown" -}; - -/* avro sectin begin */ -#define RECORD_NAME_LEN 64 -#define FIELD_NAME_LEN 64 -#define TYPE_NAME_LEN 16 - -typedef struct FieldStruct_S { - char name[FIELD_NAME_LEN]; - char 
type[TYPE_NAME_LEN]; -} FieldStruct; - -typedef struct RecordSchema_S { - char name[RECORD_NAME_LEN]; - char *fields; - int num_fields; -} RecordSchema; - -/* avro section end */ -#endif - -static int64_t g_totalDumpOutRows = 0; - -SDbInfo **g_dbInfos = NULL; -TableInfo *g_tablesList = NULL; - -const char *argp_program_version = version; -const char *argp_program_bug_address = ""; - -/* Program documentation. */ -static char doc[] = ""; -/* "Argp example #4 -- a program with somewhat more complicated\ */ -/* options\ */ -/* \vThis part of the documentation comes *after* the options;\ */ -/* note that the text is automatically filled, but it's possible\ */ -/* to force a line-break, e.g.\n<-- here."; */ - -/* A description of the arguments we accept. */ -static char args_doc[] = "dbname [tbname ...]\n--databases db1,db2,... \n--all-databases\n-i inpath\n-o outpath"; - -/* Keys for options without short-options. */ -#define OPT_ABORT 1 /* –abort */ - -/* The options we understand. */ -static struct argp_option options[] = { - // connection option - {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, - {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, -#ifdef _TD_POWER_ - {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0}, -#else - {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, -#endif - {"port", 'P', "PORT", 0, "Port to connect", 0}, - {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, - // input/output file - {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, - {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, - {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, -#ifdef _TD_POWER_ - {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, -#else - {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. 
Default is /etc/taos/taos.cfg.", 1}, -#endif - {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, - // dump unit options - {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'D', "DATABASES", 0, "Dump inputed databases. Use comma to seprate databases\' name.", 2}, - {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2}, - // dump format options - {"schemaonly", 's', 0, 0, "Only dump schema.", 2}, - {"without-property", 'N', 0, 0, "Dump schema without properties.", 2}, -#ifdef AVRO_SUPPORT - {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3}, - {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4}, -#endif - {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8}, - {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9}, - {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10}, - {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10}, - {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10}, - {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10}, - {"debug", 'g', 0, 0, "Print debug info.", 15}, - {0} -}; - -#define HUMAN_TIME_LEN 28 - -/* Used by main to communicate with parse_opt. 
*/ -typedef struct arguments { - // connection option - char *host; - char *user; - char password[SHELL_MAX_PASSWORD_LEN]; - uint16_t port; - uint16_t mysqlFlag; - // output file - char outpath[MAX_FILE_NAME_LEN]; - char inpath[MAX_FILE_NAME_LEN]; - // result file - char *resultFile; - char *encode; - // dump unit option - bool all_databases; - bool databases; - char *databasesSeq; - // dump format option - bool schemaonly; - bool with_property; -#ifdef AVRO_SUPPORT - bool avro; - int avro_codec; -#endif - int64_t start_time; - char humanStartTime[HUMAN_TIME_LEN]; - int64_t end_time; - char humanEndTime[HUMAN_TIME_LEN]; - char precision[8]; - - int32_t data_batch; - int32_t max_sql_len; - int32_t table_batch; // num of table which will be dump into one output file. - bool allow_sys; - // other options - int32_t thread_num; - int abort; - char **arg_list; - int arg_list_len; - bool isDumpIn; - bool debug_print; - bool verbose_print; - bool performance_print; - - int dumpDbCount; -} SArguments; - -/* Our argp parser. 
*/ -static error_t parse_opt(int key, char *arg, struct argp_state *state); - -static struct argp argp = {options, parse_opt, args_doc, doc}; -static resultStatistics g_resultStatistics = {0}; -static FILE *g_fpOfResult = NULL; -static int g_numOfCores = 1; - -struct arguments g_args = { - // connection option - NULL, - "root", -#ifdef _TD_POWER_ - "powerdb", -#else - "taosdata", -#endif - 0, - 0, - // outpath and inpath - "", - "", - "./dump_result.txt", - NULL, - // dump unit option - false, // all_databases - false, // databases - NULL, // databasesSeq - // dump format option - false, // schemaonly - true, // with_property -#ifdef AVRO_SUPPORT - false, // avro - AVRO_CODEC_SNAPPY, // avro_codec -#endif - -INT64_MAX + 1, // start_time - {0}, // humanStartTime - INT64_MAX, // end_time - {0}, // humanEndTime - "ms", // precision - 1, // data_batch - TSDB_MAX_SQL_LEN, // max_sql_len - 1, // table_batch - false, // allow_sys - // other options - 8, // thread_num - 0, // abort - NULL, // arg_list - 0, // arg_list_len - false, // isDumpIn - false, // debug_print - false, // verbose_print - false, // performance_print - 0, // dumpDbCount -}; - -// get taosdump commit number version -#ifndef TAOSDUMP_COMMIT_SHA1 -#define TAOSDUMP_COMMIT_SHA1 "unknown" -#endif - -#ifndef TD_VERNUMBER -#define TD_VERNUMBER "unknown" -#endif - -#ifndef TAOSDUMP_STATUS -#define TAOSDUMP_STATUS "unknown" -#endif - -static void printVersion() { - char tdengine_ver[] = TD_VERNUMBER; - char taosdump_ver[] = TAOSDUMP_COMMIT_SHA1; - char taosdump_status[] = TAOSDUMP_STATUS; - - if (strlen(taosdump_status) == 0) { - printf("taosdump version %s-%s\n", - tdengine_ver, taosdump_ver); - } else { - printf("taosdump version %s-%s, status:%s\n", - tdengine_ver, taosdump_ver, taosdump_status); - } -} - -void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) -{ - fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value); - fprintf(stderr, "Try `taosdump --help' or 
`taosdump --usage' for more information.\n"); -} - -static void errorUnrecognized(char *program, char *wrong_arg) -{ - fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); - fprintf(stderr, "Try `taosdump --help' or `taosdump --usage' for more information.\n"); -} - -static void errorPrintReqArg(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option requires an argument -- '%s'\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdump --help' or `taosdump --usage' for more information.\n"); -} - -static void errorPrintReqArg2(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option requires a number argument '-%s'\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdump --help' or `taosdump --usage' for more information.\n"); -} - -static void errorPrintReqArg3(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option '%s' requires an argument\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdump --help' or `taosdump --usage' for more information.\n"); -} - -/* Parse a single option. */ -static error_t parse_opt(int key, char *arg, struct argp_state *state) { - /* Get the input argument from argp_parse, which we - know is a pointer to our arguments structure. 
*/ - wordexp_t full_path; - - switch (key) { - // connection option - case 'a': - g_args.allow_sys = true; - break; - case 'h': - g_args.host = arg; - break; - case 'u': - g_args.user = arg; - break; - case 'p': - break; - case 'P': - if (!isStringNumber(arg)) { - errorPrintReqArg2("taosdump", "P"); - exit(EXIT_FAILURE); - } - - uint64_t port = atoi(arg); - if (port > 65535) { - errorWrongValue("taosdump", "-P or --port", arg); - exit(EXIT_FAILURE); - } - g_args.port = (uint16_t)port; - - break; - case 'q': - g_args.mysqlFlag = atoi(arg); - break; - case 'o': - if (wordexp(arg, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", arg); - return -1; - } - - if (full_path.we_wordv[0]) { - tstrncpy(g_args.outpath, full_path.we_wordv[0], - MAX_FILE_NAME_LEN); - wordfree(&full_path); - } else { - errorPrintReqArg3("taosdump", "-o or --outpath"); - exit(EXIT_FAILURE); - } - break; - - case 'g': - g_args.debug_print = true; - break; - - case 'i': - g_args.isDumpIn = true; - if (wordexp(arg, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", arg); - return -1; - } - - if (full_path.we_wordv[0]) { - tstrncpy(g_args.inpath, full_path.we_wordv[0], - MAX_FILE_NAME_LEN); - wordfree(&full_path); - } else { - errorPrintReqArg3("taosdump", "-i or --inpath"); - exit(EXIT_FAILURE); - } - break; - -#ifdef AVRO_SUPPORT - case 'v': - g_args.avro = true; - break; - - case 'd': - for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) { - if (0 == strcmp(arg, g_avro_codec[i])) { - g_args.avro_codec = i; - break; - } - } - break; -#endif - - case 'r': - g_args.resultFile = arg; - break; - case 'c': - if (0 == strlen(arg)) { - errorPrintReqArg3("taosdump", "-c or --config-dir"); - exit(EXIT_FAILURE); - } - if (wordexp(arg, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", arg); - exit(EXIT_FAILURE); - } - tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN); - wordfree(&full_path); - break; - case 'e': - g_args.encode = arg; - break; - // dump unit option - 
case 'A': - break; - case 'D': - g_args.databases = true; - break; - // dump format option - case 's': - g_args.schemaonly = true; - break; - case 'N': - g_args.with_property = false; - break; - case 'S': - // parse time here. - break; - case 'E': - break; - case 'B': - g_args.data_batch = atoi(arg); - if (g_args.data_batch > MAX_RECORDS_PER_REQ) { - g_args.data_batch = MAX_RECORDS_PER_REQ; - } - break; - case 'L': - { - int32_t len = atoi(arg); - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < TSDB_MAX_SQL_LEN) { - len = TSDB_MAX_SQL_LEN; - } - g_args.max_sql_len = len; - break; - } - case 't': - g_args.table_batch = atoi(arg); - break; - case 'T': - if (!isStringNumber(arg)) { - errorPrint("%s", "\n\t-T need a number following!\n"); - exit(EXIT_FAILURE); - } - g_args.thread_num = atoi(arg); - break; - case OPT_ABORT: - g_args.abort = 1; - break; - case ARGP_KEY_ARG: - if (strlen(state->argv[state->next - 1])) { - g_args.arg_list = &state->argv[state->next - 1]; - g_args.arg_list_len = state->argc - state->next + 1; - } - state->next = state->argc; - break; - - default: - return ARGP_ERR_UNKNOWN; - } - return 0; -} - -static void freeTbDes(TableDef *tableDes) -{ - for (int i = 0; i < TSDB_MAX_COLUMNS; i ++) { - if (tableDes->cols[i].var_value) { - free(tableDes->cols[i].var_value); - } - } - - free(tableDes); -} - -static int queryDbImpl(TAOS *taos, char *command) { - TAOS_RES *res = NULL; - int32_t code = -1; - - res = taos_query(taos, command); - code = taos_errno(res); - - if (code != 0) { - errorPrint("Failed to run <%s>, reason: %s\n", - command, taos_errstr(res)); - taos_free_result(res); - //taos_close(taos); - return code; - } - - taos_free_result(res); - return 0; -} - -static void parse_args( - int argc, char *argv[], SArguments *arguments) { - - for (int i = 1; i < argc; i++) { - if ((strncmp(argv[i], "-p", 2) == 0) - || (strncmp(argv[i], "--password", 10) == 0)) { - if ((strlen(argv[i]) == 2) - || 
(strncmp(argv[i], "--password", 10) == 0)) { - printf("Enter password: "); - taosSetConsoleEcho(false); - if(scanf("%20s", arguments->password) > 1) { - errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); - } - taosSetConsoleEcho(true); - } else { - tstrncpy(arguments->password, (char *)(argv[i] + 2), - SHELL_MAX_PASSWORD_LEN); - strcpy(argv[i], "-p"); - } - } else if (strcmp(argv[i], "-gg") == 0) { - arguments->verbose_print = true; - strcpy(argv[i], ""); - } else if (strcmp(argv[i], "-PP") == 0) { - arguments->performance_print = true; - strcpy(argv[i], ""); - } else if ((strcmp(argv[i], "-A") == 0) - || (0 == strncmp( - argv[i], "--all-database", - strlen("--all-database")))) { - g_args.all_databases = true; - } else if ((strncmp(argv[i], "-D", strlen("-D")) == 0) - || (0 == strncmp( - argv[i], "--database", - strlen("--database")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "D"); - exit(EXIT_FAILURE); - } - arguments->databasesSeq = argv[++i]; - } else if (0 == strncmp(argv[i], "--databases=", strlen("--databases="))) { - arguments->databasesSeq = (char *)(argv[i] + strlen("--databases=")); - } else if (0 == strncmp(argv[i], "-D", strlen("-D"))) { - arguments->databasesSeq = (char *)(argv[i] + strlen("-D")); - } else if (strlen("--databases") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--databases"); - exit(EXIT_FAILURE); - } - arguments->databasesSeq = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - g_args.databases = true; - } else if (0 == strncmp(argv[i], "--version", strlen("--version")) || - 0 == strncmp(argv[i], "-V", strlen("-V"))) { - printVersion(); - exit(EXIT_SUCCESS); - } else { - continue; - } - - } -} - -static void copyHumanTimeToArg(char *timeStr, bool isStartTime) -{ - if (isStartTime) - tstrncpy(g_args.humanStartTime, timeStr, HUMAN_TIME_LEN); - else - tstrncpy(g_args.humanEndTime, timeStr, HUMAN_TIME_LEN); -} - 
-static void copyTimestampToArg(char *timeStr, bool isStartTime) -{ - if (isStartTime) - g_args.start_time = atol(timeStr); - else - g_args.end_time = atol(timeStr); -} - -static void parse_timestamp( - int argc, char *argv[], SArguments *arguments) { - for (int i = 1; i < argc; i++) { - char *tmp; - bool isStartTime = false; - bool isEndTime = false; - - if (strcmp(argv[i], "-S") == 0) { - isStartTime = true; - } else if (strcmp(argv[i], "-E") == 0) { - isEndTime = true; - } - - if (isStartTime || isEndTime) { - if (NULL == argv[i+1]) { - errorPrint("%s need a valid value following!\n", argv[i]); - exit(-1); - } - tmp = strdup(argv[i+1]); - - if (strchr(tmp, ':') && strchr(tmp, '-')) { - copyHumanTimeToArg(tmp, isStartTime); - } else { - copyTimestampToArg(tmp, isStartTime); - } - - free(tmp); - } - } -} - -static int getPrecisionByString(char *precision) -{ - if (0 == strncasecmp(precision, - "ms", 2)) { - return TSDB_TIME_PRECISION_MILLI; - } else if (0 == strncasecmp(precision, - "us", 2)) { - return TSDB_TIME_PRECISION_MICRO; -#if TSDB_SUPPORT_NANOSECOND == 1 - } else if (0 == strncasecmp(precision, - "ns", 2)) { - return TSDB_TIME_PRECISION_NANO; -#endif - } else { - errorPrint("Invalid time precision: %s", - precision); - } - - return -1; -} - -static void freeDbInfos() { - if (g_dbInfos == NULL) return; - for (int i = 0; i < g_args.dumpDbCount; i++) - tfree(g_dbInfos[i]); - tfree(g_dbInfos); -} - -// check table is normal table or super table -static int getTableRecordInfo( - char *dbName, - char *table, TableRecordInfo *pTableRecordInfo) { - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, - dbName, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return -1; - } - - TAOS_ROW row = NULL; - bool isSet = false; - TAOS_RES *result = NULL; - - memset(pTableRecordInfo, 0, sizeof(TableRecordInfo)); - - char command[COMMAND_SIZE]; - - sprintf(command, "USE %s", dbName); - result = 
taos_query(taos, command); - int32_t code = taos_errno(result); - if (code != 0) { - errorPrint("invalid database %s, reason: %s\n", - dbName, taos_errstr(result)); - return 0; - } - - sprintf(command, "SHOW TABLES LIKE \'%s\'", table); - - result = taos_query(taos, command); - code = taos_errno(result); - - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - taos_free_result(result); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - while ((row = taos_fetch_row(result)) != NULL) { - isSet = true; - pTableRecordInfo->isStb = false; - tstrncpy(pTableRecordInfo->tableRecord.name, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - min(TSDB_TABLE_NAME_LEN, - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes + 1)); - if (strlen((char *)row[TSDB_SHOW_TABLES_METRIC_INDEX]) > 0) { - pTableRecordInfo->belongStb = true; - tstrncpy(pTableRecordInfo->tableRecord.stable, - (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - min(TSDB_TABLE_NAME_LEN, - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes + 1)); - } else { - pTableRecordInfo->belongStb = false; - } - break; - } - - taos_free_result(result); - result = NULL; - - if (isSet) { - return 0; - } - - sprintf(command, "SHOW STABLES LIKE \'%s\'", table); - - result = taos_query(taos, command); - code = taos_errno(result); - - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - taos_free_result(result); - return -1; - } - - while ((row = taos_fetch_row(result)) != NULL) { - isSet = true; - pTableRecordInfo->isStb = true; - tstrncpy(pTableRecordInfo->tableRecord.stable, table, - TSDB_TABLE_NAME_LEN); - break; - } - - taos_free_result(result); - result = NULL; - - if (isSet) { - return 0; - } - errorPrint("%s() LN%d, invalid table/stable %s\n", - __func__, __LINE__, table); - return -1; -} - -static int inDatabasesSeq( - char *name, - int len) -{ - if (strstr(g_args.databasesSeq, ",") == NULL) { - if (0 == strncmp(g_args.databasesSeq, name, len)) { - return 0; - } - } else { - char *dupSeq = strdup(g_args.databasesSeq); - char *running = dupSeq; - char *dbname = strsep(&running, ","); - while (dbname) { - if (0 == strncmp(dbname, name, len)) { - tfree(dupSeq); - return 0; - } - - dbname = strsep(&running, ","); - } - } - - return -1; -} - -static int getDumpDbCount() -{ - int count = 0; - - TAOS *taos = NULL; - TAOS_RES *result = NULL; - char *command = "show databases"; - TAOS_ROW row; - - /* Connect to server */ - taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (NULL == taos) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return 0; - } - - result = taos_query(taos, command); - int32_t code = taos_errno(result); - - if (0 != code) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - taos_close(taos); - return 0; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); - - while ((row = taos_fetch_row(result)) != NULL) { - // sys database name : 'log', but subsequent version changed to 'log' - if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - && (!g_args.allow_sys)) { - continue; - } - - if (g_args.databases) { // input multi dbs - if (inDatabasesSeq( - (char 
*)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) - continue; - } else if (!g_args.all_databases) { // only input one db - if (strncasecmp(g_args.arg_list[0], - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) - continue; - } - - count++; - } - - if (count == 0) { - errorPrint("%d databases valid to dump\n", count); - } - - taos_close(taos); - return count; -} - -static void dumpCreateMTableClause( - char* dbName, - char *stable, - TableDef *tableDes, - int numOfCols, - FILE *fp - ) { - int counter = 0; - int count_temp = 0; - - char* tmpBuf = (char *)malloc(COMMAND_SIZE); - if (tmpBuf == NULL) { - errorPrint("%s() LN%d, failed to allocate %d memory\n", - __func__, __LINE__, COMMAND_SIZE); - return; - } - - char *pstr = NULL; - pstr = tmpBuf; - - pstr += sprintf(tmpBuf, - "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", - dbName, tableDes->name, dbName, stable); - - for (; counter < numOfCols; counter++) { - if (tableDes->cols[counter].note[0] != '\0') break; - } - - assert(counter < numOfCols); - count_temp = counter; - - for (; counter < numOfCols; counter++) { - if (counter != count_temp) { - if (0 == strcasecmp(tableDes->cols[counter].type, "binary") - || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { - //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); - if (tableDes->cols[counter].var_value) { - pstr += sprintf(pstr, ", \'%s\'", - tableDes->cols[counter].var_value); - } else { - pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); - } - } else { - pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); - } - } else { - if (0 == strcasecmp(tableDes->cols[counter].type, "binary") - || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { - //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); - if (tableDes->cols[counter].var_value) { - pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value); - } else { - pstr += 
sprintf(pstr, "\'%s\'", tableDes->cols[counter].value); - } - } else { - pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].value); - } - /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ - } - - /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") - * == 0) { */ - /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ - /* } */ - } - - pstr += sprintf(pstr, ");"); - - fprintf(fp, "%s\n", tmpBuf); - free(tmpBuf); -} - -static int64_t getNtbCountOfStb(char *dbName, char *stbName) -{ - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, - dbName, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return -1; - } - - int64_t count = 0; - - char command[COMMAND_SIZE]; - - sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); - - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } - - TAOS_ROW row = NULL; - - if ((row = taos_fetch_row(res)) != NULL) { - count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; - } - - taos_close(taos); - return count; -} - -static int getTableDes( - TAOS *taos, - char* dbName, char *table, - TableDef *tableDes, bool isSuperTable) { - TAOS_ROW row = NULL; - TAOS_RES* res = NULL; - int colCount = 0; - - char sqlstr[COMMAND_SIZE]; - sprintf(sqlstr, "describe %s.%s;", dbName, table); - - res = taos_query(taos, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while ((row = taos_fetch_row(res)) != NULL) { - tstrncpy(tableDes->cols[colCount].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - min(TSDB_COL_NAME_LEN, - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); - tstrncpy(tableDes->cols[colCount].type, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1)); - tableDes->cols[colCount].length = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(tableDes->cols[colCount].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(COL_NOTE_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); - colCount++; - } - - taos_free_result(res); - res = NULL; - - if (isSuperTable) { - return colCount; - } - - // if child-table have tag, using select tagName from table to get tagValue - for (int i = 0 ; i < colCount; i++) { - if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; - - sprintf(sqlstr, "select %s from %s.%s", - tableDes->cols[i].field, dbName, table); - - res = taos_query(taos, sqlstr); - code = taos_errno(res); - if (code != 0) { 
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } - - fields = taos_fetch_fields(res); - - row = taos_fetch_row(res); - if (NULL == row) { - errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } - - if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { - sprintf(tableDes->cols[i].note, "%s", "NUL"); - sprintf(tableDes->cols[i].value, "%s", "NULL"); - taos_free_result(res); - res = NULL; - continue; - } - - int32_t* length = taos_fetch_lengths(res); - - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[i].value, "%d", - ((((int32_t)(*((char *) - row[TSDB_SHOW_TABLES_NAME_INDEX])))==1) - ?1:0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[i].value, "%d", - *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[i].value, "%d", - *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[i].value, "%d", - *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[i].value, "%" PRId64 "", - *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[i].value, "%f", - GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[i].value, "%f", - GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BINARY: - memset(tableDes->cols[i].value, 0, - sizeof(tableDes->cols[i].value)); - int len = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - // FIXME for long value - if (len < (COL_VALUEBUF_LEN - 2)) { - converStringToReadable( - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - length[0], - 
tableDes->cols[i].value, - len); - } else { - tableDes->cols[i].var_value = calloc(1, len * 2); - if (tableDes->cols[i].var_value == NULL) { - errorPrint("%s() LN%d, memory alalocation failed!\n", - __func__, __LINE__); - taos_free_result(res); - return -1; - } - converStringToReadable((char *)row[0], - length[0], - (char *)(tableDes->cols[i].var_value), len); - } - break; - - case TSDB_DATA_TYPE_NCHAR: - memset(tableDes->cols[i].value, 0, - sizeof(tableDes->cols[i].note)); - int nlen = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - if (nlen < (COL_VALUEBUF_LEN-2)) { - char tbuf[COL_VALUEBUF_LEN-2]; // need reserve 2 bytes for ' ' - convertNCharToReadable( - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - length[0], tbuf, COL_VALUEBUF_LEN-2); - sprintf(tableDes->cols[i].value, "%s", tbuf); - } else { - tableDes->cols[i].var_value = calloc(1, nlen * 4); - if (tableDes->cols[i].var_value == NULL) { - errorPrint("%s() LN%d, memory alalocation failed!\n", - __func__, __LINE__); - taos_free_result(res); - return -1; - } - converStringToReadable( - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - length[0], - (char *)(tableDes->cols[i].var_value), nlen); - } - break; - case TSDB_DATA_TYPE_TIMESTAMP: - sprintf(tableDes->cols[i].value, "%" PRId64 "", - *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); -#if 0 - if (!g_args.mysqlFlag) { - sprintf(tableDes->cols[i].value, "%" PRId64 "", - *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, - (int)(ts % 1000)); - } -#endif - break; - default: - break; - } - - taos_free_result(res); - } - - return colCount; -} - -static int dumpCreateTableClause(TableDef *tableDes, int numOfCols, - FILE *fp, char* dbName) { - int counter = 0; - int count_temp = 0; - char sqlstr[COMMAND_SIZE]; 
- - char* pstr = sqlstr; - - pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", - dbName, tableDes->name); - - for (; counter < numOfCols; counter++) { - if (tableDes->cols[counter].note[0] != '\0') break; - - if (counter == 0) { - pstr += sprintf(pstr, " (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } - - if (0 == strcasecmp(tableDes->cols[counter].type, "binary") - || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); - } - } - - count_temp = counter; - - for (; counter < numOfCols; counter++) { - if (counter == count_temp) { - pstr += sprintf(pstr, ") TAGS (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } - - if (0 == strcasecmp(tableDes->cols[counter].type, "binary") - || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); - } - } - - pstr += sprintf(pstr, ");"); - - debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr); - return fprintf(fp, "%s\n\n", sqlstr); -} - -static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp) -{ - uint64_t sizeOfTableDes = - (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes); - if (NULL == tableDes) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, sizeOfTableDes); - exit(-1); - } - - int colCount = getTableDes(taos, dbInfo->name, - stbName, tableDes, true); - - if (colCount < 0) { - free(tableDes); - errorPrint("%s() LN%d, failed to get stable[%s] schema\n", - __func__, __LINE__, stbName); - exit(-1); - } - - dumpCreateTableClause(tableDes, colCount, 
fp, dbInfo->name); - free(tableDes); - - return 0; -} - -static int64_t dumpCreateSTableClauseOfDb( - SDbInfo *dbInfo, FILE *fp) -{ - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; - } - - TAOS_ROW row; - char command[COMMAND_SIZE] = {0}; - - sprintf(command, "SHOW %s.STABLES", dbInfo->name); - - TAOS_RES* res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - exit(-1); - } - - int64_t superTblCnt = 0; - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == dumpStableClasuse(taos, dbInfo, - row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { - superTblCnt ++; - } - } - - taos_free_result(res); - - fprintf(g_fpOfResult, - "# super table counter: %"PRId64"\n", - superTblCnt); - g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; - - taos_close(taos); - - return superTblCnt; -} - -static void dumpCreateDbClause( - SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { - char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - - char *pstr = sqlstr; - pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); - if (isDumpProperty) { - pstr += sprintf(pstr, - "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", - dbInfo->replica, dbInfo->quorum, dbInfo->days, - dbInfo->keeplist, - dbInfo->cache, - dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, - dbInfo->fsync, - dbInfo->cachelast, - dbInfo->comp, dbInfo->precision, dbInfo->update); - } - - pstr += sprintf(pstr, ";"); - fprintf(fp, "%s\n\n", sqlstr); -} - -static FILE* openDumpInFile(char *fptr) { - wordexp_t full_path; - - if (wordexp(fptr, &full_path, 
0) != 0) { - errorPrint("illegal file name: %s\n", fptr); - return NULL; - } - - char *fname = full_path.we_wordv[0]; - - FILE *f = NULL; - if ((fname) && (strlen(fname) > 0)) { - f = fopen(fname, "r"); - if (f == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, fname); - } - } - - wordfree(&full_path); - return f; -} - -static uint64_t getFilesNum(char *ext) -{ - uint64_t count = 0; - - int namelen, extlen; - struct dirent *pDirent; - DIR *pDir; - - extlen = strlen(ext); - - bool isSql = (0 == strcmp(ext, "sql")); - - pDir = opendir(g_args.inpath); - if (pDir != NULL) { - while ((pDirent = readdir(pDir)) != NULL) { - namelen = strlen (pDirent->d_name); - - if (namelen > extlen) { - if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { - if (isSql) { - if (0 == strcmp(pDirent->d_name, "dbs.sql")) { - continue; - } - } - verbosePrint("%s found\n", pDirent->d_name); - count ++; - } - } - } - closedir (pDir); - } - - debugPrint("%"PRId64" .%s files found!\n", count, ext); - return count; -} - -static void freeFileList(char **fileList, int64_t count) -{ - for (int64_t i = 0; i < count; i++) { - tfree(fileList[i]); - } - tfree(fileList); -} - -static void createDumpinList(char *ext, int64_t count) -{ - bool isSql = (0 == strcmp(ext, "sql")); - - if (isSql) { - g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *)); - assert(g_tsDumpInSqlFiles); - - for (int64_t i = 0; i < count; i++) { - g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN); - assert(g_tsDumpInSqlFiles[i]); - } - } -#ifdef AVRO_SUPPORT - else { - g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *)); - assert(g_tsDumpInAvroFiles); - - for (int64_t i = 0; i < count; i++) { - g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN); - assert(g_tsDumpInAvroFiles[i]); - } - - } -#endif - - int namelen, extlen; - struct dirent *pDirent; - DIR *pDir; - - extlen = strlen(ext); - - count = 0; - pDir = opendir(g_args.inpath); - if (pDir != NULL) { - while 
((pDirent = readdir(pDir)) != NULL) { - namelen = strlen (pDirent->d_name); - - if (namelen > extlen) { - if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { - verbosePrint("%s found\n", pDirent->d_name); - if (isSql) { - if (0 == strcmp(pDirent->d_name, "dbs.sql")) { - continue; - } - strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); - } -#ifdef AVRO_SUPPORT - else { - strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); - } -#endif - } - } - } - closedir (pDir); - } - - debugPrint("%"PRId64" .%s files filled to list!\n", count, ext); -} - -#ifdef AVRO_SUPPORT - -static int convertTbDesToJson( - char *dbName, char *tbName, TableDef *tableDes, int colCount, - char **jsonSchema) -{ - // { - // "type": "record", - // "name": "dbname.tbname", - // "fields": [ - // { - // "name": "col0 name", - // "type": "long" - // }, - // { - // "name": "col1 name", - // "type": "int" - // }, - // { - // "name": "col2 name", - // "type": "float" - // }, - // { - // "name": "col3 name", - // "type": "boolean" - // }, - // ... 
- // { - // "name": "coln name", - // "type": "string" - // } - // ] - // } - *jsonSchema = (char *)calloc(1, - 17 + TSDB_DB_NAME_LEN /* dbname section */ - + 17 /* type: record */ - + 11 + TSDB_TABLE_NAME_LEN /* tbname section */ - + 10 /* fields section */ - + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */ - if (*jsonSchema == NULL) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); - return -1; - } - - char *pstr = *jsonSchema; - pstr += sprintf(pstr, - "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [", - dbName, tbName); - for (int i = 0; i < colCount; i ++) { - if (0 == i) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "long"); - } else { - if (strcasecmp(tableDes->cols[i].type, "binary") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "string"); - } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "bytes"); - } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "boolean"); - } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "int"); - } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "int"); - } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "long"); - } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "long"); - } else { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": 
\"%s\"", - tableDes->cols[i].field, - strtolower(tableDes->cols[i].type, tableDes->cols[i].type)); - } - } - if ((i != (colCount -1)) - && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) { - pstr += sprintf(pstr, "},"); - } else { - pstr += sprintf(pstr, "}"); - break; - } - } - - pstr += sprintf(pstr, "]}"); - - debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema); - - return 0; -} - -static void print_json_indent(int indent) { - int i; - for (i = 0; i < indent; i++) { - putchar(' '); - } -} - -const char *json_plural(size_t count) { return count == 1 ? "" : "s"; } - -static void print_json_object(json_t *element, int indent) { - size_t size; - const char *key; - json_t *value; - - print_json_indent(indent); - size = json_object_size(element); - - printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size)); - json_object_foreach(element, key, value) { - print_json_indent(indent + 2); - printf("JSON Key: \"%s\"\n", key); - print_json_aux(value, indent + 2); - } -} - -static void print_json_array(json_t *element, int indent) { - size_t i; - size_t size = json_array_size(element); - print_json_indent(indent); - - printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size)); - for (i = 0; i < size; i++) { - print_json_aux(json_array_get(element, i), indent + 2); - } -} - -static void print_json_string(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON String: \"%s\"\n", json_string_value(element)); -} - -static void print_json_integer(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element)); -} - -static void print_json_real(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON Real: %f\n", json_real_value(element)); -} - -static void print_json_true(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON True\n"); -} - -static void 
print_json_false(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON False\n"); -} - -static void print_json_null(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON Null\n"); -} - -static void print_json_aux(json_t *element, int indent) -{ - switch(json_typeof(element)) { - case JSON_OBJECT: - print_json_object(element, indent); - break; - - case JSON_ARRAY: - print_json_array(element, indent); - break; - - case JSON_STRING: - print_json_string(element, indent); - break; - - case JSON_INTEGER: - print_json_integer(element, indent); - break; - - case JSON_REAL: - print_json_real(element, indent); - break; - - case JSON_TRUE: - print_json_true(element, indent); - break; - - case JSON_FALSE: - print_json_false(element, indent); - break; - - case JSON_NULL: - print_json_null(element, indent); - break; - - default: - fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element)); - } -} - -static void print_json(json_t *root) { print_json_aux(root, 0); } - -static json_t *load_json(char *jsonbuf) -{ - json_t *root; - json_error_t error; - - root = json_loads(jsonbuf, 0, &error); - - if (root) { - return root; - } else { - fprintf(stderr, "json error on line %d: %s\n", error.line, error.text); - return NULL; - } -} - -static RecordSchema *parse_json_to_recordschema(json_t *element) -{ - RecordSchema *recordSchema = malloc(sizeof(RecordSchema)); - assert(recordSchema); - - if (JSON_OBJECT != json_typeof(element)) { - fprintf(stderr, "%s() LN%d, json passed is not an object\n", - __func__, __LINE__); - return NULL; - } - - const char *key; - json_t *value; - - json_object_foreach(element, key, value) { - if (0 == strcmp(key, "name")) { - tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1); - } else if (0 == strcmp(key, "fields")) { - if (JSON_ARRAY == json_typeof(value)) { - - size_t i; - size_t size = json_array_size(value); - - verbosePrint("%s() LN%d, JSON Array 
of %lld element%s:\n", - __func__, __LINE__, - (long long)size, json_plural(size)); - - recordSchema->num_fields = size; - recordSchema->fields = malloc(sizeof(FieldStruct) * size); - assert(recordSchema->fields); - - for (i = 0; i < size; i++) { - FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i); - json_t *arr_element = json_array_get(value, i); - const char *ele_key; - json_t *ele_value; - - json_object_foreach(arr_element, ele_key, ele_value) { - if (0 == strcmp(ele_key, "name")) { - tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1); - } else if (0 == strcmp(ele_key, "type")) { - if (JSON_STRING == json_typeof(ele_value)) { - tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1); - } else if (JSON_OBJECT == json_typeof(ele_value)) { - const char *obj_key; - json_t *obj_value; - - json_object_foreach(ele_value, obj_key, obj_value) { - if (0 == strcmp(obj_key, "type")) { - if (JSON_STRING == json_typeof(obj_value)) { - tstrncpy(field->type, - json_string_value(obj_value), TYPE_NAME_LEN-1); - } - } - } - } - } - } - } - } else { - fprintf(stderr, "%s() LN%d, fields have no array\n", - __func__, __LINE__); - return NULL; - } - - break; - } - } - - return recordSchema; -} - -static void freeRecordSchema(RecordSchema *recordSchema) -{ - if (recordSchema) { - if (recordSchema->fields) { - free(recordSchema->fields); - } - free(recordSchema); - } -} - -static int64_t writeResultToAvro( - char *avroFilename, - char *jsonSchema, - TAOS_RES *res) -{ - avro_schema_t schema; - if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) { - errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n", - __func__, __LINE__, jsonSchema, avro_strerror()); - exit(EXIT_FAILURE); - } - - json_t *json_root = load_json(jsonSchema); - debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); - - RecordSchema *recordSchema; - if (json_root) { - if (g_args.debug_print || 
g_args.verbose_print) { - print_json(json_root); - } - - recordSchema = parse_json_to_recordschema(json_root); - if (NULL == recordSchema) { - fprintf(stderr, "Failed to parse json to recordschema\n"); - exit(EXIT_FAILURE); - } - - json_decref(json_root); - } else { - errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema); - exit(EXIT_FAILURE); - } - - avro_file_writer_t db; - - int rval = avro_file_writer_create_with_codec - (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0); - if (rval) { - errorPrint("There was an error creating %s. reason: %s\n", - avroFilename, avro_strerror()); - exit(EXIT_FAILURE); - } - - TAOS_ROW row = NULL; - - int numFields = taos_field_count(res); - assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(res); - - avro_value_iface_t *wface = - avro_generic_class_from_schema(schema); - - avro_value_t record; - avro_generic_value_new(wface, &record); - - int64_t count = 0; - while ((row = taos_fetch_row(res)) != NULL) { - avro_value_t value; - - for (int col = 0; col < numFields; col++) { - if (0 != avro_value_get_by_name( - &record, fields[col].name, &value, NULL)) { - errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) 
failed", - __func__, __LINE__, fields[col].name); - continue; - } - - int len; - switch (fields[col].type) { - case TSDB_DATA_TYPE_BOOL: - if (NULL == row[col]) { - avro_value_set_int(&value, TSDB_DATA_BOOL_NULL); - } else { - avro_value_set_boolean(&value, - ((((int32_t)(*((char *)row[col])))==1)?1:0)); - } - break; - - case TSDB_DATA_TYPE_TINYINT: - if (NULL == row[col]) { - avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL); - } else { - avro_value_set_int(&value, *((int8_t *)row[col])); - } - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (NULL == row[col]) { - avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL); - } else { - avro_value_set_int(&value, *((int16_t *)row[col])); - } - break; - - case TSDB_DATA_TYPE_INT: - if (NULL == row[col]) { - avro_value_set_int(&value, TSDB_DATA_INT_NULL); - } else { - avro_value_set_int(&value, *((int32_t *)row[col])); - } - break; - - case TSDB_DATA_TYPE_BIGINT: - if (NULL == row[col]) { - avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); - } else { - avro_value_set_long(&value, *((int64_t *)row[col])); - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (NULL == row[col]) { - avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL); - } else { - avro_value_set_float(&value, GET_FLOAT_VAL(row[col])); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (NULL == row[col]) { - avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL); - } else { - avro_value_set_double(&value, GET_DOUBLE_VAL(row[col])); - } - break; - - case TSDB_DATA_TYPE_BINARY: - if (NULL == row[col]) { - avro_value_set_string(&value, - (char *)NULL); - } else { - avro_value_set_string(&value, (char *)row[col]); - } - break; - - case TSDB_DATA_TYPE_NCHAR: - if (NULL == row[col]) { - avro_value_set_bytes(&value, - (void*)NULL,0); - } else { - len = strlen((char*)row[col]); - avro_value_set_bytes(&value, (void*)(row[col]),len); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - if (NULL == row[col]) { - avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); - } else { - 
avro_value_set_long(&value, *((int64_t *)row[col])); - } - break; - - default: - break; - } - } - - if (0 != avro_file_writer_append_value(db, &record)) { - errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n", - __func__, __LINE__, - avro_strerror()); - } else { - count ++; - } - } - - avro_value_decref(&record); - avro_value_iface_decref(wface); - freeRecordSchema(recordSchema); - avro_file_writer_close(db); - avro_schema_decref(schema); - - return count; -} - -void freeBindArray(char *bindArray, int onlyCol) -{ - TAOS_BIND *bind; - - for (int j = 0; j < onlyCol; j++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j)); - if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type) - && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) { - tfree(bind->buffer); - } - } -} - -static int dumpInOneAvroFile(char* fcharset, - char* encode, char *avroFilepath) -{ - debugPrint("avroFilepath: %s\n", avroFilepath); - - avro_file_reader_t reader; - - if(avro_file_reader(avroFilepath, &reader)) { - fprintf(stderr, "Unable to open avro file %s: %s\n", - avroFilepath, avro_strerror()); - return -1; - } - - int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4; - char *jsonbuf = calloc(1, buf_len); - assert(jsonbuf); - - avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);; - - avro_schema_t schema; - schema = avro_file_reader_get_writer_schema(reader); - avro_schema_to_json(schema, jsonwriter); - - if (0 == strlen(jsonbuf)) { - errorPrint("Failed to parse avro file: %s schema. 
reason: %s\n", - avroFilepath, avro_strerror()); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - return -1; - } - debugPrint("Schema:\n %s\n", jsonbuf); - - json_t *json_root = load_json(jsonbuf); - debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); - if (g_args.debug_print) { - print_json(json_root); - } - - const char *namespace = avro_schema_namespace((const avro_schema_t)schema); - debugPrint("Namespace: %s\n", namespace); - - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, - namespace, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return -1; - } - - TAOS_STMT *stmt = taos_stmt_init(taos); - if (NULL == stmt) { - taos_close(taos); - errorPrint("%s() LN%d, stmt init failed! reason: %s\n", - __func__, __LINE__, taos_errstr(NULL)); - return -1; - } - - RecordSchema *recordSchema = parse_json_to_recordschema(json_root); - if (NULL == recordSchema) { - errorPrint("Failed to parse json to recordschema. reason: %s\n", - avro_strerror()); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - return -1; - } - json_decref(json_root); - - TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) - + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false); - - if (allColCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - recordSchema->name); - free(tableDes); - freeRecordSchema(recordSchema); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - return -1; - } - - char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); - assert(stmtBuffer); - char *pstr = stmtBuffer; - pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); - - int onlyCol = 1; // at least timestamp - for (int col = 1; col < allColCount; col++) { - if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue; - pstr += sprintf(pstr, ",?"); - onlyCol ++; - } - pstr += sprintf(pstr, ")"); - - if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) { - errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n", - taos_stmt_errstr(stmt)); - - free(stmtBuffer); - free(tableDes); - freeRecordSchema(recordSchema); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - return -1; - } - - if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) { - errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n", - recordSchema->name, taos_stmt_errstr(stmt)); - - free(stmtBuffer); - free(tableDes); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - return -1; - } - - avro_value_iface_t *value_class = avro_generic_class_from_schema(schema); - avro_value_t value; - avro_generic_value_new(value_class, &value); - - char *bindArray = - malloc(sizeof(TAOS_BIND) * onlyCol); - assert(bindArray); - - int success = 0; - int failed = 0; - while(!avro_file_reader_read_value(reader, &value)) { - memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol); - TAOS_BIND *bind; - - for (int i = 0; i < recordSchema->num_fields; i++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - - avro_value_t field_value; - - FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i); - - bind->is_null = NULL; - int is_null = 1; - if (0 == i) { - int64_t *ts = malloc(sizeof(int64_t)); - assert(ts); - - avro_value_get_by_name(&value, field->name, &field_value, NULL); - avro_value_get_long(&field_value, ts); - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = ts; - bind->length = &bind->buffer_length; - } else if (0 == avro_value_get_by_name( - &value, 
field->name, &field_value, NULL)) { - - if (0 == strcasecmp(tableDes->cols[i].type, "int")) { - int32_t *n32 = malloc(sizeof(int32_t)); - assert(n32); - - avro_value_get_int(&field_value, n32); - debugPrint("%d | ", *n32); - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = n32; - } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) { - int32_t *n8 = malloc(sizeof(int32_t)); - assert(n8); - - avro_value_get_int(&field_value, n8); - debugPrint("%d | ", *n8); - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = (int8_t *)n8; - } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) { - int32_t *n16 = malloc(sizeof(int32_t)); - assert(n16); - - avro_value_get_int(&field_value, n16); - debugPrint("%d | ", *n16); - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = (int32_t*)n16; - } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) { - int64_t *n64 = malloc(sizeof(int64_t)); - assert(n64); - - avro_value_get_long(&field_value, n64); - debugPrint("%"PRId64" | ", *n64); - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = n64; - } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) { - int64_t *n64 = malloc(sizeof(int64_t)); - assert(n64); - - avro_value_get_long(&field_value, n64); - debugPrint("%"PRId64" | ", *n64); - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = n64; - } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) { - float *f = malloc(sizeof(float)); - assert(f); - - avro_value_get_float(&field_value, f); - if (TSDB_DATA_FLOAT_NULL == *f) { - debugPrint("%s | ", "NULL"); - bind->is_null = &is_null; - } else { - debugPrint("%f | ", *f); - bind->buffer = f; - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - } else if (0 == 
strcasecmp(tableDes->cols[i].type, "double")) { - double *dbl = malloc(sizeof(double)); - assert(dbl); - - avro_value_get_double(&field_value, dbl); - if (TSDB_DATA_DOUBLE_NULL == *dbl) { - debugPrint("%s | ", "NULL"); - bind->is_null = &is_null; - } else { - debugPrint("%f | ", *dbl); - bind->buffer = dbl; - } - bind->buffer = dbl; - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) { - size_t size; - - char *buf = NULL; - avro_value_get_string(&field_value, (const char **)&buf, &size); - debugPrint("%s | ", (char *)buf); - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - bind->buffer_length = tableDes->cols[i].length; - bind->buffer = buf; - } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) { - size_t bytessize; - void *bytesbuf = NULL; - - avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize); - debugPrint("%s | ", (char*)bytesbuf); - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - bind->buffer_length = tableDes->cols[i].length; - bind->buffer = bytesbuf; - } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) { - int32_t *bl = malloc(sizeof(int32_t)); - assert(bl); - - avro_value_get_boolean(&field_value, bl); - debugPrint("%s | ", (*bl)?"true":"false"); - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = (int8_t*)bl; - } - - bind->length = &bind->buffer_length; - } - - } - debugPrint("%s", "\n"); - - if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - freeBindArray(bindArray, onlyCol); - failed --; - continue; - } - if (0 != taos_stmt_add_batch(stmt)) { - errorPrint("%s() LN%d stmt_bind_param() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - freeBindArray(bindArray, onlyCol); - failed --; - continue; - } - - freeBindArray(bindArray, onlyCol); - - success ++; - continue; - } - - if (0 != taos_stmt_execute(stmt)) { - errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - failed = success; - } - - avro_value_decref(&value); - avro_value_iface_decref(value_class); - - tfree(bindArray); - - tfree(stmtBuffer); - tfree(tableDes); - - freeRecordSchema(recordSchema); - avro_schema_decref(schema); - avro_file_reader_close(reader); - avro_writer_free(jsonwriter); - - tfree(jsonbuf); - - taos_stmt_close(stmt); - taos_close(taos); - - if (failed < 0) - return failed; - return success; -} - -static void* dumpInAvroWorkThreadFp(void *arg) -{ - threadInfo *pThread = (threadInfo*)arg; - setThreadName("dumpInAvroWorkThrd"); - verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n", - pThread->threadIndex, pThread->count, pThread->from); - - for (int64_t i = 0; i < pThread->count; i++) { - char avroFile[MAX_PATH_LEN]; - sprintf(avroFile, "%s/%s", g_args.inpath, - g_tsDumpInAvroFiles[pThread->from + i]); - - if (0 == dumpInOneAvroFile(g_tsCharset, - g_args.encode, - avroFile)) { - okPrint("[%d] Success dump in file: %s\n", - pThread->threadIndex, avroFile); - } - } - - return NULL; -} - -static int64_t dumpInAvroWorkThreads() -{ - int64_t ret = 0; - - int32_t threads = g_args.thread_num; - - uint64_t avroFileCount = getFilesNum("avro"); - if (0 == avroFileCount) { - debugPrint("No .avro file found in %s\n", g_args.inpath); - return 0; - } - - createDumpinList("avro", avroFileCount); - - threadInfo *pThread; - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = (threadInfo *)calloc( - threads, sizeof(threadInfo)); - assert(pids); - assert(infos); - - int64_t a = avroFileCount / threads; - if (a < 1) { - threads = avroFileCount; - a = 1; - } - - int64_t b = 0; - if 
(threads != 0) { - b = avroFileCount % threads; - } - - int64_t from = 0; - - for (int32_t t = 0; t < threads; ++t) { - pThread = infos + t; - pThread->threadIndex = t; - - pThread->from = from; - pThread->count = tcount; - verbosePrint( - "Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n", - t, pThread->count, pThread->from); - - if (pthread_create(pids + t, NULL, - dumpInAvroWorkThreadFp, (void*)pThread) != 0) { - errorPrint("%s() LN%d, thread[%d] failed to start\n", - __func__, __LINE__, pThread->threadIndex); - exit(EXIT_FAILURE); - } - } - - for (int t = 0; t < threads; ++t) { - pthread_join(pids[t], NULL); - } - - free(infos); - free(pids); - - freeFileList(g_tsDumpInAvroFiles, avroFileCount); - - return ret; -} - -#endif /* AVRO_SUPPORT */ - -static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName) -{ - int64_t totalRows = 0; - - int32_t sql_buf_len = g_args.max_sql_len; - char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); - assert(tmpBuffer); - - char *pstr = tmpBuffer; - - TAOS_ROW row = NULL; - int rowFlag = 0; - int64_t lastRowsPrint = 5000000; - int count = 0; - - int numFields = taos_field_count(res); - assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(res); - - int32_t curr_sqlstr_len = 0; - int32_t total_sqlstr_len = 0; - - while ((row = taos_fetch_row(res)) != NULL) { - curr_sqlstr_len = 0; - - int32_t* length = taos_fetch_lengths(res); // act len - - if (count == 0) { - total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "INSERT INTO %s.%s VALUES (", dbName, tbName); - } else { - if (g_args.mysqlFlag) { - if (0 == rowFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - rowFlag++; - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); - } - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - } - } - - for (int col = 0; col < numFields; col++) { - if (col != 0) curr_sqlstr_len += sprintf(pstr + 
curr_sqlstr_len, ", "); - - if (row[col] == NULL) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); - continue; - } - - switch (fields[col].type) { - case TSDB_DATA_TYPE_BOOL: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", - ((((int32_t)(*((char *)row[col])))==1)?1:0)); - break; - - case TSDB_DATA_TYPE_TINYINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", - *((int8_t *)row[col])); - break; - - case TSDB_DATA_TYPE_SMALLINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", - *((int16_t *)row[col])); - break; - - case TSDB_DATA_TYPE_INT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", - *((int32_t *)row[col])); - break; - - case TSDB_DATA_TYPE_BIGINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "%" PRId64 "", - *((int64_t *)row[col])); - break; - - case TSDB_DATA_TYPE_FLOAT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", - GET_FLOAT_VAL(row[col])); - break; - - case TSDB_DATA_TYPE_DOUBLE: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", - GET_DOUBLE_VAL(row[col])); - break; - - case TSDB_DATA_TYPE_BINARY: - { - char tbuf[COMMAND_SIZE] = {0}; - converStringToReadable((char *)row[col], length[col], - tbuf, COMMAND_SIZE); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "\'%s\'", tbuf); - break; - } - case TSDB_DATA_TYPE_NCHAR: - { - char tbuf[COMMAND_SIZE] = {0}; - convertNCharToReadable((char *)row[col], length[col], - tbuf, COMMAND_SIZE); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "\'%s\'", tbuf); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: - if (!g_args.mysqlFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "%" PRId64 "", - *(int64_t *)row[col]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[col]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "\'%s.%03d\'", - buf, (int)(ts % 1000)); - } - break; - 
default: - break; - } - } - - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); - - totalRows++; - count++; - fprintf(fp, "%s", tmpBuffer); - - if (totalRows >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from %s.%s\n", - totalRows, dbName, tbName); - lastRowsPrint += 5000000; - } - - total_sqlstr_len += curr_sqlstr_len; - - if ((count >= g_args.data_batch) - || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { - fprintf(fp, ";\n"); - count = 0; - } - } - - debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); - - fprintf(fp, "\n"); - free(tmpBuffer); - - return totalRows; -} - -static int64_t dumpTableData(FILE *fp, char *tbName, - char* dbName, int precision, - char *jsonSchema) { - int64_t totalRows = 0; - - char sqlstr[1024] = {0}; - - int64_t start_time, end_time; - if (strlen(g_args.humanStartTime)) { - if (TSDB_CODE_SUCCESS != taosParseTime( - g_args.humanStartTime, &start_time, - strlen(g_args.humanStartTime), - precision, 0)) { - errorPrint("Input %s, time format error!\n", - g_args.humanStartTime); - return -1; - } - } else { - start_time = g_args.start_time; - } - - if (strlen(g_args.humanEndTime)) { - if (TSDB_CODE_SUCCESS != taosParseTime( - g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), - precision, 0)) { - errorPrint("Input %s, time format error!\n", g_args.humanEndTime); - return -1; - } - } else { - end_time = g_args.end_time; - } - - sprintf(sqlstr, - "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", - dbName, tbName, start_time, end_time); - - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbName, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbName); - return -1; - } - - TAOS_RES* res = taos_query(taos, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("failed to run command %s, reason: %s\n", - sqlstr, 
taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } - -#ifdef AVRO_SUPPORT - if (g_args.avro) { - char avroFilename[MAX_PATH_LEN] = {0}; - - if (g_args.outpath[0] != 0) { - sprintf(avroFilename, "%s/%s.%s.avro", - g_args.outpath, dbName, tbName); - } else { - sprintf(avroFilename, "%s.%s.avro", - dbName, tbName); - } - - totalRows = writeResultToAvro(avroFilename, jsonSchema, res); - } else -#endif - totalRows = writeResultToSql(res, fp, dbName, tbName); - - taos_free_result(res); - taos_close(taos); - return totalRows; -} - -static int64_t dumpNormalTable( - TAOS *taos, - char *dbName, - char *stable, - char *tbName, - int precision, - FILE *fp - ) { - int colCount = 0; - - TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) - + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table - colCount = getTableDes(taos, dbName, tbName, tableDes, false); - - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } - - // create child-table using super-table - dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); - } else { // dump table definition - colCount = getTableDes(taos, dbName, tbName, tableDes, false); - - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } - - // create normal-table or super-table - dumpCreateTableClause(tableDes, colCount, fp, dbName); - } - - char *jsonSchema = NULL; -#ifdef AVRO_SUPPORT - if (g_args.avro) { - if (0 != convertTbDesToJson( - dbName, tbName, tableDes, colCount, &jsonSchema)) { - errorPrint("%s() LN%d, convertTbDesToJson failed\n", - __func__, - __LINE__); - freeTbDes(tableDes); - return -1; - } - } -#endif - - int64_t totalRows = 0; - if (!g_args.schemaonly) { - totalRows = dumpTableData(fp, tbName, dbName, 
precision, - jsonSchema); - } - - tfree(jsonSchema); - freeTbDes(tableDes); - return totalRows; -} - -static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName) -{ - int64_t count = 0; - - char tmpBuf[MAX_PATH_LEN] = {0}; - FILE *fp = NULL; - - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } - - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; - } - - count = dumpNormalTable( - taos, - dbInfo->name, - NULL, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); - if (count > 0) { - atomic_add_fetch_64(&g_totalDumpOutRows, count); - } - fclose(fp); - return count; -} - -static int64_t dumpNormalTableBelongStb( - TAOS *taos, - SDbInfo *dbInfo, char *stbName, char *ntbName) -{ - int64_t count = 0; - - char tmpBuf[MAX_PATH_LEN] = {0}; - FILE *fp = NULL; - - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } - - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; - } - - count = dumpNormalTable( - taos, - dbInfo->name, - stbName, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); - if (count > 0) { - atomic_add_fetch_64(&g_totalDumpOutRows, count); - } - - fclose(fp); - return count; -} - -static void *dumpNtbOfDb(void *arg) { - threadInfo *pThreadInfo = (threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); - debugPrint("dump table count = \t%"PRId64"\n", - pThreadInfo->count); - - FILE *fp = NULL; - char tmpBuf[MAX_PATH_LEN] = {0}; - - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%d.sql", - g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex); 
- } else { - sprintf(tmpBuf, "%s.%d.sql", - pThreadInfo->dbName, pThreadInfo->threadIndex); - } - - fp = fopen(tmpBuf, "w"); - - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; - } - - int64_t count; - for (int64_t i = 0; i < pThreadInfo->count; i++) { - debugPrint("[%d] No.\t%"PRId64" table name: %s\n", - pThreadInfo->threadIndex, i, - ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name); - count = dumpNormalTable( - pThreadInfo->taos, - pThreadInfo->dbName, - ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable, - ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name, - pThreadInfo->precision, - fp); - if (count < 0) { - break; - } else { - atomic_add_fetch_64(&g_totalDumpOutRows, count); - } - } - - fclose(fp); - return NULL; -} - -static int checkParam() { - if (g_args.all_databases && g_args.databases) { - errorPrint("%s", "conflict option --all-databases and --databases\n"); - return -1; - } - - if (g_args.start_time > g_args.end_time) { - errorPrint("%s", "start time is larger than end time\n"); - return -1; - } - - if (g_args.arg_list_len == 0) { - if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) { - errorPrint("%s", "taosdump requires parameters\n"); - return -1; - } - } - /* - if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) { - fprintf(stderr, "duplicate parameter input and output file path\n"); - return -1; - } - */ - if (!g_args.isDumpIn && g_args.encode != NULL) { - fprintf(stderr, "invalid option in dump out\n"); - return -1; - } - - if (g_args.table_batch <= 0) { - fprintf(stderr, "invalid option in dump out\n"); - return -1; - } - - return 0; -} - -/* -static bool isEmptyCommand(char *cmd) { - char *pchar = cmd; - - while (*pchar != '\0') { - if (*pchar != ' ') return false; - pchar++; - } - - return true; -} - -static void taosReplaceCtrlChar(char *str) { - bool ctrlOn = false; - char *pstr = NULL; - - for (pstr = 
str; *str != '\0'; ++str) { - if (ctrlOn) { - switch (*str) { - case 'n': - *pstr = '\n'; - pstr++; - break; - case 'r': - *pstr = '\r'; - pstr++; - break; - case 't': - *pstr = '\t'; - pstr++; - break; - case '\\': - *pstr = '\\'; - pstr++; - break; - case '\'': - *pstr = '\''; - pstr++; - break; - default: - break; - } - ctrlOn = false; - } else { - if (*str == '\\') { - ctrlOn = true; - } else { - *pstr = *str; - pstr++; - } - } - } - - *pstr = '\0'; -} -*/ - -char *ascii_literal_list[] = { - "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", - "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", - "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", - "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", - "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", - "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", - "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", - "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", - "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", - "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", - "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", - "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", - "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", 
"\\xcd", "\\xce", "\\xcf", - "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", - "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", - "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", - "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; - -static int converStringToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - while (size > 0) { - if (*pstr == '\0') break; - pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); - pstr++; - size--; - } - *pbuf = '\0'; - return 0; -} - -static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - wchar_t wc; - while (size > 0) { - if (*pstr == '\0') break; - int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); - if (byte_width < 0) { - errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__); - exit(-1); - } - - if ((int)wc < 256) { - pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); - } else { - memcpy(pbuf, pstr, byte_width); - pbuf += byte_width; - } - pstr += byte_width; - } - - *pbuf = '\0'; - - return 0; -} - -static void dumpCharset(FILE *fp) { - char charsetline[256]; - - (void)fseek(fp, 0, SEEK_SET); - sprintf(charsetline, "#!%s\n", tsCharset); - (void)fwrite(charsetline, strlen(charsetline), 1, fp); -} - -static void loadFileCharset(FILE *fp, char *fcharset) { - char * line = NULL; - size_t line_size = 0; - - (void)fseek(fp, 0, SEEK_SET); - ssize_t size = getline(&line, &line_size, fp); - if (size <= 2) { - goto _exit_no_charset; - } - - if (strncmp(line, "#!", 2) != 0) { - goto _exit_no_charset; - } - if (line[size - 1] == '\n') { - line[size - 1] = '\0'; - size--; - } - strcpy(fcharset, line + 2); - - tfree(line); - return; - -_exit_no_charset: - (void)fseek(fp, 0, 
SEEK_SET); - *fcharset = '\0'; - tfree(line); - return; -} - -// ======== dumpIn support multi threads functions ================================// - -static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset, - char* encode, char* fileName) { - int read_len = 0; - char * cmd = NULL; - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); - if (cmd == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - int lastRowsPrint = 5000000; - int lineNo = 0; - while ((read_len = getline(&line, &line_len, fp)) != -1) { - ++lineNo; - if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; - line[--read_len] = '\0'; - - //if (read_len == 0 || isCommentLine(line)) { // line starts with # - if (read_len == 0 ) { - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + cmd_len, line, read_len); - cmd[read_len + cmd_len]= '\0'; - if (queryDbImpl(taos, cmd)) { - errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", - __func__, __LINE__, lineNo, fileName); - fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); - } - - memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; - - if (lineNo >= lastRowsPrint) { - printf(" %d lines already be executed from file %s\n", lineNo, fileName); - lastRowsPrint += 5000000; - } - } - - tfree(cmd); - tfree(line); - return 0; -} - -static void* dumpInSqlWorkThreadFp(void *arg) -{ - threadInfo *pThread = (threadInfo*)arg; - setThreadName("dumpInSqlWorkThrd"); - fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n", - pThread->threadIndex, pThread->count, pThread->from); - - for (int64_t i = 0; i < pThread->count; i++) { - char sqlFile[MAX_PATH_LEN]; - sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]); - - FILE* fp = 
openDumpInFile(sqlFile); - if (NULL == fp) { - errorPrint("[%d] Failed to open input file: %s\n", - pThread->threadIndex, sqlFile); - continue; - } - - if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode, - sqlFile)) { - okPrint("[%d] Success dump in file: %s\n", - pThread->threadIndex, sqlFile); - } - fclose(fp); - } - - return NULL; -} - -static int dumpInSqlWorkThreads() -{ - int32_t threads = g_args.thread_num; - - uint64_t sqlFileCount = getFilesNum("sql"); - if (0 == sqlFileCount) { - debugPrint("No .sql file found in %s\n", g_args.inpath); - return 0; - } - - createDumpinList("sql", sqlFileCount); - - threadInfo *pThread; - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = (threadInfo *)calloc( - threads, sizeof(threadInfo)); - assert(pids); - assert(infos); - - int64_t a = sqlFileCount / threads; - if (a < 1) { - threads = sqlFileCount; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = sqlFileCount % threads; - } - - int64_t from = 0; - - for (int32_t t = 0; t < threads; ++t) { - pThread = infos + t; - pThread->threadIndex = t; - - pThread->from = from; - pThread->count = tcount; - verbosePrint( - "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n", - t, pThread->count, pThread->from); - - pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (pThread->taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - free(infos); - free(pids); - return -1; - } - - if (pthread_create(pids + t, NULL, - dumpInSqlWorkThreadFp, (void*)pThread) != 0) { - errorPrint("%s() LN%d, thread[%d] failed to start\n", - __func__, __LINE__, pThread->threadIndex); - exit(EXIT_FAILURE); - } - } - - for (int t = 0; t < threads; ++t) { - pthread_join(pids[t], NULL); - } - - for (int t = 0; t < threads; ++t) { - taos_close(infos[t].taos); - } - free(infos); - free(pids); - - freeFileList(g_tsDumpInSqlFiles, sqlFileCount); - - 
return 0; -} - -static int dumpInDbs() -{ - TAOS *taos = taos_connect( - g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - - if (taos == NULL) { - errorPrint("%s() LN%d, failed to connect to TDengine server\n", - __func__, __LINE__); - return -1; - } - - char dbsSql[MAX_PATH_LEN]; - sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql"); - - FILE *fp = openDumpInFile(dbsSql); - if (NULL == fp) { - errorPrint("%s() LN%d, failed to open input file %s\n", - __func__, __LINE__, dbsSql); - return -1; - } - debugPrint("Success Open input file: %s\n", dbsSql); - loadFileCharset(fp, g_tsCharset); - - if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) { - okPrint("Success dump in file: %s !\n", dbsSql); - } - - fclose(fp); - taos_close(taos); - - return 0; -} - -static int64_t dumpIn() { - assert(g_args.isDumpIn); - - int64_t ret = 0; - if (dumpInDbs()) { - errorPrint("%s", "Failed to dump dbs in!\n"); - exit(EXIT_FAILURE); - } - - ret = dumpInSqlWorkThreads(); - -#ifdef AVRO_SUPPORT - if (0 == ret) { - ret = dumpInAvroWorkThreads(); - } -#endif - - return ret; -} - -static void *dumpNormalTablesOfStb(void *arg) { - threadInfo *pThreadInfo = (threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); - debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count); - - char command[COMMAND_SIZE]; - - sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", - pThreadInfo->dbName, pThreadInfo->stbName, - pThreadInfo->count, pThreadInfo->from); - - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - int32_t code = taos_errno(res); - if (code) { - errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return NULL; - } - - FILE *fp = NULL; - char tmpBuf[MAX_PATH_LEN] = {0}; - - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.%d.sql", - g_args.outpath, - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } else { - sprintf(tmpBuf, "%s.%s.%d.sql", - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } - - fp = fopen(tmpBuf, "w"); - - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; - } - - TAOS_ROW row = NULL; - int64_t i = 0; - int64_t count; - while((row = taos_fetch_row(res)) != NULL) { - debugPrint("[%d] sub table %"PRId64": name: %s\n", - pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - - count = dumpNormalTable( - pThreadInfo->taos, - pThreadInfo->dbName, - pThreadInfo->stbName, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - pThreadInfo->precision, - fp); - if (count < 0) { - break; - } else { - atomic_add_fetch_64(&g_totalDumpOutRows, count); - } - } - - fclose(fp); - return NULL; -} - -static int64_t dumpNtbOfDbByThreads( - SDbInfo *dbInfo, - int64_t ntbCount) -{ - if (ntbCount <= 0) { - return 0; - } - - int threads = g_args.thread_num; - - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } - - assert(threads); - int64_t b = ntbCount % threads; - - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - assert(pids); - assert(infos); - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); - - 
return -1; - } - - pThreadInfo->threadIndex = i; - pThreadInfo->count = (ifrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->from + - ((threadInfo *)(infos + i - 1))->count; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo); - } - - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } - - free(pids); - free(infos); - - return 0; -} - -static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) -{ - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; - } - - char command[COMMAND_SIZE]; - TAOS_RES *result; - int32_t code; - - sprintf(command, "USE %s", dbInfo->name); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("invalid database %s, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; - } - - sprintf(command, "SHOW TABLES"); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("Failed to show %s\'s tables, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; - } - - g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); - assert(g_tablesList); - - TAOS_ROW row; - int64_t count = 0; - while(NULL != (row = taos_fetch_row(result))) { - debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", - __func__, __LINE__, - count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - tstrncpy(((TableInfo *)(g_tablesList + count))->name, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN); - char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; - if 
(stbName) { - tstrncpy(((TableInfo *)(g_tablesList + count))->stable, - (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); - ((TableInfo *)(g_tablesList + count))->belongStb = true; - } - count ++; - } - taos_close(taos); - - int64_t records = dumpNtbOfDbByThreads(dbInfo, count); - - free(g_tablesList); - g_tablesList = NULL; - - return records; -} - -static int64_t dumpNtbOfStbByThreads( - SDbInfo *dbInfo, char *stbName) -{ - int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); - - if (ntbCount <= 0) { - return 0; - } - - int threads = g_args.thread_num; - - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } - - assert(threads); - int64_t b = ntbCount % threads; - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - assert(pids); - assert(infos); - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); - - return -1; - } - - pThreadInfo->threadIndex = i; - pThreadInfo->count = (ifrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->from + - ((threadInfo *)(infos + i - 1))->count; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - strcpy(pThreadInfo->stbName, stbName); - pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo); - } - - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - int64_t records = 0; - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - records += pThreadInfo->rowsOfDumpOut; - taos_close(pThreadInfo->taos); - } - - free(pids); - free(infos); - - return records; -} - 
-static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp) -{ - dumpCreateDbClause(dbInfo, g_args.with_property, fp); - - fprintf(g_fpOfResult, "\n#### database: %s\n", - dbInfo->name); - g_resultStatistics.totalDatabasesOfDumpOut++; - - dumpCreateSTableClauseOfDb(dbInfo, fp); - - return dumpNTablesOfDb(dbInfo); -} - -static int dumpOut() { - TAOS *taos = NULL; - TAOS_RES *result = NULL; - - TAOS_ROW row; - FILE *fp = NULL; - int32_t count = 0; - - char tmpBuf[MAX_PATH_LEN] = {0}; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); - } else { - sprintf(tmpBuf, "dbs.sql"); - } - - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; - } - - g_args.dumpDbCount = getDumpDbCount(); - debugPrint("%s() LN%d, dump db count: %d\n", - __func__, __LINE__, g_args.dumpDbCount); - - if (0 == g_args.dumpDbCount) { - errorPrint("%d databases valid to dump\n", g_args.dumpDbCount); - fclose(fp); - return -1; - } - - g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *)); - if (g_dbInfos == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", - __func__, __LINE__); - goto _exit_failure; - } - - char command[COMMAND_SIZE]; - - /* Connect to server */ - taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - goto _exit_failure; - } - - /* --------------------------------- Main Code -------------------------------- */ - /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */ - /* */ - dumpCharset(fp); - - sprintf(command, "show databases"); - result = taos_query(taos, command); - int32_t code = taos_errno(result); - - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - goto _exit_failure; - } - 
- TAOS_FIELD *fields = taos_fetch_fields(result); - - while ((row = taos_fetch_row(result)) != NULL) { - // sys database name : 'log', but subsequent version changed to 'log' - if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - && (!g_args.allow_sys)) { - continue; - } - - if (g_args.databases) { // input multi dbs - if (inDatabasesSeq( - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { - continue; - } - } else if (!g_args.all_databases) { // only input one db - if (strncasecmp(g_args.arg_list[0], - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) - continue; - } - - g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (g_dbInfos[count] == NULL) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); - goto _exit_failure; - } - - okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); - tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - min(TSDB_DB_NAME_LEN, - fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1)); - if (g_args.with_property) { - g_dbInfos[count]->ntables = - *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - g_dbInfos[count]->vgroups = - *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - g_dbInfos[count]->replica = - *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - g_dbInfos[count]->quorum = - *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - g_dbInfos[count]->days = - *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(g_dbInfos[count]->keeplist, - (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1)); - //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - //g_dbInfos[count]->daysToKeep1; - //g_dbInfos[count]->daysToKeep2; - g_dbInfos[count]->cache = - *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - g_dbInfos[count]->blocks = - *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - 
g_dbInfos[count]->minrows = - *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - g_dbInfos[count]->maxrows = - *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - g_dbInfos[count]->wallevel = - *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - g_dbInfos[count]->fsync = - *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - g_dbInfos[count]->comp = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - g_dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(g_dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - DB_PRECISION_LEN); - g_dbInfos[count]->update = - *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - } - count++; - - if (g_args.databases) { - if (count > g_args.dumpDbCount) - break; - } else if (!g_args.all_databases) { - if (count >= 1) - break; - } - } - - if (count == 0) { - errorPrint("%d databases valid to dump\n", count); - goto _exit_failure; - } - - if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... 
OR taosdump --all-databases - for (int i = 0; i < count; i++) { - int64_t records = 0; - records = dumpWholeDatabase(g_dbInfos[i], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[i]->name); - g_totalDumpOutRows += records; - } - } - } else { - if (1 == g_args.arg_list_len) { - int64_t records = dumpWholeDatabase(g_dbInfos[0], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[0]->name); - g_totalDumpOutRows += records; - } - } else { - dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp); - } - - int superTblCnt = 0 ; - for (int i = 1; g_args.arg_list[i]; i++) { - TableRecordInfo tableRecordInfo; - - if (getTableRecordInfo(g_dbInfos[0]->name, - g_args.arg_list[i], - &tableRecordInfo) < 0) { - errorPrint("input the invalid table %s\n", - g_args.arg_list[i]); - continue; - } - - int64_t records = 0; - if (tableRecordInfo.isStb) { // dump all table of this stable - int ret = dumpStableClasuse( - taos, - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - if (ret >= 0) { - superTblCnt++; - records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]); - } - } else if (tableRecordInfo.belongStb){ - dumpStableClasuse( - taos, - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - records = dumpNormalTableBelongStb( - taos, - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - g_args.arg_list[i]); - } else { - records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]); - } - - if (records >= 0) { - okPrint("table: %s dumped\n", g_args.arg_list[i]); - g_totalDumpOutRows += records; - } - } - } - - taos_close(taos); - - /* Close the handle and return */ - fclose(fp); - taos_free_result(result); - freeDbInfos(); - fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return 0; - -_exit_failure: - fclose(fp); - taos_close(taos); - taos_free_result(result); - freeDbInfos(); - errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return -1; -} - -int main(int 
argc, char *argv[]) { - static char verType[32] = {0}; - sprintf(verType, "version: %s\n", version); - argp_program_version = verType; - - int ret = 0; - /* Parse our arguments; every option seen by parse_opt will be - reflected in arguments. */ - if (argc > 1) { -// parse_precision_first(argc, argv, &g_args); - parse_timestamp(argc, argv, &g_args); - parse_args(argc, argv, &g_args); - } - - argp_parse(&argp, argc, argv, 0, 0, &g_args); - - if (g_args.abort) { -#ifndef _ALPINE - error(10, 0, "ABORTED"); -#else - abort(); -#endif - } - - printf("====== arguments config ======\n"); - - printf("host: %s\n", g_args.host); - printf("user: %s\n", g_args.user); - printf("password: %s\n", g_args.password); - printf("port: %u\n", g_args.port); - printf("mysqlFlag: %d\n", g_args.mysqlFlag); - printf("outpath: %s\n", g_args.outpath); - printf("inpath: %s\n", g_args.inpath); - printf("resultFile: %s\n", g_args.resultFile); - printf("encode: %s\n", g_args.encode); - printf("all_databases: %s\n", g_args.all_databases?"true":"false"); - printf("databases: %d\n", g_args.databases); - printf("databasesSeq: %s\n", g_args.databasesSeq); - printf("schemaonly: %s\n", g_args.schemaonly?"true":"false"); - printf("with_property: %s\n", g_args.with_property?"true":"false"); -#ifdef AVRO_SUPPORT - printf("avro format: %s\n", g_args.avro?"true":"false"); - printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]); -#endif - printf("start_time: %" PRId64 "\n", g_args.start_time); - printf("human readable start time: %s \n", g_args.humanStartTime); - printf("end_time: %" PRId64 "\n", g_args.end_time); - printf("human readable end time: %s \n", g_args.humanEndTime); - printf("precision: %s\n", g_args.precision); - printf("data_batch: %d\n", g_args.data_batch); - printf("max_sql_len: %d\n", g_args.max_sql_len); - printf("table_batch: %d\n", g_args.table_batch); - printf("thread_num: %d\n", g_args.thread_num); - printf("allow_sys: %d\n", g_args.allow_sys); - printf("abort: %d\n", g_args.abort); 
- printf("isDumpIn: %d\n", g_args.isDumpIn); - printf("arg_list_len: %d\n", g_args.arg_list_len); - printf("debug_print: %d\n", g_args.debug_print); - - for (int32_t i = 0; i < g_args.arg_list_len; i++) { - if (g_args.databases || g_args.all_databases) { - errorPrint("%s is an invalid input if database(s) be already specified.\n", - g_args.arg_list[i]); - exit(EXIT_FAILURE); - } else { - printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]); - } - } - - printf("==============================\n"); - if (checkParam(&g_args) < 0) { - exit(EXIT_FAILURE); - } - - g_fpOfResult = fopen(g_args.resultFile, "a"); - if (NULL == g_fpOfResult) { - errorPrint("Failed to open %s for save result\n", g_args.resultFile); - exit(-1); - }; - - fprintf(g_fpOfResult, "#############################################################################\n"); - fprintf(g_fpOfResult, "============================== arguments config =============================\n"); - - fprintf(g_fpOfResult, "host: %s\n", g_args.host); - fprintf(g_fpOfResult, "user: %s\n", g_args.user); - fprintf(g_fpOfResult, "password: %s\n", g_args.password); - fprintf(g_fpOfResult, "port: %u\n", g_args.port); - fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag); - fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath); - fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath); - fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile); - fprintf(g_fpOfResult, "encode: %s\n", g_args.encode); - fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false"); - fprintf(g_fpOfResult, "databases: %d\n", g_args.databases); - fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq); - fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false"); - fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); -#ifdef AVRO_SUPPORT - fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); - fprintf(g_fpOfResult, "avro codec: %s\n", 
g_avro_codec[g_args.avro_codec]); -#endif - fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); - fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); - fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); - fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime); - fprintf(g_fpOfResult, "precision: %s\n", g_args.precision); - fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); - fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); - fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch); - fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num); - fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys); - fprintf(g_fpOfResult, "abort: %d\n", g_args.abort); - fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn); - fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len); - - for (int32_t i = 0; i < g_args.arg_list_len; i++) { - fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]); - } - - g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); - - time_t tTime = time(NULL); - struct tm tm = *localtime(&tTime); - - if (g_args.isDumpIn) { - fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); - fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", - tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (dumpIn() < 0) { - errorPrint("%s\n", "dumpIn() failed!"); - ret = -1; - } - } else { - fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); - fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", - tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (dumpOut() < 0) { - ret = -1; - } else { - fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); - 
fprintf(g_fpOfResult, "# total database count: %d\n", - g_resultStatistics.totalDatabasesOfDumpOut); - fprintf(g_fpOfResult, "# total super table count: %d\n", - g_resultStatistics.totalSuperTblsOfDumpOut); - fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", - g_resultStatistics.totalChildTblsOfDumpOut); - fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", - g_resultStatistics.totalRowsOfDumpOut); - } - } - - fprintf(g_fpOfResult, "\n"); - fclose(g_fpOfResult); - - if (g_tablesList) { - free(g_tablesList); - } - - return ret; -} diff --git a/src/kit/taosdump/taosdump.sh b/src/kit/taosdump/taosdump.sh deleted file mode 100755 index 6d32c090dbb0f538b0fc0abb4a9588ee08037a95..0000000000000000000000000000000000000000 --- a/src/kit/taosdump/taosdump.sh +++ /dev/null @@ -1,48 +0,0 @@ -taos1_6="/root/mnt/work/test/td1.6/build/bin/taos" -taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump" -taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg" - -taos2_0="/root/mnt/work/test/td2.0/build/bin/taos" -taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump" -taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg" - -data_dir="/root/mnt/work/test/td1.6/output" -table_list="/root/mnt/work/test/td1.6/tables" - -DBNAME="test" -NTABLES=$(wc -l ${table_list} | awk '{print $1;}') -NTABLES_PER_DUMP=101 - -mkdir -p ${data_dir} -i=0 -round=0 -command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" -while IFS= read -r line -do - i=$((i+1)) - - command="${command} ${line}" - - if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then - round=$((round+1)) - echo "Starting round ${round} dump out..." - rm -f ${data_dir}/* - ${command} - echo "Starting round ${round} dump in..." 
- ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} - - # Reset variables - # command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}" - command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" - i=0 - fi -done < "${table_list}" - -if [[ ${i} -ne "0" ]]; then - round=$((round+1)) - echo "Starting round ${round} dump out..." - rm -f ${data_dir}/* - ${command} - echo "Starting round ${round} dump in..." - ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} -fi diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 960dab6a5bd74f5f49afa42cf3b1f3583d37ac84..9ba0afa90ee2f23573275baf3d11d4b7727c34cb 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -3092,7 +3092,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { // add the user-defined-function information for(int32_t i = 0; i < pInfo->numOfUdfs; ++i, ++t) { char buf[TSDB_FUNC_NAME_LEN] = {0}; - strcpy(buf, nameList[t]); + tstrncpy(buf, nameList[t], TSDB_FUNC_NAME_LEN); SFuncObj* pFuncObj = mnodeGetFunc(buf); if (pFuncObj == NULL) { diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h index 798a08e3e6e16470a750cbd8bfed429539b44d8d..52e6c376a6c240d8c10b8596effa8b398e1e61c4 100644 --- a/src/os/inc/osTime.h +++ b/src/os/inc/osTime.h @@ -103,6 +103,8 @@ int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, char* int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision); int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth); +int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth); + void deltaToUtcInitOnce(); int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision); diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index cc12968c72eef5b3970ca68cf660de502b402e1e..039d688526c4cb1bbcc3ad3163bf3d47437ee625 100644 
--- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -28,6 +28,7 @@ void taosClose(FileFd fd) { void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { const char *tdengineTmpFileNamePrefix = "tdengine-"; char tmpPath[PATH_MAX]; + static uint64_t seqId = 0; int32_t len = (int32_t)strlen(tsTempDir); memcpy(tmpPath, tsTempDir, len); @@ -43,8 +44,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[8] = {0}; - taosRandStr(rand, tListLen(rand) - 1); + char rand[32] = {0}; + + sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); } diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index af8f2dcdaf1568ba5e10656aacb7d0958155dde2..0542407c3ba8e8d17c79f16ef0f3560e3bc10693 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -379,9 +379,9 @@ bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, nouse0, &o_rbytes, &rpackets, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets); - if (rbytes) *rbytes = o_rbytes; - if (tbytes) *tbytes = o_tbytes; - if (bytes) *bytes += (o_rbytes + o_tbytes); + if (rbytes) *rbytes += o_rbytes; + if (tbytes) *tbytes += o_tbytes; + if (bytes) *bytes += (o_rbytes + o_tbytes); } tfree(line); diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index 73345426c9a266c57ac286efac716f5c5490b8bf..a76010b37f4dec456d1be1134efbf6153451f911 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -121,6 +121,10 @@ bool checkTzPresent(char *str, int32_t len) { } +inline int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) { + return taosParseTime(timestr, time, len, timePrec, day_light); +} + char* 
forwardToTimeStringEnd(char* str) { int32_t i = 0; int32_t numOfSep = 0; diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c index 35ca64d79f8b7a883014fd6ca980300ede22d6e2..84c873202b685e690252890e347632e096a4b39e 100644 --- a/src/os/src/linux/linuxEnv.c +++ b/src/os/src/linux/linuxEnv.c @@ -39,6 +39,20 @@ void osInit() { strcpy(tsDataDir, "/var/lib/ProDB"); strcpy(tsLogDir, "/var/log/ProDB"); strcpy(tsScriptDir, "/etc/ProDB"); +#elif (_TD_KH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/kinghistorian"); + } + strcpy(tsDataDir, "/var/lib/kinghistorian"); + strcpy(tsLogDir, "/var/log/kinghistorian"); + strcpy(tsScriptDir, "/etc/kinghistorian"); +#elif (_TD_JH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/jh_taos"); + } + strcpy(tsDataDir, "/var/lib/jh_taos"); + strcpy(tsLogDir, "/var/log/jh_taos"); + strcpy(tsScriptDir, "/etc/jh_taos"); #else if (configDir[0] == 0) { strcpy(configDir, "/etc/taos"); diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c index 6f46bb43c75ff2c9735fc53a11bce585c1c213f6..6e087c9b29d7468b7c5a4e82c0f69b38f2c01223 100644 --- a/src/os/src/windows/wEnv.c +++ b/src/os/src/windows/wEnv.c @@ -33,12 +33,12 @@ void osInit() { strcpy(tsScriptDir, "C:/PowerDB/script"); #elif (_TD_TQ_ == true) if (configDir[0] == 0) { - strcpy(configDir, "C:/TQ/cfg"); + strcpy(configDir, "C:/TQueue/cfg"); } - strcpy(tsVnodeDir, "C:/TQ/data"); - strcpy(tsDataDir, "C:/TQ/data"); - strcpy(tsLogDir, "C:/TQ/log"); - strcpy(tsScriptDir, "C:/TQ/script"); + strcpy(tsVnodeDir, "C:/TQueue/data"); + strcpy(tsDataDir, "C:/TQueue/data"); + strcpy(tsLogDir, "C:/TQueue/log"); + strcpy(tsScriptDir, "C:/TQueue/script"); #elif (_TD_PRO_ == true) if (configDir[0] == 0) { strcpy(configDir, "C:/ProDB/cfg"); @@ -47,6 +47,22 @@ void osInit() { strcpy(tsDataDir, "C:/ProDB/data"); strcpy(tsLogDir, "C:/ProDB/log"); strcpy(tsScriptDir, "C:/ProDB/script"); +#elif (_TD_KH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, 
"C:/KingHistorian/cfg"); + } + strcpy(tsVnodeDir, "C:/KingHistorian/data"); + strcpy(tsDataDir, "C:/KingHistorian/data"); + strcpy(tsLogDir, "C:/KingHistorian/log"); + strcpy(tsScriptDir, "C:/KingHistorian/script"); +#elif (_TD_JH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "C:/jh_iot/cfg"); + } + strcpy(tsVnodeDir, "C:/jh_iot/data"); + strcpy(tsDataDir, "C:/jh_iot/data"); + strcpy(tsLogDir, "C:/jh_iot/log"); + strcpy(tsScriptDir, "C:/jh_iot/script"); #else if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 9e0de204d78cb54bea240a734f2373b709b6c6f9..c7221a6d301ae09e47bd68c76a90599fd85dff2a 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -43,7 +43,7 @@ ELSE () COMMAND git clean -f -d BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 > /dev/null && ./upx taosadapter || : + COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 68d160c3cfd5fd06df36fce98badea036d77907e..c00d8bdebdcead98943628a22d4b886a03532f15 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -53,7 +53,7 @@ static void 
httpStopThread(HttpThread *pThread) { break; } } while (0); - if (r) { + if (r && taosCheckPthreadValid(pThread->thread)) { pthread_cancel(pThread->thread); } #else @@ -63,15 +63,21 @@ static void httpStopThread(HttpThread *pThread) { httpError("%s, failed to create eventfd, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno)); pThread->stop = true; - pthread_cancel(pThread->thread); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_cancel(pThread->thread); + } } else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { httpError("%s, failed to call epoll_ctl, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno)); - pthread_cancel(pThread->thread); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_cancel(pThread->thread); + } } #endif // __APPLE__ - pthread_join(pThread->thread, NULL); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_join(pThread->thread, NULL); + } #ifdef __APPLE__ if (sv[0] != -1) { @@ -398,9 +404,13 @@ static bool httpReadData(HttpContext *pContext) { return true; } } else if (nread < 0) { - if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { + if (errno == EINTR) { + httpDebug("context:%p, fd:%d, read from socket error:%d, continue", pContext, pContext->fd, errno); + continue; + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno); - continue; // later again + httpReleaseContext(pContext/*, false */); + return false; } else { httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno); taosCloseSocket(pContext->fd); diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index fc06b920939b1edb0ebfb1ed16da9dcb60edfd3a..b93d85140c230ae5b010a3559fed9488cc6b0b9f 100644 --- a/src/plugins/monitor/src/monMain.c +++ 
b/src/plugins/monitor/src/monMain.c @@ -21,7 +21,6 @@ #include "tlog.h" #include "ttimer.h" #include "tutil.h" -#include "tscUtil.h" #include "tsclient.h" #include "dnode.h" #include "vnode.h" @@ -43,6 +42,8 @@ #define QUERY_ID_LEN 24 #define CHECK_INTERVAL 1000 +#define SQL_STR_FMT "\"%s\"" + static SMonHttpStatus monHttpStatusTable[] = { {"HTTP_CODE_CONTINUE", 100}, {"HTTP_CODE_SWITCHING_PROTOCOL", 101}, @@ -611,11 +612,11 @@ static int32_t monGetRowElemCharLen(TAOS_FIELD field, char *rowElem) { } static int32_t monBuildFirstEpSql(char *sql) { - return snprintf(sql, SQL_LENGTH, ", \"%s\"", tsFirst); + return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, tsFirst); } static int32_t monBuildVersionSql(char *sql) { - return snprintf(sql, SQL_LENGTH, ", \"%s\"", version); + return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, version); } static int32_t monBuildMasterUptimeSql(char *sql) { @@ -628,7 +629,8 @@ static int32_t monBuildMasterUptimeSql(char *sql) { while ((row = taos_fetch_row(result))) { for (int i = 0; i < num_fields; ++i) { - if (strcmp(fields[i].name, "role") == 0 && strcmp((char *)row[i], "master") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strcmp(fields[i].name, "role") == 0 && strncmp((char *)row[i], "master", charLen) == 0) { if (strcmp(fields[i + 1].name, "role_time") == 0) { int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); //master uptime in seconds @@ -768,11 +770,11 @@ static int32_t monGetVnodesTotalStats(char *ep, int32_t *totalVnodes, int32_t *totalVnodesAlive) { char subsql[TSDB_EP_LEN + 15]; memset(subsql, 0, sizeof(subsql)); - snprintf(subsql, TSDB_EP_LEN, "show vnodes \"%s\"", ep); + snprintf(subsql, TSDB_EP_LEN, "show vnodes "SQL_STR_FMT, ep); TAOS_RES *result = taos_query(tsMonitor.conn, subsql); int32_t code = taos_errno(result); if (code != TSDB_CODE_SUCCESS) { - monError("failed to execute cmd: show vnodes \"%s\", reason:%s", ep, tstrerror(code)); + monError("failed to execute cmd: show 
vnodes "SQL_STR_FMT", reason:%s", ep, tstrerror(code)); } TAOS_ROW row; @@ -931,11 +933,11 @@ static int32_t monBuildDnodeVnodesSql(char *sql) { int32_t vnodeNum = 0, masterNum = 0; char sqlStr[TSDB_EP_LEN + 15]; memset(sqlStr, 0, sizeof(sqlStr)); - snprintf(sqlStr, TSDB_EP_LEN + 14, "show vnodes \"%s\"", tsLocalEp); + snprintf(sqlStr, TSDB_EP_LEN + 14, "show vnodes "SQL_STR_FMT, tsLocalEp); TAOS_RES *result = taos_query(tsMonitor.conn, sqlStr); int32_t code = taos_errno(result); if (code != TSDB_CODE_SUCCESS) { - monError("failed to execute cmd: show vnodes \"%s\", reason:%s", tsLocalEp, tstrerror(code)); + monError("failed to execute cmd: show vnodes "SQL_STR_FMT", reason:%s", tsLocalEp, tstrerror(code)); } TAOS_ROW row; @@ -970,17 +972,18 @@ static int32_t monBuildDnodeMnodeSql(char *sql) { int32_t num_fields = taos_num_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result); + int32_t charLen; while ((row = taos_fetch_row(result))) { has_mnode_row = false; for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "end_point") == 0) { - int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); if (strncmp((char *)row[i], tsLocalEp, charLen) == 0) { has_mnode = true; has_mnode_row = true; } } else if (strcmp(fields[i].name, "role") == 0) { - int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); if (strncmp((char *)row[i], "master", charLen) == 0) { if (has_mnode_row) { monHasMnodeMaster = true; @@ -1107,7 +1110,7 @@ static int32_t checkCreateVgroupTable(int32_t vgId) { } static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { - char v_dnode_ids[256], v_dnode_status[1024]; + char v_dnode_ids[256] = {0}, v_dnode_status[1024] = {0}; int64_t ts = taosGetTimestampUs(); memset(sql, 0, SQL_LENGTH + 1); @@ -1122,6 +1125,7 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { int32_t 
num_fields = taos_num_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result); + int32_t charLen; while ((row = taos_fetch_row(result))) { int32_t vgId; int32_t pos = 0; @@ -1132,25 +1136,26 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { vgId = *(int32_t *)row[i]; if (checkCreateVgroupTable(vgId) == TSDB_CODE_SUCCESS) { memset(sql, 0, SQL_LENGTH + 1); - pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", \"%s\"", + pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", "SQL_STR_FMT, tsMonitorDbName, vgId, ts, dbName); } else { return TSDB_CODE_SUCCESS; } } else if (strcmp(fields[i].name, "tables") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); - + pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); } else if (strcmp(fields[i].name, "status") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); } else if (strcmp(fields[i].name, "onlines") == 0) { pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); } else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) { snprintf(v_dnode_ids, sizeof(v_dnode_ids), "%d;", *(int16_t *)row[i]); } else if (v_dnode_str && strcmp(v_dnode_str, "_status") == 0) { - snprintf(v_dnode_status, sizeof(v_dnode_status), "%s;", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + snprintf(v_dnode_status, charLen + 1, "%s;", (char *)row[i]); } else if (strcmp(fields[i].name, "compacting") == 0) { //flush dnode_ids and dnode_role in to sql - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\", \"%s\")", v_dnode_ids, v_dnode_status); + pos += snprintf(sql + pos, SQL_LENGTH, ", "SQL_STR_FMT", "SQL_STR_FMT")", v_dnode_ids, v_dnode_status); } } monDebug("save vgroups, sql:%s", sql); @@ -1209,15 +1214,19 
@@ static void monSaveSlowQueryInfo() { int32_t num_fields = taos_num_fields(result); TAOS_FIELD *fields = taos_fetch_fields(result); + int32_t charLen; while ((row = taos_fetch_row(result))) { for (int i = 0; i < num_fields; ++i) { if (strcmp(fields[i].name, "query_id") == 0) { has_slowquery = true; - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); } else if (strcmp(fields[i].name, "user") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); } else if (strcmp(fields[i].name, "qid") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); } else if (strcmp(fields[i].name, "created_time") == 0) { int64_t create_time = *(int64_t *)row[i]; create_time = convertTimePrecision(create_time, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO); @@ -1225,9 +1234,11 @@ static void monSaveSlowQueryInfo() { } else if (strcmp(fields[i].name, "time") == 0) { pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", *(int64_t *)row[i]); } else if (strcmp(fields[i].name, "ep") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); } else if (strcmp(fields[i].name, "sql") == 0) { - pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\")", (char *)row[i]); + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 2, 
", "SQL_STR_FMT")", (char *)row[i]); } } } diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1..88346a2e4e2e9282d2ec8b8c5264ca1ec23698a1 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1 +Subproject commit 88346a2e4e2e9282d2ec8b8c5264ca1ec23698a1 diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index 4b57843708ac8d1c24c69e68fe406b0edbeeabd2..37bf80ae5dcac8c9ee4d4816cc55ea9de5a81693 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -8,14 +8,22 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(query ${SRC}) SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w) -TARGET_LINK_LIBRARIES(query tsdb tutil lua) +TARGET_LINK_LIBRARIES(query tsdb tutil ${LINK_LUA}) IF (TD_LINUX) - TARGET_LINK_LIBRARIES(query m rt lua) + IF (TD_BUILD_LUA) + TARGET_LINK_LIBRARIES(query m rt ${LINK_LUA}) + ELSE () + TARGET_LINK_LIBRARIES(query m rt) + ENDIF () ADD_SUBDIRECTORY(tests) ENDIF () IF (TD_DARWIN) - TARGET_LINK_LIBRARIES(query m lua) + IF (TD_BUILD_LUA) + TARGET_LINK_LIBRARIES(query m ${LINK_LUA}) + ELSE () + TARGET_LINK_LIBRARIES(query m) + ENDIF () ADD_SUBDIRECTORY(tests) ENDIF () diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index c9a022d7a1210b31b81bf3895a9b804a03bd30ae..b1b82ae762d8c832ae47df515127525b7a1ae6cc 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -23,6 +23,7 @@ extern "C" { #include "os.h" #include "tname.h" +#include "texpr.h" #include "taosdef.h" #include "trpc.h" #include "tvariant.h" @@ -56,7 +57,7 @@ extern "C" { #define TSDB_FUNC_PRJ 21 #define TSDB_FUNC_TAGPRJ 22 -#define TSDB_FUNC_ARITHM 23 +#define TSDB_FUNC_SCALAR_EXPR 23 #define TSDB_FUNC_DIFF 24 #define TSDB_FUNC_FIRST_DST 25 @@ -69,15 +70,13 @@ extern "C" { #define TSDB_FUNC_TID_TAG 31 #define TSDB_FUNC_DERIVATIVE 32 -#define TSDB_FUNC_CEIL 33 
-#define TSDB_FUNC_FLOOR 34 -#define TSDB_FUNC_ROUND 35 +#define TSDB_FUNC_CSUM 33 +#define TSDB_FUNC_MAVG 34 +#define TSDB_FUNC_SAMPLE 35 -#define TSDB_FUNC_CSUM 36 -#define TSDB_FUNC_MAVG 37 -#define TSDB_FUNC_SAMPLE 38 +#define TSDB_FUNC_BLKINFO 36 -#define TSDB_FUNC_BLKINFO 39 +#define TSDB_FUNC_ELAPSED 37 /////////////////////////////////////////// // the following functions is not implemented. @@ -93,7 +92,6 @@ extern "C" { #define TSDB_FUNCSTATE_OF 0x10u // outer forward #define TSDB_FUNCSTATE_NEED_TS 0x20u // timestamp is required during query processing #define TSDB_FUNCSTATE_SELECTIVITY 0x40u // selectivity functions, can exists along with tag columns -#define TSDB_FUNCSTATE_SCALAR 0x80u #define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF #define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF @@ -128,14 +126,14 @@ enum { #define QUERY_IS_PROJECTION_QUERY(type) (((type)&TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) #define QUERY_IS_FREE_RESOURCE(type) (((type)&TSDB_QUERY_TYPE_FREE_RESOURCE) != 0) -typedef struct SArithmeticSupport { +typedef struct SScalarExprSupport { SExprInfo *pExprInfo; int32_t numOfCols; SColumnInfo *colList; void *exprList; // client side used int32_t offset; char** data; -} SArithmeticSupport; +} SScalarExprSupport; typedef struct SQLPreAggVal { bool isSet; // statistics info set or not @@ -202,7 +200,7 @@ typedef struct SQLFunctionCtx { SResultRowCellInfo *resultInfo; - int16_t colId; + int16_t colId; // used for user-specified constant value SExtTagsInfo tagInfo; SPoint1 start; SPoint1 end; @@ -234,7 +232,6 @@ int32_t isValidFunction(const char* name, int32_t len); #define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0) #define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0) #define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) -#define IS_SCALAR_FUNCTION(x) (((x)&TSDB_FUNCSTATE_SCALAR) != 0) // determine the real 
data need to calculated the result enum { @@ -256,7 +253,7 @@ void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw); void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDist); /* global sql function array */ -extern struct SAggFunctionInfo aAggs[]; +extern struct SAggFunctionInfo aAggs[40]; extern int32_t functionCompatList[]; // compatible check array list @@ -273,11 +270,11 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, int32_t bufLen) { pResInfo->initialized = true; // the this struct has been initialized flag - + pResInfo->complete = false; pResInfo->hasResult = false; pResInfo->numOfRes = 0; - + memset(GET_ROWCELL_INTERBUF(pResInfo), 0, bufLen); } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index fe4fb6c950d4f3e0186668d957900934ba243e5d..ba277b23018a58e3ed29122761aa65506c94078a 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -230,6 +230,7 @@ typedef struct SQueryAttr { bool diffQuery; // is diff query bool simpleAgg; bool pointInterpQuery; // point interpolation query + bool needTableSeqScan; // need scan table by table bool needReverseScan; // need reverse scan bool distinct; // distinct query or not bool stateWindow; // window State on sub/normal table @@ -310,7 +311,7 @@ typedef struct SQueryRuntimeEnv { STSCursor cur; char* tagVal; // tag value of current data block - SArithmeticSupport *sasArray; + SScalarExprSupport*sasArray; SSDataBlock *outputBuf; STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure @@ -685,7 +686,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters); STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf); STableQueryInfo* createTmpTableQueryInfo(STimeWindow win); -int32_t 
buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg); +int32_t buildScalarExprFromMsg(SExprInfo * pExprInfo, void *pQueryMsg); bool isQueryKilled(SQInfo *pQInfo); int32_t checkForQueryBuf(size_t numOfTables); @@ -716,4 +717,5 @@ int32_t getMaximumIdleDurationSec(); void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t type); int32_t getColumnDataFromId(void *param, int32_t id, void **data); +void qInfoLogSSDataBlock(SSDataBlock* block, char* location); #endif // TDENGINE_QEXECUTOR_H diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index 6728809c4f685a9afd0306abef88e9cbe2f63256..ccdfd5c05994b71bd911c3a66d02dc1ffa58a474 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -236,6 +236,9 @@ typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); +void tColDataMergeSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); + + void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); int32_t compare_sa(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); diff --git a/src/query/inc/qFilter.h b/src/query/inc/qFilter.h index e10d7fdfb4f65bb2d9ed2d14ba3e2d04f9d76706..fe9ef0f47f1e4cf353f9dfbd0e9956e7690debfe 100644 --- a/src/query/inc/qFilter.h +++ b/src/query/inc/qFilter.h @@ -105,6 +105,7 @@ typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const v typedef int32_t(*filter_desc_compare_func)(const void *, const void *); typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SDataStatis *, int16_t); typedef int32_t (*filer_get_col_from_id)(void *, int32_t, void **); +typedef int32_t (*filer_get_col_from_name)(void *, int32_t, char*, void **); typedef struct SFilterRangeCompare { 
int64_t s; @@ -237,11 +238,12 @@ typedef struct SFilterInfo { uint32_t blkGroupNum; uint32_t *blkUnits; int8_t *blkUnitRes; - + void *pTable; + SFilterPCtx pctx; } SFilterInfo; -#define FILTER_NO_MERGE_DATA_TYPE(t) ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR) +#define FILTER_NO_MERGE_DATA_TYPE(t) ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON) #define FILTER_NO_MERGE_OPTR(o) ((o) == TSDB_RELATION_ISNULL || (o) == TSDB_RELATION_NOTNULL || (o) == FILTER_DUMMY_EMPTY_OPTR) #define MR_EMPTY_RES(ctx) (ctx->rs == NULL) @@ -286,6 +288,7 @@ typedef struct SFilterInfo { #define FILTER_GET_COL_FIELD_DATA(fi, ri) ((char *)(fi)->data + ((SSchema *)((fi)->desc))->bytes * (ri)) #define FILTER_GET_VAL_FIELD_TYPE(fi) (((tVariant *)((fi)->desc))->nType) #define FILTER_GET_VAL_FIELD_DATA(fi) ((char *)(fi)->data) +#define FILTER_GET_JSON_VAL_FIELD_DATA(fi) ((char *)(fi)->desc) #define FILTER_GET_TYPE(fl) ((fl) & FLD_TYPE_MAX) #define FILTER_GROUP_UNIT(i, g, uid) ((i)->units + (g)->unitIdxs[uid]) @@ -298,6 +301,7 @@ typedef struct SFilterInfo { #define FILTER_UNIT_COL_SIZE(i, u) FILTER_GET_COL_FIELD_SIZE(FILTER_UNIT_LEFT_FIELD(i, u)) #define FILTER_UNIT_COL_ID(i, u) FILTER_GET_COL_FIELD_ID(FILTER_UNIT_LEFT_FIELD(i, u)) #define FILTER_UNIT_VAL_DATA(i, u) FILTER_GET_VAL_FIELD_DATA(FILTER_UNIT_RIGHT_FIELD(i, u)) +#define FILTER_UNIT_JSON_VAL_DATA(i, u) FILTER_GET_JSON_VAL_FIELD_DATA(FILTER_UNIT_RIGHT_FIELD(i, u)) #define FILTER_UNIT_COL_IDX(u) ((u)->left.idx) #define FILTER_UNIT_OPTR(u) ((u)->compare.optr) #define FILTER_UNIT_COMP_FUNC(u) ((u)->compare.func) @@ -324,6 +328,7 @@ typedef struct SFilterInfo { extern int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options); extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols); extern int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from_id fp); +extern int32_t 
filterSetJsonColFieldData(SFilterInfo *info, void *param, filer_get_col_from_name fp); extern int32_t filterGetTimeRange(SFilterInfo *info, STimeWindow *win); extern int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar); extern int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo); diff --git a/src/query/inc/qPercentile.h b/src/query/inc/qPercentile.h index f5b770593c1e30df8c3459676ffa42bbfc9cf0ae..55085c0404c9baa357c2255e1e466dd31e5d4237 100644 --- a/src/query/inc/qPercentile.h +++ b/src/query/inc/qPercentile.h @@ -33,7 +33,7 @@ typedef struct MinMaxEntry { union { double dMaxVal; int64_t i64MaxVal; - int64_t u64MaxVal; + uint64_t u64MaxVal; }; } MinMaxEntry; diff --git a/src/query/inc/qScript.h b/src/query/inc/qScript.h index 574bb51368afeaeddef5fbd5c5bd8469fbe0cdef..2dc9b5812bbfa34dcebdde5438516d3be42a51d2 100644 --- a/src/query/inc/qScript.h +++ b/src/query/inc/qScript.h @@ -15,7 +15,7 @@ #ifndef TDENGINE_QSCRIPT_H #define TDENGINE_QSCRIPT_H - +#ifdef LUA_EMBEDDED #include #include #include @@ -78,5 +78,5 @@ void destroyScriptCtx(void *pScriptCtx); int32_t scriptEnvPoolInit(); void scriptEnvPoolCleanup(); bool isValidScript(char *script, int32_t len); - +#endif //LUA_EMBEDDED #endif //TDENGINE_QSCRIPT_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index eb3a06e01d7034ebe4ee474574aa077dcbb5e87e..f3253c0d8396582454f9d4ef39e09f6ade181d5f 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -28,15 +28,17 @@ extern "C" { #define ParseTOKENTYPE SStrToken -#define NON_ARITHMEIC_EXPR 0 -#define NORMAL_ARITHMETIC 1 -#define AGG_ARIGHTMEIC 2 +#define SQLEXPR_TYPE_UNASSIGNED 0 +#define SQLEXPR_TYPE_SCALAR 1 +#define SQLEXPR_TYPE_AGG 2 +#define SQLEXPR_TYPE_VALUE 3 enum SQL_NODE_TYPE { SQL_NODE_TABLE_COLUMN= 1, SQL_NODE_SQLFUNCTION = 2, SQL_NODE_VALUE = 3, SQL_NODE_EXPR = 4, + SQL_NODE_DATA_TYPE = 5, }; enum SQL_NODE_FROM_TYPE { @@ -79,6 +81,15 @@ typedef struct tVariantListItem { uint8_t 
sortOrder; } tVariantListItem; +typedef struct CommonItem { + union { + tVariant pVar; + struct tSqlExpr *jsonExp; + }; + bool isJsonExp; + uint8_t sortOrder; +} CommonItem; + typedef struct SIntervalVal { int32_t token; SStrToken interval; @@ -161,7 +172,6 @@ typedef struct SAlterTableInfo { SStrToken name; int16_t tableType; int16_t type; - STagData tagData; SArray *pAddColumns; // SArray SArray *varList; // set t=val or: change src dst, SArray } SAlterTableInfo; @@ -263,6 +273,7 @@ typedef struct tSqlExpr { int32_t functionId; // function id, todo remove it SStrToken columnName; // table column info + TAOS_FIELD dataType; // data type tVariant value; // the use input value SStrToken exprToken; // original sql expr string uint32_t flags; // todo remove it @@ -278,6 +289,7 @@ typedef struct tSqlExprItem { bool distinct; } tSqlExprItem; +SArray *commonItemAppend(SArray *pList, tVariant *pVar, tSqlExpr *jsonExp, bool isJsonExp, uint8_t sortOrder); SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); @@ -291,6 +303,7 @@ SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrT tSqlExpr *tSqlExprCreateTimestamp(SStrToken *pToken, int32_t optrType); tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optrType); tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType); +tSqlExpr *tSqlExprCreateFuncWithParams(SSqlInfo *pInfo, tSqlExpr* col, TAOS_FIELD *colType, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType); SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken); tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType); diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index 422fdd13a6a6b17d63c35880eab27cad5272621a..d47189691ebbe2c4ec3ad55dd72306686586a56e 100644 --- a/src/query/inc/qTableMeta.h +++ 
b/src/query/inc/qTableMeta.h @@ -28,6 +28,7 @@ typedef struct STblCond { typedef struct SJoinNode { uint64_t uid; int16_t tagColId; + char tagJsonKeyName[TSDB_MAX_JSON_KEY_LEN + 1]; // for tag json key SArray* tsJoin; SArray* tagJoin; } SJoinNode; @@ -165,6 +166,7 @@ typedef struct SQueryInfo { bool stateWindow; bool globalMerge; bool multigroupResult; + bool isStddev; } SQueryInfo; /** diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index ce607f0fe20a2743579e99e71ddf78fc2e1dbcdc..0882df77c2a8bc38560269ce093568fd96467dae 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -106,5 +106,4 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t* offset); int32_t initUdfInfo(SUdfInfo* pUdfInfo); - #endif // TDENGINE_QUERYUTIL_H diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 30441334210727a87f8e1a042981be89b8de22ef..7f0ca21c7a616198cbbf9956d373b3712ecbe1ec 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -6,19 +6,19 @@ %default_type {SStrToken} %extra_argument {SSqlInfo* pInfo} -%fallback ID BOOL TINYINT SMALLINT INTEGER BIGINT FLOAT DOUBLE STRING TIMESTAMP BINARY NCHAR. +%fallback ID BOOL TINYINT SMALLINT INTEGER BIGINT FLOAT DOUBLE STRING TIMESTAMP BINARY NCHAR JSON. %left OR. %left AND. %right NOT. -%left EQ NE ISNULL NOTNULL IS LIKE MATCH NMATCH GLOB BETWEEN IN. +%left EQ NE ISNULL NOTNULL IS LIKE MATCH NMATCH CONTAINS GLOB BETWEEN IN. %left GT GE LT LE. %left BITAND BITOR LSHIFT RSHIFT. %left PLUS MINUS. %left DIVIDE TIMES. %left STAR SLASH REM. -%left CONCAT. %right UMINUS UPLUS BITNOT. +%right ARROW. %include { #include @@ -253,7 +253,7 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K intitemlist(A) ::= intitemlist(X) COMMA intitem(Y). { A = tVariantListAppend(X, &Y, -1); } intitemlist(A) ::= intitem(X). { A = tVariantListAppend(NULL, &X, -1); } -intitem(A) ::= INTEGER(X). 
{ toTSDBType(X.type); tVariantCreate(&A, &X, true); } +intitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } %type keep {SArray*} %destructor keep {taosArrayDestroy($$);} @@ -438,39 +438,49 @@ column(A) ::= ids(X) typename(Y). { tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } tagitemlist(A) ::= tagitem(X). { A = tVariantListAppend(NULL, &X, -1); } -tagitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } -tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } -tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } -tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } -tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X, true); } -tagitem(A) ::= NOW(X). { X.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&A, &X, true);} +tagitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X); } +tagitem(A) ::= NOW(X). { X.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreateExt(&A, &X, TK_NOW, true);} + +tagitem(A) ::= NOW PLUS VARIABLE(X).{ + X.type = TSDB_DATA_TYPE_TIMESTAMP; + tVariantCreateExt(&A, &X, TK_PLUS, true); +} + +tagitem(A) ::= NOW MINUS VARIABLE(X).{ + X.type = TSDB_DATA_TYPE_TIMESTAMP; + tVariantCreateExt(&A, &X, TK_MINUS, true); +} tagitem(A) ::= MINUS(X) INTEGER(Y).{ X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X, true); + tVariantCreate(&A, &X); } tagitem(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X, true); + tVariantCreate(&A, &X); } tagitem(A) ::= PLUS(X) INTEGER(Y). 
{ X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X, true); + tVariantCreate(&A, &X); } tagitem(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X, true); + tVariantCreate(&A, &X); } //////////////////////// The SELECT statement ///////////////////////////////// @@ -609,7 +619,7 @@ fill_opt(N) ::= . { N = 0; } fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { tVariant A = {0}; toTSDBType(Y.type); - tVariantCreate(&A, &Y, true); + tVariantCreate(&A, &Y); tVariantListInsert(X, &A, -1, 0); N = X; @@ -630,26 +640,34 @@ sliding_opt(K) ::= . {K.n = 0; K.z = NULL; K.type = 0 %type sortlist {SArray*} %destructor sortlist {taosArrayDestroy($$);} -%type sortitem {tVariant} -%destructor sortitem {tVariantDestroy(&$$);} - orderby_opt(A) ::= . {A = 0;} orderby_opt(A) ::= ORDER BY sortlist(X). {A = X;} sortlist(A) ::= sortlist(X) COMMA item(Y) sortorder(Z). { - A = tVariantListAppend(X, &Y, Z); + A = commonItemAppend(X, &Y, NULL, false, Z); +} + +sortlist(A) ::= sortlist(X) COMMA arrow(Y) sortorder(Z). { + A = commonItemAppend(X, NULL, Y, true, Z); } sortlist(A) ::= item(Y) sortorder(Z). { - A = tVariantListAppend(NULL, &Y, Z); + A = commonItemAppend(NULL, &Y, NULL, false, Z); +} + +sortlist(A) ::= arrow(Y) sortorder(Z). { + A = commonItemAppend(NULL, NULL, Y, true, Z); } %type item {tVariant} -item(A) ::= ids(X) cpxName(Y). { +item(A) ::= ID(X). { toTSDBType(X.type); - X.n += Y.n; - - tVariantCreate(&A, &X, true); + tVariantCreate(&A, &X); +} +item(A) ::= ID(X) DOT ID(Y). { + toTSDBType(X.type); + X.n += (1+Y.n); + tVariantCreate(&A, &X); } %type sortorder {int} @@ -667,11 +685,19 @@ groupby_opt(A) ::= . { A = 0;} groupby_opt(A) ::= GROUP BY grouplist(X). { A = X;} grouplist(A) ::= grouplist(X) COMMA item(Y). { - A = tVariantListAppend(X, &Y, -1); + A = commonItemAppend(X, &Y, NULL, false, -1); +} + +grouplist(A) ::= grouplist(X) COMMA arrow(Y). 
{ + A = commonItemAppend(X, NULL, Y, true, -1); } grouplist(A) ::= item(X). { - A = tVariantListAppend(NULL, &X, -1); + A = commonItemAppend(NULL, &X, NULL, false, -1); +} + +grouplist(A) ::= arrow(X). { + A = commonItemAppend(NULL, NULL, X, true, -1); } //having clause, ignore the input condition in having @@ -734,6 +760,9 @@ expr(A) ::= ID(X) LP exprlist(Y) RP(E). { tStrTokenAppend(pInfo->funcs, &X); A = // for parsing sql functions with wildcard for parameters. e.g., count(*)/first(*)/last(*) operation expr(A) ::= ID(X) LP STAR RP(Y). { tStrTokenAppend(pInfo->funcs, &X); A = tSqlExprCreateFunction(NULL, &X, &Y, X.type); } +// for parsing sql function CAST(column as typename) +expr(A) ::= ID(X) LP expr(B) AS typename(C) RP(Y). { tStrTokenAppend(pInfo->funcs, &X); A = tSqlExprCreateFuncWithParams(pInfo, B, &C, &X, &Y, X.type); } + // is (not) null expression expr(A) ::= expr(X) IS NULL. {A = tSqlExprCreate(X, NULL, TK_ISNULL);} expr(A) ::= expr(X) IS NOT NULL. {A = tSqlExprCreate(X, NULL, TK_NOTNULL);} @@ -765,6 +794,18 @@ expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); } expr(A) ::= expr(X) MATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_MATCH); } expr(A) ::= expr(X) NMATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_NMATCH); } +// contains expression +expr(A) ::= ID(X) CONTAINS STRING(Y). { tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &X, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &Y, TK_STRING); A = tSqlExprCreate(S, M, TK_CONTAINS); } +expr(A) ::= ID(X) DOT ID(Y) CONTAINS STRING(Z). { X.n += (1+Y.n); tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &X, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &Z, TK_STRING); A = tSqlExprCreate(S, M, TK_CONTAINS); } + +// arrow expression +%type arrow {tSqlExpr*} +%destructor arrow {tSqlExprDestroy($$);} +arrow(A) ::= ID(X) ARROW STRING(Y). 
{tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &X, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &Y, TK_STRING); A = tSqlExprCreate(S, M, TK_ARROW); } +arrow(A) ::= ID(X) DOT ID(Y) ARROW STRING(Z). {X.n += (1+Y.n); tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &X, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &Z, TK_STRING); A = tSqlExprCreate(S, M, TK_ARROW); } + +expr(A) ::= arrow(X). {A = X;} + //in expression expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSqlExprCreate(X, (tSqlExpr*)Y, TK_IN); } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 7545823d7d5c5b2140a7504bbe43041df53bb9f5..e033650b74fe503c73f75b95acdc0e466a241e9a 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -196,6 +196,20 @@ typedef struct { char *taglists; } SSampleFuncInfo; +typedef struct SElapsedInfo { + int8_t hasResult; + TSKEY min; + TSKEY max; +} SElapsedInfo; + +typedef struct { + bool valueAssigned; + union { + int64_t i64Prev; + double d64Prev; + }; +} SDiffFuncInfo; + int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int32_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) { if (!isValidDataType(dataType)) { @@ -203,21 +217,25 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_TSC_INVALID_OPERATION; } + assert(!TSDB_FUNC_IS_SCALAR(functionId)); + assert(functionId != TSDB_FUNC_SCALAR_EXPR); if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ || - functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_CEIL || - functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) + functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) { *type = (int16_t)dataType; *bytes = dataBytes; if (functionId == 
TSDB_FUNC_INTERP) { *interBytes = sizeof(SInterpInfoDetail); + } else if (functionId == TSDB_FUNC_DIFF) { + *interBytes = sizeof(SDiffFuncInfo); } else { *interBytes = 0; } + return TSDB_CODE_SUCCESS; } @@ -242,13 +260,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = 0; return TSDB_CODE_SUCCESS; } - - if (functionId == TSDB_FUNC_ARITHM) { - *type = TSDB_DATA_TYPE_DOUBLE; - *bytes = sizeof(double); - *interBytes = 0; - return TSDB_CODE_SUCCESS; - } + if (functionId == TSDB_FUNC_TS_COMP) { *type = TSDB_DATA_TYPE_BINARY; @@ -360,6 +372,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = sizeof(STwaInfo); *interBytes = *bytes; return TSDB_CODE_SUCCESS; + } else if (functionId == TSDB_FUNC_ELAPSED) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SElapsedInfo); + *interBytes = *bytes; + return TSDB_CODE_SUCCESS; } } @@ -460,6 +477,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = sizeof(SStddevdstInfo); *interBytes = (*bytes); + } else if (functionId == TSDB_FUNC_ELAPSED) { + *type = TSDB_DATA_TYPE_DOUBLE; + *bytes = tDataTypes[*type].bytes; + *interBytes = sizeof(SElapsedInfo); } else { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -469,7 +490,19 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI // TODO use hash table int32_t isValidFunction(const char* name, int32_t len) { - for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) { + + for (int32_t i = 0; i < TSDB_FUNC_SCALAR_MAX_NUM; ++i) { + int32_t nameLen = (int32_t) strlen(aScalarFunctions[i].name); + if (len != nameLen) { + continue; + } + + if (strncasecmp(aScalarFunctions[i].name, name, len) == 0) { + return aScalarFunctions[i].functionId; + } + } + + for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) { int32_t nameLen = (int32_t) strlen(aAggs[i].name); if (len != nameLen) { continue; @@ -479,7 +512,6 @@ int32_t isValidFunction(const char* name, 
int32_t len) { return i; } } - return -1; } @@ -1069,11 +1101,11 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, if ((*retVal < pData[i]) ^ isMin) { *retVal = pData[i]; - TSKEY k = tsList[i]; - - DO_UPDATE_TAG_COLUMNS(pCtx, k); + if(tsList) { + TSKEY k = tsList[i]; + DO_UPDATE_TAG_COLUMNS(pCtx, k); + } } - *notNullElems += 1; } #if defined(_DEBUG_VIEW) @@ -2903,8 +2935,7 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) { } static void col_project_function(SQLFunctionCtx *pCtx) { - // the number of output rows should not affect the final number of rows, so set it to be 0 - if (pCtx->numOfParams == 2) { + if (pCtx->colId <= TSDB_UD_COLUMN_INDEX && pCtx->colId > TSDB_RES_COL_ID) { // user-specified constant value return; } @@ -2938,6 +2969,7 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { assert(pCtx->inputBytes == pCtx->outputBytes); tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); + char* data = pCtx->pOutput; pCtx->pOutput += pCtx->outputBytes; @@ -2984,18 +3016,16 @@ static void full_copy_function(SQLFunctionCtx *pCtx) { } } -enum { - INITIAL_VALUE_NOT_ASSIGNED = 0, -}; static bool diff_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { if (!function_setup(pCtx, pResInfo)) { return false; } - // diff function require the value is set to -1 - pCtx->param[1].nType = INITIAL_VALUE_NOT_ASSIGNED; - return false; + SDiffFuncInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); + pDiffInfo->valueAssigned = false; + pDiffInfo->i64Prev = 0; + return true; } static bool deriv_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { @@ -3201,22 +3231,14 @@ static void deriv_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += notNullElems; } -#define DIFF_IMPL(ctx, d, type) \ - do { \ - if ((ctx)->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { \ - (ctx)->param[1].nType = (ctx)->inputType; \ - *(type *)&(ctx)->param[1].i64 = *(type *)(d); \ - } else { \ - 
*(type *)(ctx)->pOutput = *(type *)(d) - (*(type *)(&(ctx)->param[1].i64)); \ - *(type *)(&(ctx)->param[1].i64) = *(type *)(d); \ - *(int64_t *)(ctx)->ptsOutputBuf = GET_TS_DATA(ctx, index); \ - } \ - } while (0); // TODO difference in date column static void diff_function(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SDiffFuncInfo *pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); + void *data = GET_INPUT_DATA_LIST(pCtx); - bool isFirstBlock = (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED); + bool isFirstBlock = (pDiffInfo->valueAssigned == false); int32_t notNullElems = 0; @@ -3236,15 +3258,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int32_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { + *pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3258,15 +3280,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = pData[i] - pCtx->param[1].i64; // direct previous may be null + if (pDiffInfo->valueAssigned) { + *pOutput = pData[i] - pDiffInfo->i64Prev; // direct previous may be null *pTimestamp = (tsList != NULL)? 
tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3280,15 +3302,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].dKey = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->d64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3302,15 +3324,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (float)(pData[i] - pCtx->param[1].dKey); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (float)(pData[i] - pDiffInfo->d64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].dKey = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->d64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3324,15 +3346,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int16_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? 
tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3347,15 +3369,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int8_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3365,7 +3387,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } // initial value is not set yet - if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED || notNullElems <= 0) { + if (!pDiffInfo->valueAssigned || notNullElems <= 0) { /* * 1. current block and blocks before are full of null * 2. 
current block may be null value @@ -3384,8 +3406,8 @@ static void diff_function(SQLFunctionCtx *pCtx) { } } -char *getArithColumnData(void *param, const char* name, int32_t colId) { - SArithmeticSupport *pSupport = (SArithmeticSupport *)param; +char *getScalarExprColumnData(void *param, const char* name, int32_t colId) { + SScalarExprSupport *pSupport = (SScalarExprSupport *)param; int32_t index = -1; for (int32_t i = 0; i < pSupport->numOfCols; ++i) { @@ -3399,11 +3421,12 @@ char *getArithColumnData(void *param, const char* name, int32_t colId) { return pSupport->data[index] + pSupport->offset * pSupport->colList[index].bytes; } -static void arithmetic_function(SQLFunctionCtx *pCtx) { +static void scalar_expr_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += pCtx->size; - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; - - arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); + SScalarExprSupport *sas = (SScalarExprSupport *)pCtx->param[1].pz; + tExprOperandInfo output; + output.data = pCtx->pOutput; + exprTreeNodeTraverse(sas->pExprInfo->pExpr, pCtx->size, &output, sas, pCtx->order, getScalarExprColumnData); } #define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ @@ -3448,7 +3471,7 @@ static void spread_function(SQLFunctionCtx *pCtx) { SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t numOfElems = 0; - + // todo : opt with pre-calculated result // column missing cause the hasNull to be true if (pCtx->preAggVals.isSet) { @@ -3551,7 +3574,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { * the type of intermediate data is binary */ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + if (pCtx->currentStage == MERGE_STAGE) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); @@ -4415,185 +4438,6 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { doFinalizer(pCtx); } 
-///////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#define CFR_SET_VAL(type, data, pCtx, func, i, step) \ - do { \ - type *pData = (type *) data; \ - type *pOutput = (type *) pCtx->pOutput; \ - \ - for (; i < pCtx->size && i >= 0; i += step) { \ - if (pCtx->hasNull && isNull((const char *)&pData[i], pCtx->inputType)) { \ - *pOutput++ = pData[i]; \ - } else { \ - *pOutput++ = (type)func((double)pData[i]); \ - } \ - } \ - } while (0) - -static void ceil_function(SQLFunctionCtx *pCtx) { - void *data = GET_INPUT_DATA_LIST(pCtx); - - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step); - break; - }; - case TSDB_DATA_TYPE_UINT: { - CFR_SET_VAL(uint32_t, data, pCtx, ceil, i, step); - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - CFR_SET_VAL(int64_t, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - CFR_SET_VAL(uint64_t, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - CFR_SET_VAL(double, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - CFR_SET_VAL(float, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - CFR_SET_VAL(int16_t, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - CFR_SET_VAL(uint16_t, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - CFR_SET_VAL(int8_t, data, pCtx, ceil, i, step); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - CFR_SET_VAL(uint8_t, data, pCtx, ceil, i, step); - break; - } - default: - qError("error input type"); - } - - GET_RES_INFO(pCtx)->numOfRes += pCtx->size; -} - -static void floor_function(SQLFunctionCtx *pCtx) { - void *data = GET_INPUT_DATA_LIST(pCtx); - - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - int32_t i = (pCtx->order 
== TSDB_ORDER_ASC) ? 0 : pCtx->size - 1; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - CFR_SET_VAL(int32_t, data, pCtx, floor, i, step); - break; - }; - case TSDB_DATA_TYPE_UINT: { - CFR_SET_VAL(uint32_t, data, pCtx, floor, i, step); - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - CFR_SET_VAL(int64_t, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - CFR_SET_VAL(uint64_t, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - CFR_SET_VAL(double, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - CFR_SET_VAL(float, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - CFR_SET_VAL(int16_t, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - CFR_SET_VAL(uint16_t, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - CFR_SET_VAL(int8_t, data, pCtx, floor, i, step); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - CFR_SET_VAL(uint8_t, data, pCtx, floor, i, step); - break; - } - default: - qError("error input type"); - } - - GET_RES_INFO(pCtx)->numOfRes += pCtx->size; -} - -static void round_function(SQLFunctionCtx *pCtx) { - void *data = GET_INPUT_DATA_LIST(pCtx); - - int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); - int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 
0 : pCtx->size - 1; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - CFR_SET_VAL(int32_t, data, pCtx, round, i, step); - break; - }; - case TSDB_DATA_TYPE_UINT: { - CFR_SET_VAL(uint32_t, data, pCtx, round, i, step); - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - CFR_SET_VAL(int64_t, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - CFR_SET_VAL(uint64_t, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - CFR_SET_VAL(double, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - CFR_SET_VAL(float, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - CFR_SET_VAL(int16_t, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - CFR_SET_VAL(uint16_t, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - CFR_SET_VAL(int8_t, data, pCtx, round, i, step); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - CFR_SET_VAL(uint8_t, data, pCtx, round, i, step); - break; - } - default: - qError("error input type"); - } - GET_RES_INFO(pCtx)->numOfRes += pCtx->size; -} - -#undef CFR_SET_VAL - ////////////////////////////////////////////////////////////////////////////////// //cumulative_sum function @@ -4921,6 +4765,120 @@ static void sample_func_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } +static SElapsedInfo * getSElapsedInfo(SQLFunctionCtx *pCtx) { + if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) { + return (SElapsedInfo *)pCtx->pOutput; + } else { + return GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + } +} + +static bool elapsedSetup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { + return false; + } + + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + pInfo->min = MAX_TS_KEY; + pInfo->max = 0; + pInfo->hasResult = 0; + + return true; +} + +static int32_t elapsedRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { + return BLK_DATA_NO_NEEDED; +} + 
+static void elapsedFunction(SQLFunctionCtx *pCtx) { + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + if (pCtx->preAggVals.isSet) { + if (pInfo->min == MAX_TS_KEY) { + pInfo->min = pCtx->preAggVals.statis.min; + pInfo->max = pCtx->preAggVals.statis.max; + } else { + if (pCtx->order == TSDB_ORDER_ASC) { + pInfo->max = pCtx->preAggVals.statis.max; + } else { + pInfo->min = pCtx->preAggVals.statis.min; + } + } + } else { + // 0 == pCtx->size mean this is end interpolation. + if (0 == pCtx->size) { + if (pCtx->order == TSDB_ORDER_DESC) { + if (pCtx->end.key != INT64_MIN) { + pInfo->min = pCtx->end.key; + } + } else { + if (pCtx->end.key != INT64_MIN) { + pInfo->max = pCtx->end.key + 1; + } + } + goto elapsedOver; + } + + int64_t *ptsList = (int64_t *)GET_INPUT_DATA_LIST(pCtx); + // pCtx->start.key == INT64_MIN mean this is first window or there is actual start point of current window. + // pCtx->end.key == INT64_MIN mean current window does not end in current data block or there is actual end point of current window. + if (pCtx->order == TSDB_ORDER_DESC) { + if (pCtx->start.key == INT64_MIN) { + pInfo->max = (pInfo->max < ptsList[pCtx->size - 1]) ? ptsList[pCtx->size - 1] : pInfo->max; + } else { + pInfo->max = pCtx->start.key + 1; + } + + if (pCtx->end.key != INT64_MIN) { + pInfo->min = pCtx->end.key; + } else { + pInfo->min = ptsList[0]; + } + } else { + if (pCtx->start.key == INT64_MIN) { + pInfo->min = (pInfo->min > ptsList[0]) ? 
ptsList[0] : pInfo->min; + } else { + pInfo->min = pCtx->start.key; + } + + if (pCtx->end.key != INT64_MIN) { + pInfo->max = pCtx->end.key + 1; + } else { + pInfo->max = ptsList[pCtx->size - 1]; + } + } + } + +elapsedOver: + SET_VAL(pCtx, pCtx->size, 1); + + if (pCtx->size > 0) { + GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; + pInfo->hasResult = DATA_SET_FLAG; + } +} + +static void elapsedMerge(SQLFunctionCtx *pCtx) { + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + memcpy(pInfo, pCtx->pInput, (size_t)pCtx->inputBytes); + GET_RES_INFO(pCtx)->hasResult = pInfo->hasResult; +} + +static void elapsedFinalizer(SQLFunctionCtx *pCtx) { + if (GET_RES_INFO(pCtx)->hasResult != DATA_SET_FLAG) { + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + return; + } + + SElapsedInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + *(double *)pCtx->pOutput = (double)pInfo->max - (double)pInfo->min; + if (pCtx->numOfParams > 0 && pCtx->param[0].i64 > 0) { + *(double *)pCtx->pOutput = *(double *)pCtx->pOutput / pCtx->param[0].i64; + } + GET_RES_INFO(pCtx)->numOfRes = 1; + + doFinalizer(pCtx); +} + ///////////////////////////////////////////////////////////////////////////////////////////// /* * function compatible list. 
@@ -4939,13 +4897,13 @@ int32_t functionCompatList[] = { 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, // tag, colprj, tagprj, arithm, diff, first_dist, last_dist, stddev_dst, interp rate, irate 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, - // tid_tag, deriv, ceil, floor, round, csum, mavg, sample, - 6, 8, 1, 1, 1, -1, -1, -1, - // block_info - 7 + // tid_tag, deriv, csum, mavg, sample, + 6, 8, -1, -1, -1, + // block_info, elapsed + 7, 1 }; -SAggFunctionInfo aAggs[] = {{ +SAggFunctionInfo aAggs[40] = {{ // 0, count function does not invoke the finalize function "count", TSDB_FUNC_COUNT, @@ -5227,11 +5185,11 @@ SAggFunctionInfo aAggs[] = {{ { // 23 "arithmetic", - TSDB_FUNC_ARITHM, - TSDB_FUNC_ARITHM, + TSDB_FUNC_SCALAR_EXPR, + TSDB_FUNC_SCALAR_EXPR, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS, function_setup, - arithmetic_function, + scalar_expr_function, doFinalizer, copy_function, dataBlockRequired, @@ -5344,41 +5302,8 @@ SAggFunctionInfo aAggs[] = {{ noop1, dataBlockRequired, }, - {// 33 - "ceil", - TSDB_FUNC_CEIL, - TSDB_FUNC_CEIL, - TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, - function_setup, - ceil_function, - doFinalizer, - noop1, - dataBlockRequired - }, - {// 34 - "floor", - TSDB_FUNC_FLOOR, - TSDB_FUNC_FLOOR, - TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, - function_setup, - floor_function, - doFinalizer, - noop1, - dataBlockRequired - }, - {// 35 - "round", - TSDB_FUNC_ROUND, - TSDB_FUNC_ROUND, - TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, - function_setup, - round_function, - doFinalizer, - noop1, - dataBlockRequired - }, { - // 36 + // 33 "csum", TSDB_FUNC_CSUM, TSDB_FUNC_INVALID_ID, @@ -5390,7 +5315,7 @@ SAggFunctionInfo aAggs[] = {{ dataBlockRequired, }, { - // 37 + // 34 "mavg", TSDB_FUNC_MAVG, TSDB_FUNC_INVALID_ID, @@ -5402,7 +5327,7 @@ SAggFunctionInfo aAggs[] = {{ dataBlockRequired, }, { - // 38 + // 35 
"sample", TSDB_FUNC_SAMPLE, TSDB_FUNC_SAMPLE, @@ -5414,7 +5339,7 @@ SAggFunctionInfo aAggs[] = {{ dataBlockRequired, }, { - // 39 + // 36 "_block_dist", TSDB_FUNC_BLKINFO, TSDB_FUNC_BLKINFO, @@ -5425,4 +5350,16 @@ SAggFunctionInfo aAggs[] = {{ block_func_merge, dataBlockRequired, }, + { + // 37 + "elapsed", + TSDB_FUNC_ELAPSED, + TSDB_FUNC_ELAPSED, + TSDB_BASE_FUNC_SO, + elapsedSetup, + elapsedFunction, + elapsedFinalizer, + elapsedMerge, + elapsedRequired, + } }; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 5f2533f08b69d95514ed22f9a10daa25f774b7d3..1249f6db3082b411b6d47ecc56501cdbc90b9bed 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -30,6 +30,9 @@ #include "tscompression.h" #include "qScript.h" #include "tscLog.h" +#include "cJSON.h" +#include "tsdbMeta.h" +#include "tscUtil.h" #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) @@ -134,10 +137,18 @@ do { \ } \ } while (0) +#define GET_JSON_KEY(exprInfo) \ +char* param = NULL; \ +int32_t paramLen = 0; \ +if(exprInfo->base.numOfParams > 0){ \ + param = exprInfo->base.param[0].pz; \ + paramLen = exprInfo->base.param[0].nLen; \ +} + uint64_t queryHandleId = 0; int32_t getMaximumIdleDurationSec() { - return tsShellActivityTimer * 2; + return tsShellActivityTimer * 10; } int64_t genQueryId(void) { int64_t uid = 0; @@ -403,6 +414,10 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput for (int32_t i = 0; i < numOfOutput; ++i) { int32_t functId = pCtx[i].functionId; + if (TSDB_FUNC_IS_SCALAR(functId)) { + continue; + } + if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { hasTags = true; continue; @@ -422,13 +437,14 @@ static bool isScalarWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) { for (int32_t i = 0; i < numOfOutput; ++i) { int32_t functId = pCtx[i].functionId; - if (functId == TSDB_FUNC_TAG_DUMMY || functId == 
TSDB_FUNC_TS_DUMMY) { - hasTags = true; + if (TSDB_FUNC_IS_SCALAR(functId)) { + numOfScalar++; continue; } - if ((aAggs[functId].status & TSDB_FUNCSTATE_SCALAR) != 0) { - numOfScalar++; + if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { + hasTags = true; + continue; } } @@ -933,9 +949,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; - bool hasAggregates = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < numOfOutput; ++k) { + bool hasAggregates = pCtx[k].preAggVals.isSet; + pCtx[k].size = forwardStep; pCtx[k].startTs = pWin->skey; @@ -962,8 +979,10 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx if (functionId < 0) { // load the script and exec, pRuntimeEnv->pUdfInfo SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); - } else { + } else if (!TSDB_FUNC_IS_SCALAR(functionId)){ aAggs[functionId].xFunction(&pCtx[k]); + } else { + assert(0); } } @@ -1104,7 +1123,7 @@ static TSKEY getStartTsKey(SQueryAttr* pQueryAttr, STimeWindow* win, const TSKEY return ts; } -static void setArithParams(SArithmeticSupport* sas, SExprInfo *pExprInfo, SSDataBlock* pSDataBlock) { +static void setArithParams(SScalarExprSupport* sas, SExprInfo *pExprInfo, SSDataBlock* pSDataBlock) { sas->numOfCols = (int32_t) pSDataBlock->info.numOfCols; sas->pExprInfo = pExprInfo; if (sas->colList) { @@ -1140,8 +1159,8 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SQLFunctionCtx* pC } void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order) { - if (pCtx[0].functionId == TSDB_FUNC_ARITHM) { - SArithmeticSupport* pSupport = (SArithmeticSupport*) 
pCtx[0].param[1].pz; + if (pCtx[0].functionId == TSDB_FUNC_SCALAR_EXPR) { + SScalarExprSupport* pSupport = (SScalarExprSupport*) pCtx[0].param[1].pz; if (pSupport->colList == NULL) { doSetInputDataBlock(pOperator, pCtx, pBlock, order); } else { @@ -1164,8 +1183,8 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, setBlockStatisInfo(&pCtx[i], pBlock, &pOperator->pExpr[i].base.colInfo); - if (pCtx[i].functionId == TSDB_FUNC_ARITHM) { - setArithParams((SArithmeticSupport*)pCtx[i].param[1].pz, &pOperator->pExpr[i], pBlock); + if (pCtx[i].functionId == TSDB_FUNC_SCALAR_EXPR) { + setArithParams((SScalarExprSupport*)pCtx[i].param[1].pz, &pOperator->pExpr[i], pBlock); } else { SColIndex* pCol = &pOperator->pExpr[i].base.colInfo; if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || (pCtx[i].functionId == TSDB_FUNC_BLKINFO) || @@ -1178,10 +1197,9 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, pCtx[i].colId = p->info.colId; assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type); - if (pCtx[i].functionId < 0) { + if (pCtx[i].functionId < 0 || TSDB_FUNC_IS_SCALAR(pCtx[i].functionId)) { SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); - pCtx[i].ptsList = (int64_t*) tsInfo->pData; - + pCtx[i].ptsList = (int64_t*)tsInfo->pData; continue; } @@ -1204,7 +1222,7 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type); for(int32_t j = 0; j < pBlock->info.rows; ++j) { char* dst = p->pData + j * p->info.bytes; - tVariantDump(&pOperator->pExpr[i].base.param[1], dst, p->info.type, true); + tVariantDump(&pOperator->pExpr[i].base.param[0], dst, p->info.type, true); } } } @@ -1222,8 +1240,10 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction if (functionId < 0) { SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); 
- } else { + } else if (!TSDB_FUNC_IS_SCALAR(functionId)){ aAggs[functionId].xFunction(&pCtx[k]); + } else { + assert(0); } } } @@ -1243,8 +1263,10 @@ static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx // load the script and exec SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); - } else { + } else if (!TSDB_FUNC_IS_SCALAR(pCtx[k].functionId)) { aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); + } else { + assert(0); } } } @@ -1258,7 +1280,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { int32_t functionId = pCtx[k].functionId; - if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP) { + if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP && functionId != TSDB_FUNC_ELAPSED) { pCtx[k].start.key = INT64_MIN; continue; } @@ -1301,7 +1323,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; } } - } else if (functionId == TSDB_FUNC_TWA) { + } else if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_ELAPSED) { assert(curTs != windowKey); if (prevRowIndex == -1) { @@ -1444,34 +1466,34 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc } static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) { - STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info; + STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*)pOperatorInfo->info; SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; int32_t numOfOutput = pOperatorInfo->numOfOutput; SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); - bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); + bool 
ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); int32_t prevIndex = pResultRowInfo->curPos; TSKEY* tsCols = NULL; if (pSDataBlock->pDataBlock != NULL) { SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, 0); - tsCols = (int64_t*) pColDataInfo->pData; + tsCols = (int64_t*)pColDataInfo->pData; assert(tsCols[0] == pSDataBlock->info.window.skey && tsCols[pSDataBlock->info.rows - 1] == pSDataBlock->info.window.ekey); } - int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); - TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); + int32_t startPos = ascQuery ? 0 : (pSDataBlock->info.rows - 1); + TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); STimeWindow win = getActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); - bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); + bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SResultRow* pResult = NULL; - int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, - numOfOutput, pInfo->rowCellInfoOffset); + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, + tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1487,31 +1509,31 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already. 
SResultRow* pRes = getResultRow(pResultRowInfo, j); if (pRes->closed) { - assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); + assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && + resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); continue; } - STimeWindow w = pRes->win; - ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, - tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); + STimeWindow w = pRes->win; + ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, + tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } - doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1, - tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); + assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); - setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); + doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1, + tsCols[startPos], startPos, QUERY_IS_ASC_QUERY(pQueryAttr) ? 
w.ekey : w.skey, RESULT_ROW_END_INTERP); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); - } + setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); + } // restore current time window - ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, - numOfOutput, pInfo->rowCellInfoOffset); + ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, + tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1530,8 +1552,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } // null data, failed to allocate more memory buffer - int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &nextWin, masterScan, &pResult, tableGroupId, - pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); + int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &nextWin, masterScan, + &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -1541,20 +1563,18 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &nextWin, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &nextWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &nextWin, startPos, 
forwardStep, tsCols, pSDataBlock->info.rows, + numOfOutput); } if (pQueryAttr->timeWindowInterpo) { - int32_t rowIndex = ascQuery? (pSDataBlock->info.rows-1):0; + int32_t rowIndex = ascQuery ? (pSDataBlock->info.rows - 1) : 0; saveDataBlockLastRow(pRuntimeEnv, &pSDataBlock->info, pSDataBlock->pDataBlock, rowIndex); } updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey); } - - - static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; STableQueryInfo* item = pRuntimeEnv->current; @@ -1578,9 +1598,6 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn int32_t num = 0; for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) { char* val = ((char*)pColInfoData->pData) + bytes * j; - if (isNull(val, type)) { - continue; - } // Compare with the previous row of this column, and do not set the output buffer again if they are identical. 
if (pInfo->prevData == NULL) { @@ -1824,7 +1841,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde pCtx->hasNull = hasNull(pColIndex, pStatis); // set the statistics data for primary time stamp column - if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if ((pCtx->functionId == TSDB_FUNC_SPREAD || pCtx->functionId == TSDB_FUNC_ELAPSED) && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { pCtx->preAggVals.isSet = true; pCtx->preAggVals.statis.min = pSDataBlock->info.window.skey; pCtx->preAggVals.statis.max = pSDataBlock->info.window.ekey; @@ -1848,11 +1865,14 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { for (int32_t i = 0; i < numOfOutput; ++i) { int32_t functionId = pCtx[i].functionId; + if (functionId < 0 || TSDB_FUNC_IS_SCALAR(functionId)) { + continue; + } if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pCtx[i].outputBytes; pTagCtx[num++] = &pCtx[i]; - } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0 || (aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) { + } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { p = &pCtx[i]; } else if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) { // tag function may be the group by tag column @@ -1906,6 +1926,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr pCtx->ptsOutputBuf = NULL; + pCtx->colId = pIndex->colId; pCtx->outputBytes = pSqlExpr->resBytes; pCtx->outputType = pSqlExpr->resType; @@ -1964,7 +1985,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[2].i64 = pQueryAttr->window.ekey; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; - } else if (functionId == TSDB_FUNC_ARITHM) { + } else if (functionId == TSDB_FUNC_SCALAR_EXPR) { pCtx->param[1].pz = (char*) 
&pRuntimeEnv->sasArray[i]; } } @@ -2015,7 +2036,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf // NOTE: pTableCheckInfo need to update the query time range and the lastKey info pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); - pRuntimeEnv->sasArray = calloc(pQueryAttr->numOfOutput, sizeof(SArithmeticSupport)); + pRuntimeEnv->sasArray = calloc(pQueryAttr->numOfOutput, sizeof(SScalarExprSupport)); if (pRuntimeEnv->sasArray == NULL || pRuntimeEnv->pResultRowHashTable == NULL || pRuntimeEnv->keyBuf == NULL || pRuntimeEnv->prevRow == NULL || pRuntimeEnv->tagVal == NULL) { @@ -2162,7 +2183,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_MultiwayMergeSort: { - pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, 4096, merger); + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, 200, merger); // TD-10899 break; } @@ -2190,7 +2211,13 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_Order: { - pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); + if (pQueryAttr->pExpr2 != NULL) { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr2, + pQueryAttr->numOfExpr2, &pQueryAttr->order); + } else { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, + pQueryAttr->numOfOutput, &pQueryAttr->order); + } break; } @@ -2935,7 +2962,7 @@ void filterColRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSDataBlock* pBlock static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t numOfTags, int16_t colId); -static void doSetTagValueInParam(void* pTable, int32_t 
tagColId, tVariant *tag, int16_t type, int16_t bytes); +static void doSetTagValueInParam(void* pTable, char* param, int32_t paraLen, int32_t tagColId, tVariant *tag, int16_t type, int16_t bytes); static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) { SQLFunctionCtx* pCtx = pTableScanInfo->pCtx; @@ -2947,7 +2974,7 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData int32_t colId = pTableScanInfo->pExpr[i].base.colInfo.colId; // group by + first/last should not apply the first/last block filter - if (functionId < 0) { + if (functionId < 0 || TSDB_FUNC_IS_SCALAR(functionId)) { status |= BLK_DATA_ALL_NEEDED; return status; } else { @@ -3012,19 +3039,22 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa if (pQueryAttr->stableQuery) { // todo refactor SExprInfo* pExprInfo = &pTableScanInfo->pExpr[0]; - int16_t tagId = (int16_t)pExprInfo->base.param[0].i64; + int16_t tagId = (int16_t)pExprInfo->base.param[1].i64; SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagId); // compare tag first tVariant t = {0}; - doSetTagValueInParam(pRuntimeEnv->current->pTable, tagId, &t, pColInfo->type, pColInfo->bytes); + GET_JSON_KEY(pExprInfo) + doSetTagValueInParam(pRuntimeEnv->current->pTable, param, paramLen, tagId, &t, pColInfo->type, pColInfo->bytes); setTimestampListJoinInfo(pRuntimeEnv, &t, pRuntimeEnv->current); STSElem elem = tsBufGetElem(pRuntimeEnv->pTsBuf); if (!tsBufIsValidElem(&elem) || (tsBufIsValidElem(&elem) && (tVariantCompare(&t, elem.tag) != 0))) { (*status) = BLK_DATA_DISCARD; + tVariantDestroy(&t); return TSDB_CODE_SUCCESS; } + tVariantDestroy(&t); } } @@ -3212,7 +3242,7 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { * set tag value in SQLFunctionCtx * e.g.,tag information into input buffer */ -static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag, int16_t type, 
int16_t bytes) { +static void doSetTagValueInParam(void* pTable, char* param, int32_t paramLen, int32_t tagColId, tVariant *tag, int16_t type, int16_t bytes) { tVariantDestroy(tag); char* val = NULL; @@ -3220,7 +3250,7 @@ static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag, val = tsdbGetTableName(pTable); assert(val != NULL); } else { - val = tsdbGetTableTagVal(pTable, tagColId, type, bytes); + val = tsdbGetTableTagVal(pTable, tagColId, type); } if (val == NULL || isNull(val, type)) { @@ -3228,11 +3258,19 @@ static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag, return; } - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(type)) { int32_t maxLen = bytes - VARSTR_HEADER_SIZE; int32_t len = (varDataLen(val) > maxLen)? maxLen:varDataLen(val); tVariantCreateFromBinary(tag, varDataVal(val), len, type); //tVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), type); + } else if(type == TSDB_DATA_TYPE_JSON){ + char jsonVal[TSDB_MAX_JSON_TAGS_LEN] = {0}; + if(param){ + getJsonTagValueElment(pTable, param, paramLen, jsonVal, bytes); + }else{ + getJsonTagValueAll(val, jsonVal, TSDB_MAX_JSON_TAGS_LEN); + } + tVariantCreateFromBinary(tag, jsonVal, bytes, type); } else { tVariantCreateFromBinary(tag, val, bytes, type); } @@ -3258,12 +3296,12 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt SExprInfo* pExprInfo = &pExpr[0]; if (pQueryAttr->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP && pQueryAttr->stableQuery) { - assert(pExprInfo->base.numOfParams == 1); + assert(pExprInfo->base.numOfParams == 2); - int16_t tagColId = (int16_t)pExprInfo->base.param[0].i64; + int16_t tagColId = (int16_t)pExprInfo->base.param[1].i64; SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagColId); - - doSetTagValueInParam(pTable, tagColId, &pCtx[0].tag, pColInfo->type, pColInfo->bytes); + 
GET_JSON_KEY(pExprInfo) + doSetTagValueInParam(pTable, param, paramLen, tagColId, &pCtx[0].tag, pColInfo->type, pColInfo->bytes); return; } else { // set tag value, by which the results are aggregated. @@ -3279,7 +3317,8 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt } // todo use tag column index to optimize performance - doSetTagValueInParam(pTable, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType, + GET_JSON_KEY(pLocalExprInfo) + doSetTagValueInParam(pTable, param, paramLen, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType, pLocalExprInfo->base.resBytes); if (IS_NUMERIC_TYPE(pLocalExprInfo->base.resType) @@ -3645,8 +3684,10 @@ void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) { if (pCtx[j].functionId < 0) { // todo udf initialization continue; - } else { + } else if (!TSDB_FUNC_IS_SCALAR(pCtx[j].functionId)) { aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo); + } else { + assert(0); } } } @@ -3705,8 +3746,10 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult pCtx[j].startTs = buf->win.skey; if (pCtx[j].functionId < 0) { doInvokeUdf(pRuntimeEnv->pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); - } else { + } else if (!TSDB_FUNC_IS_SCALAR(pCtx[j].functionId)) { aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + } else { + assert(0); } } @@ -3722,8 +3765,10 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult for (int32_t j = 0; j < numOfOutput; ++j) { if (pCtx[j].functionId < 0) { doInvokeUdf(pRuntimeEnv->pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); - } else { + } else if (!TSDB_FUNC_IS_SCALAR(pCtx[j].functionId)) { aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + } else { + assert(0); } } } @@ -3811,9 +3856,6 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; - if (functionId 
< 0) { - continue; - } if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { @@ -3821,7 +3863,13 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe } if (!pResInfo->initialized) { - aAggs[functionId].init(&pCtx[i], pResInfo); + if (functionId < 0 ) { + doInvokeUdf(pRuntimeEnv->pUdfInfo, &pCtx[i], 0, TSDB_UDF_FUNC_INIT); + } else if (!TSDB_FUNC_IS_SCALAR(functionId)) { + aAggs[functionId].init(&pCtx[i], pResInfo); + } else { + assert(0); + } } } } @@ -3898,20 +3946,21 @@ void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExpr if (pQueryAttr->stableQuery && (pRuntimeEnv->pTsBuf != NULL) && (pExpr->functionId == TSDB_FUNC_TS || pExpr->functionId == TSDB_FUNC_PRJ) && (pExpr->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX)) { - assert(pExpr->numOfParams == 1); + assert(pExpr->numOfParams == 2); - int16_t tagColId = (int16_t)pExprInfo->base.param[0].i64; + int16_t tagColId = (int16_t)pExprInfo->base.param[1].i64; SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagColId); - doSetTagValueInParam(pTable, tagColId, &pCtx->tag, pColInfo->type, pColInfo->bytes); + GET_JSON_KEY(pExprInfo) + doSetTagValueInParam(pTable, param, paramLen, tagColId, &pCtx->tag, pColInfo->type, pColInfo->bytes); int16_t tagType = pCtx[0].tag.nType; - if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) { + if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR || tagType == TSDB_DATA_TYPE_JSON) { qDebug("QInfo:0x%"PRIx64" set tag value for join comparison, colId:%" PRId64 ", val:%s", GET_QID(pRuntimeEnv), - pExprInfo->base.param[0].i64, pCtx[0].tag.pz); + pExprInfo->base.param[1].i64, pCtx[0].tag.pz); } else { qDebug("QInfo:0x%"PRIx64" set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, 
GET_QID(pRuntimeEnv), - pExprInfo->base.param[0].i64, pCtx[0].tag.i64); + pExprInfo->base.param[1].i64, pCtx[0].tag.i64); } } } @@ -3929,7 +3978,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem)) { - if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR || pTag->nType == TSDB_DATA_TYPE_JSON) { qError("QInfo:0x%"PRIx64" failed to find tag:%s in ts_comp", GET_QID(pRuntimeEnv), pTag->pz); } else { qError("QInfo:0x%"PRIx64" failed to find tag:%" PRId64 " in ts_comp", GET_QID(pRuntimeEnv), pTag->i64); @@ -3940,7 +3989,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, // Keep the cursor info of current table pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); - if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR || pTag->nType == TSDB_DATA_TYPE_JSON) { qDebug("QInfo:0x%"PRIx64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { qDebug("QInfo:0x%"PRIx64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); @@ -3948,7 +3997,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, } else { tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); - if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR || pTag->nType == TSDB_DATA_TYPE_JSON) { qDebug("QInfo:0x%"PRIx64" find tag:%s start pos in ts_comp, blockIndex:%d, 
tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { qDebug("QInfo:0x%"PRIx64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); @@ -5344,7 +5393,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, pInfo->multiGroupResults = groupResultMixedUp; pInfo->pMerge = param; - pInfo->bufCapacity = 4096; + pInfo->bufCapacity = 200; // TD-10899 pInfo->udfInfo = pUdfInfo; pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); @@ -5523,8 +5572,8 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI for(int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData col = {{0}}; col.info.colId = pExpr[i].base.colInfo.colId; - col.info.bytes = pExpr[i].base.colBytes; - col.info.type = pExpr[i].base.colType; + col.info.bytes = pExpr[i].base.resBytes; + col.info.type = pExpr[i].base.resType; taosArrayPush(pDataBlock->pDataBlock, &col); if (col.info.colId == pOrderVal->orderColId) { @@ -5746,7 +5795,15 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { break; } else { // init output buffer for a new group data for (int32_t j = 0; j < pOperator->numOfOutput; ++j) { - aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]); + int16_t functionId = pInfo->pCtx[j].functionId; + if (functionId < 0 ) { + SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; + doInvokeUdf(pUdfInfo, &pInfo->pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); + } else if (!TSDB_FUNC_IS_SCALAR(functionId)) { + aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]); + } else { + assert(0); + } } initCtxOutputBuffer(pInfo->pCtx, pOperator->numOfOutput); } @@ -6206,7 +6263,17 @@ group_finished_exit: return true; } +static 
void resetInterpolation(SQLFunctionCtx *pCtx, SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfOutput) { + if (!pRuntimeEnv->pQueryAttr->timeWindowInterpo) { + return; + } + for (int32_t i = 0; i < numOfOutput; ++i) { + pCtx[i].start.key = INT64_MIN; + pCtx[i].end.key = INT64_MIN; + } + *(TSKEY *)pRuntimeEnv->prevRow[0] = INT64_MIN; +} static void doTimeEveryImpl(SOperatorInfo* pOperator, SQLFunctionCtx *pCtx, SSDataBlock* pBlock, bool newgroup) { STimeEveryOperatorInfo* pEveryInfo = (STimeEveryOperatorInfo*) pOperator->info; @@ -6434,6 +6501,7 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; + STableId prevId = {0, 0}; while(1) { publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); @@ -6443,6 +6511,12 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { break; } + if (prevId.tid != pBlock->info.tid || prevId.uid != pBlock->info.uid) { + resetInterpolation(pIntervalInfo->pCtx, pRuntimeEnv, pOperator->numOfOutput); + prevId.uid = pBlock->info.uid; + prevId.tid = pBlock->info.tid; + } + // the pDataBlock are always the same one, no need to call this again STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; @@ -7414,7 +7488,16 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { if (pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { data = tsdbGetTableName(item->pTable); } else { - data = tsdbGetTableTagVal(item->pTable, pExprInfo->base.colInfo.colId, type, bytes); + data = tsdbGetTableTagVal(item->pTable, pExprInfo->base.colInfo.colId, type); + if(type == TSDB_DATA_TYPE_JSON){ + if(pExprInfo->base.numOfParams > 0){ // tag-> operation + getJsonTagValueElment(item->pTable, pExprInfo->base.param[0].pz, pExprInfo->base.param[0].nLen, output, bytes); + }else{ + getJsonTagValueAll(data, output, bytes); + } + count += 1; + continue; + } } doSetTagValueToResultBuf(output, data, type, 
bytes); @@ -7450,13 +7533,20 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { type = pExprInfo[j].base.resType; bytes = pExprInfo[j].base.resBytes; + dst = pColInfo->pData + count * pExprInfo[j].base.resBytes; if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { data = tsdbGetTableName(item->pTable); } else { - data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.colInfo.colId, type, bytes); + data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.colInfo.colId, type); + if(type == TSDB_DATA_TYPE_JSON){ + if(pExprInfo[j].base.numOfParams > 0){ // tag-> operation + getJsonTagValueElment(item->pTable, pExprInfo[j].base.param[0].pz, pExprInfo[j].base.param[0].nLen, dst, bytes); + }else{ + getJsonTagValueAll(data, dst, bytes); + } + continue; + } } - - dst = pColInfo->pData + count * pExprInfo[j].base.resBytes; doSetTagValueToResultBuf(dst, data, type, bytes); } @@ -8132,6 +8222,32 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { goto _cleanup; } + + +/* + //MSG EXTEND DEMO + if (pQueryMsg->extend) { + pMsg += pQueryMsg->sqlstrLen; + + STLV *tlv = NULL; + while (1) { + tlv = (STLV *)pMsg; + tlv->type = ntohs(tlv->type); + tlv->len = ntohl(tlv->len); + if (tlv->len > 0) { + *(int16_t *)tlv->value = ntohs(*(int16_t *)tlv->value); + qDebug("Got TLV,type:%d,len:%d,value:%d", tlv->type, tlv->len, *(int16_t*)tlv->value); + pMsg += sizeof(*tlv) + tlv->len; + continue; + } + + break; + } + } + +*/ + + qDebug("qmsg:%p query %d tables, type:%d, qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, order:%d, " "outputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptsLen:%d, compNumOfBlocks:%d, limit:%" PRId64 ", offset:%" PRId64, pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols, @@ -8181,7 +8297,7 @@ int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int return TSDB_CODE_SUCCESS; } -int32_t 
buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) { +int32_t buildScalarExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); tExprNode* pExprNode = NULL; @@ -8248,7 +8364,7 @@ void destroyUdfInfo(SUdfInfo* pUdfInfo) { taosCloseDll(pUdfInfo->handle); tfree(pUdfInfo); } - +#ifdef LUA_EMBEDDED static char* getUdfFuncName(char* funcname, char* name, int type) { switch (type) { case TSDB_UDF_FUNC_NORMAL: @@ -8273,8 +8389,9 @@ static char* getUdfFuncName(char* funcname, char* name, int type) { return funcname; } - +#endif int32_t initUdfInfo(SUdfInfo* pUdfInfo) { +#ifdef LUA_EMBEDDED if (pUdfInfo == NULL || pUdfInfo->handle) { return TSDB_CODE_SUCCESS; } @@ -8358,7 +8475,7 @@ int32_t initUdfInfo(SUdfInfo* pUdfInfo) { return (*(udfInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(&pUdfInfo->init); } } - +#endif //LUA_EMBEDDED return TSDB_CODE_SUCCESS; } @@ -8393,16 +8510,13 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp int16_t bytes = 0; // parse the arithmetic expression - if (pExprs[i].base.functionId == TSDB_FUNC_ARITHM) { - code = buildArithmeticExprFromMsg(&pExprs[i], pMsg); + if (pExprs[i].base.functionId == TSDB_FUNC_SCALAR_EXPR) { + code = buildScalarExprFromMsg(&pExprs[i], pMsg); if (code != TSDB_CODE_SUCCESS) { tfree(pExprs); return code; } - - type = TSDB_DATA_TYPE_DOUBLE; - bytes = tDataTypes[type].bytes; } else if (pExprs[i].base.functionId == TSDB_FUNC_BLKINFO) { SSchema s = {.type=TSDB_DATA_TYPE_BINARY, .bytes=TSDB_MAX_BINARY_LEN}; type = s.type; @@ -8415,8 +8529,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp // it is a user-defined constant value column assert(pExprs[i].base.functionId == TSDB_FUNC_PRJ); - type = pExprs[i].base.param[1].nType; - bytes = pExprs[i].base.param[1].nLen; + type = pExprs[i].base.param[0].nType; + bytes = pExprs[i].base.param[0].nLen; if (type == TSDB_DATA_TYPE_BINARY || type == 
TSDB_DATA_TYPE_NCHAR) { bytes += VARSTR_HEADER_SIZE; } @@ -8456,14 +8570,14 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp } int32_t param = (int32_t)pExprs[i].base.param[0].i64; - if (pExprs[i].base.functionId > 0 && pExprs[i].base.functionId != TSDB_FUNC_ARITHM && + if (pExprs[i].base.functionId > 0 && pExprs[i].base.functionId != TSDB_FUNC_SCALAR_EXPR && (type != pExprs[i].base.colType || bytes != pExprs[i].base.colBytes)) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } // todo remove it - if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, + if (pExprs[i].base.functionId != TSDB_FUNC_SCALAR_EXPR && getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, 0, isSuperTable, pUdfInfo) != TSDB_CODE_SUCCESS) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; @@ -8532,31 +8646,32 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t nu int16_t bytes = 0; // parse the arithmetic expression - if (pExprs[i].base.functionId == TSDB_FUNC_ARITHM) { - code = buildArithmeticExprFromMsg(&pExprs[i], pQueryMsg); + if (pExprs[i].base.functionId == TSDB_FUNC_SCALAR_EXPR) { + code = buildScalarExprFromMsg(&pExprs[i], pQueryMsg); if (code != TSDB_CODE_SUCCESS) { tfree(pExprs); return code; } - type = TSDB_DATA_TYPE_DOUBLE; - bytes = tDataTypes[type].bytes; + pExprs[i].base.resBytes = pExprs[i].pExpr->resultBytes; + pExprs[i].base.resType = pExprs[i].pExpr->resultType; + pExprs[i].base.interBytes = 0; } else { int32_t index = pExprs[i].base.colInfo.colIndex; assert(prevExpr[index].base.resColId == pExprs[i].base.colInfo.colId); - type = prevExpr[index].base.resType; + type = prevExpr[index].base.resType; bytes = prevExpr[index].base.resBytes; - } - int32_t param = (int32_t)pExprs[i].base.param[0].i64; - if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, 
param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, - &pExprs[i].base.interBytes, 0, isSuperTable, pUdfInfo) != TSDB_CODE_SUCCESS) { - tfree(pExprs); - return TSDB_CODE_QRY_INVALID_MSG; + int32_t param = (int32_t)pExprs[i].base.param[0].i64; + if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, + &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, 0, isSuperTable, + pUdfInfo) != TSDB_CODE_SUCCESS) { + tfree(pExprs); + return TSDB_CODE_QRY_INVALID_MSG; + } } - assert(isValidDataType(pExprs[i].base.resType)); } @@ -8676,7 +8791,7 @@ static void doUpdateExprColumnIndex(SQueryAttr *pQueryAttr) { for (int32_t k = 0; k < pQueryAttr->numOfOutput; ++k) { SSqlExpr *pSqlExprMsg = &pQueryAttr->pExpr1[k].base; - if (pSqlExprMsg->functionId == TSDB_FUNC_ARITHM) { + if (pSqlExprMsg->functionId == TSDB_FUNC_SCALAR_EXPR) { continue; } @@ -8787,6 +8902,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S pQueryAttr->tsCompQuery = pQueryMsg->tsCompQuery; pQueryAttr->simpleAgg = pQueryMsg->simpleAgg; pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery; + pQueryAttr->needTableSeqScan = pQueryMsg->needTableSeqScan; pQueryAttr->needReverseScan = pQueryMsg->needReverseScan; pQueryAttr->stateWindow = pQueryMsg->stateWindow; pQueryAttr->vgId = vgId; @@ -9324,4 +9440,3 @@ void freeQueryAttr(SQueryAttr* pQueryAttr) { filterFreeInfo(pQueryAttr->pFilters); } } - diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 45b1f0c5534bea73502d74a758b88b1983bfc4b8..cc214b953303e3b10b053bbe0c183eaee520e32a 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -367,6 +367,14 @@ static int32_t tsCompareFunc(TSKEY k1, TSKEY k2, int32_t order) { } int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { + if (type == TSDB_DATA_TYPE_JSON){ + bool canReturn = true; + int32_t result = jsonCompareUnit(f1, f2, &canReturn); + if(canReturn) return 
result; + type = *f1; + f1 += CHAR_BYTES; + f2 += CHAR_BYTES; + } switch (type) { case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2)); case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); @@ -641,6 +649,89 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t printf("\n"); } +static void mergeSortIndicesByOrderColumns(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, + int32_t orderType, __col_compar_fn_t compareFn, int32_t* indices, int32_t* aux) { + if (end <= start) { + return; + } + + + int32_t mid = start + (end-start)/2; + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, start, mid, data, orderType, compareFn, indices, aux); + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, mid+1, end, data, orderType, compareFn, indices, aux); + int32_t left = start; + int32_t right = mid + 1; + int32_t k; + for (k = start; k <= end; ++k) { + if (left == mid+1) { + aux[k] = indices[right]; + ++right; + } else if (right == end+1) { + aux[k] = indices[left]; + ++left; + } else { + int32_t ret = compareFn(pDescriptor, numOfRows, indices[left], indices[right], data); + if (ret <= 0) { + aux[k] = indices[left]; + ++left; + } else { + aux[k] = indices[right]; + ++right; + } + } + } + + for (k = start; k <= end; ++k) { + indices[k] = aux[k]; + } +} + +static void columnwiseMergeSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char* data, + int32_t orderType, __col_compar_fn_t compareFn) { + int32_t* indices = malloc(numOfRows * sizeof(int32_t)); + int32_t* aux = malloc(numOfRows * sizeof(int32_t)); + + for (int32_t i = 0; i < numOfRows; ++i) { + indices[i] = i; + } + + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, 0, numOfRows-1, data, orderType, compareFn, indices, aux); + + int32_t numOfCols = pDescriptor->pColumnModel->numOfCols; + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i 
= 0; i < numOfCols; ++i) { + int16_t colOffset = getColumnModelOffset(pDescriptor->pColumnModel, i); + int32_t colBytes = pDescriptor->pColumnModel->pFields[i].field.bytes; + // make sure memory buffer is enough + if (prevLength < colBytes) { + char *tmp = realloc(p, colBytes * numOfRows); + assert(tmp); + + p = tmp; + prevLength = colBytes; + } + + char* colData = data + colOffset * numOfRows; + memcpy(p, colData, colBytes * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = colData + colBytes * j; + + int32_t newPos = indices[j]; + char* src = p + (newPos * colBytes); + memcpy(dest, src, colBytes); + } + + } + + tfree(p); + tfree(aux); + tfree(indices); +} + static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType, __col_compar_fn_t compareFn, void* buf) { #ifdef _DEBUG_VIEW @@ -742,9 +833,35 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows } } -void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t order) { +void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType) { + // short array sort, incur another sort procedure instead of quick sort process + __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? 
compare_sa : compare_sd; + + SColumnModel* pModel = pDescriptor->pColumnModel; + + size_t width = 0; + for(int32_t i = 0; i < pModel->numOfCols; ++i) { + SSchema1* pSchema = &pModel->pFields[i].field; + if (width < pSchema->bytes) { + width = pSchema->bytes; + } + } + + char* buf = malloc(width); + assert(width > 0 && buf != NULL); + + if (end - start + 1 <= 8) { + tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf); + } else { + columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, orderType, compareFn, buf); + } + + free(buf); +} + +void tColDataMergeSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType) { // short array sort, incur another sort procedure instead of quick sort process - __col_compar_fn_t compareFn = (order == TSDB_ORDER_ASC) ? compare_sa : compare_sd; + __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? compare_sa : compare_sd; SColumnModel* pModel = pDescriptor->pColumnModel; @@ -762,12 +879,14 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta if (end - start + 1 <= 8) { tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf); } else { - columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, order, compareFn, buf); + columnwiseMergeSortImpl(pDescriptor, numOfRows, start, end, data, orderType, compareFn); } free(buf); } + + /* * deep copy of sschema */ diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index 2ce8a49097af6229c36931cbf3db753c04580674..d5009c85ae3e7813ab0c35a62812e89d3d879ccf 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c @@ -18,6 +18,7 @@ #include "tcompare.h" #include "hash.h" #include "tscUtil.h" +#include "tsdbMeta.h" OptrStr gOptrStr[] = { {TSDB_RELATION_INVALID, "invalid"}, @@ -36,13 +37,14 @@ OptrStr gOptrStr[] = { {TSDB_RELATION_NOT, "not"}, {TSDB_RELATION_MATCH, "match"}, {TSDB_RELATION_NMATCH, "nmatch"}, + 
{TSDB_RELATION_CONTAINS, "contains"}, }; static FORCE_INLINE int32_t filterFieldColDescCompare(const void *desc1, const void *desc2) { const SSchema *sch1 = desc1; const SSchema *sch2 = desc2; - return sch1->colId != sch2->colId; + return !(strcmp(sch1->name, sch2->name) == 0 && sch1->colId == sch2->colId); } static FORCE_INLINE int32_t filterFieldValDescCompare(const void *desc1, const void *desc2) { @@ -60,15 +62,23 @@ filter_desc_compare_func gDescCompare [FLD_TYPE_MAX] = { }; bool filterRangeCompGi (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { + int32_t result = cfunc(maxv, minr); + if (result == TSDB_DATA_JSON_CAN_NOT_COMPARE) return false; return cfunc(maxv, minr) >= 0; } bool filterRangeCompGe (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { + int32_t result = cfunc(maxv, minr); + if (result == TSDB_DATA_JSON_CAN_NOT_COMPARE) return false; return cfunc(maxv, minr) > 0; } bool filterRangeCompLi (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { + int32_t result = cfunc(minv, maxr); + if (result == TSDB_DATA_JSON_CAN_NOT_COMPARE) return false; return cfunc(minv, maxr) <= 0; } bool filterRangeCompLe (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { + int32_t result = cfunc(minv, maxr); + if (result == TSDB_DATA_JSON_CAN_NOT_COMPARE) return false; return cfunc(minv, maxr) < 0; } bool filterRangeCompii (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { @@ -158,7 +168,8 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal, compareDoubleVal, compareLenPrefixedStr, compareStrPatternComp, compareFindItemInSet, compareWStrPatternComp, compareLenPrefixedWStr, compareUint8Val, compareUint16Val, 
compareUint32Val, compareUint64Val, - setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexCompMatch, compareStrRegexCompNMatch + setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexCompMatch, + compareStrRegexCompNMatch, compareStrContainJson, compareJsonVal }; int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { @@ -196,7 +207,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_TIMESTAMP: comparFn = 3; break; case TSDB_DATA_TYPE_FLOAT: comparFn = 4; break; case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break; - case TSDB_DATA_TYPE_BINARY: { + case TSDB_DATA_TYPE_BINARY:{ if (optr == TSDB_RELATION_MATCH) { comparFn = 19; } else if (optr == TSDB_RELATION_NMATCH) { @@ -212,7 +223,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { break; } - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR:{ if (optr == TSDB_RELATION_MATCH) { comparFn = 19; } else if (optr == TSDB_RELATION_NMATCH) { @@ -226,6 +237,20 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { } break; } + case TSDB_DATA_TYPE_JSON:{ + if (optr == TSDB_RELATION_MATCH) { + comparFn = 19; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = 20; + } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ + comparFn = 9; + } else if (optr == TSDB_RELATION_CONTAINS) { + comparFn = 21; + } else { + comparFn = 22; + } + break; + } case TSDB_DATA_TYPE_UTINYINT: comparFn = 11; break; case TSDB_DATA_TYPE_USMALLINT: comparFn = 12;break; @@ -846,11 +871,10 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte return TSDB_CODE_SUCCESS; } - int32_t filterAddFieldFromNode(SFilterInfo *info, tExprNode *node, SFilterFieldId *fid) { CHK_LRET(node == NULL, TSDB_CODE_QRY_APP_ERROR, "empty node"); CHK_RET(node->nodeType != TSQL_NODE_COL && node->nodeType != TSQL_NODE_VALUE, TSDB_CODE_QRY_APP_ERROR); - + int32_t type; void *v; @@ -1030,6 +1054,12 
@@ int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint3 switch (tType) { case TSDB_DATA_TYPE_BOOL: + if (sType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(sType)) { + goto _return; + } + if (tmpVar.i64 > 1 ||tmpVar.i64 < 0) { + goto _return; + } case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: { if (tVariantDumpEx(&tmpVar, (char *)&val, tType, false, &converted, &extInfo)) { @@ -1147,20 +1177,46 @@ _return: return code; } +static int32_t filterDealJson(SFilterInfo *info, tExprNode* tree, tExprNode** pLeft) { + if((*pLeft)->nodeType == TSQL_NODE_EXPR && (*pLeft)->_node.optr == TSDB_RELATION_ARROW){ // json tag -> operation + assert(info->pTable != NULL); + SSchema* schema = (*pLeft)->_node.pLeft->pSchema; + if((*pLeft)->_node.pRight->pVal->nLen > TSDB_MAX_JSON_KEY_LEN) return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5((*pLeft)->_node.pRight->pVal->pz, (*pLeft)->_node.pRight->pVal->nLen, keyMd5); + memcpy(schema->name, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + (*pLeft) = (*pLeft)->_node.pLeft; // -> operation use left as input + }else if(((*pLeft)->pSchema->type == TSDB_DATA_TYPE_JSON) && + (tree->_node.optr == TSDB_RELATION_ISNULL || tree->_node.optr == TSDB_RELATION_NOTNULL)){ + SSchema* schema = (*pLeft)->pSchema; + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + uint32_t nullData = TSDB_DATA_JSON_NULL; + jsonKeyMd5(&nullData, INT_BYTES, keyMd5); + memcpy(schema->name, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + }else if(tree->_node.optr == TSDB_RELATION_CONTAINS){ + SSchema* schema = (*pLeft)->pSchema; + if(tree->_node.pRight->pVal->nLen > TSDB_MAX_JSON_KEY_LEN) return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(tree->_node.pRight->pVal->pz, tree->_node.pRight->pVal->nLen, keyMd5); + memcpy(schema->name, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + } + return TSDB_CODE_SUCCESS; +} int32_t filterAddGroupUnitFromNode(SFilterInfo 
*info, tExprNode* tree, SArray *group) { + tExprNode* pLeft = tree->_node.pLeft; + int32_t ret = TSDB_CODE_SUCCESS; + if((ret = filterDealJson(info, tree, &pLeft)) != TSDB_CODE_SUCCESS) return ret; SFilterFieldId left = {0}, right = {0}; - - filterAddFieldFromNode(info, tree->_node.pLeft, &left); - - tVariant* var = tree->_node.pRight->pVal; - int32_t type = FILTER_GET_COL_FIELD_TYPE(FILTER_GET_FIELD(info, left)); + filterAddFieldFromNode(info, pLeft, &left); + uint8_t type = FILTER_GET_COL_FIELD_TYPE(FILTER_GET_FIELD(info, left)); int32_t len = 0; uint32_t uidx = 0; - if (tree->_node.optr == TSDB_RELATION_IN && (!IS_VAR_DATA_TYPE(type))) { + if (tree->_node.optr == TSDB_RELATION_IN && !IS_VAR_DATA_TYPE(type) && type != TSDB_DATA_TYPE_JSON) { void *data = NULL; + tVariant* var = tree->_node.pRight->pVal; filterConvertSetFromBinary((void **)&data, var->pz, var->nLen, type, false); CHK_LRET(data == NULL, TSDB_CODE_QRY_APP_ERROR, "failed to convert in param"); @@ -1180,18 +1236,11 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g while(p) { void *key = taosHashGetDataKey((SHashObj *)data, p); void *fdata = NULL; - - if (IS_VAR_DATA_TYPE(type)) { - len = (int32_t)taosHashGetDataKeyLen((SHashObj *)data, p); - fdata = malloc(len + VARSTR_HEADER_SIZE); - varDataLen(fdata) = len; - memcpy(varDataVal(fdata), key, len); - len += VARSTR_HEADER_SIZE; - } else { - fdata = malloc(sizeof(int64_t)); - SIMPLE_COPY_VALUES(fdata, key); - len = tDataTypes[type].bytes; - } + + fdata = malloc(sizeof(int64_t)); + SIMPLE_COPY_VALUES(fdata, key); + len = tDataTypes[type].bytes; + filterAddField(info, NULL, &fdata, FLD_TYPE_VALUE, &right, len, true); filterAddUnit(info, TSDB_RELATION_EQUAL, &left, &right, &uidx); @@ -1206,9 +1255,9 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g taosHashCleanup(data); } else { - filterAddFieldFromNode(info, tree->_node.pRight, &right); + filterAddFieldFromNode(info, tree->_node.pRight, 
&right); - filterAddUnit(info, tree->_node.optr, &left, &right, &uidx); + filterAddUnit(info, tree->_node.optr, &left, &right, &uidx); SFilterGroup fgroup = {0}; filterAddUnitToGroup(&fgroup, uidx); @@ -1476,7 +1525,7 @@ int32_t filterTreeToGroup(tExprNode* tree, SFilterInfo *info, SArray* group) { return TSDB_CODE_SUCCESS; } - code = filterAddGroupUnitFromNode(info, tree, group); + code = filterAddGroupUnitFromNode(info, tree, group); _return: @@ -1534,7 +1583,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) tlen = varDataLen(data); data += VARSTR_HEADER_SIZE; } - converToStr(str + len, type, data, tlen > 32 ? 32 : tlen, &tlen); + if (data) converToStr(str + len, type, data, tlen > 32 ? 32 : tlen, &tlen); } else { strcat(str, "NULL"); } @@ -1815,7 +1864,7 @@ int32_t filterInitValFieldData(SFilterInfo *info) { } else if (type == TSDB_DATA_TYPE_NCHAR) { size_t len = (var->nType == TSDB_DATA_TYPE_BINARY || var->nType == TSDB_DATA_TYPE_NCHAR) ? var->nLen : MAX_NUM_STR_SIZE; fi->data = calloc(1, (len + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); - } else { + } else if (type != TSDB_DATA_TYPE_JSON){ if (var->nType == TSDB_DATA_TYPE_VALUE_ARRAY) { //TIME RANGE fi->data = calloc(var->nLen, tDataTypes[type].bytes); for (int32_t a = 0; a < var->nLen; ++a) { @@ -1827,18 +1876,22 @@ int32_t filterInitValFieldData(SFilterInfo *info) { } else { fi->data = calloc(1, sizeof(int64_t)); } + } else{ // type == TSDB_DATA_TYPE_JSON + // fi->data = null; use fi->desc as data, because json value is variable, so use tVariant (fi->desc) } - bool converted = false; - char extInfo = 0; - if (tVariantDumpEx(var, (char*)fi->data, type, true, &converted, &extInfo)) { - if (converted) { - filterHandleValueExtInfo(unit, extInfo); - - continue; + if(type != TSDB_DATA_TYPE_JSON){ + bool converted = false; + char extInfo = 0; + if (tVariantDumpEx(var, (char*)fi->data, type, true, &converted, &extInfo)) { + if (converted) { + filterHandleValueExtInfo(unit, 
extInfo); + + continue; + } + qError("dump value to type[%d] failed", type); + return TSDB_CODE_TSC_INVALID_OPERATION; } - qError("dump value to type[%d] failed", type); - return TSDB_CODE_TSC_INVALID_OPERATION; } // match/nmatch for nchar type need convert from ucs4 to mbs @@ -1848,7 +1901,14 @@ int32_t filterInitValFieldData(SFilterInfo *info) { int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData)); varDataSetLen(newValData, len); varDataCopy(fi->data, newValData); + }else if(type == TSDB_DATA_TYPE_JSON && + (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){ + char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; + int32_t len = taosUcs4ToMbs(((tVariant*)(fi->desc))->pz, ((tVariant*)(fi->desc))->nLen, newValData); + memcpy(((tVariant*)(fi->desc))->pz, newValData, len); + ((tVariant*)(fi->desc))->nLen = len; } + } return TSDB_CODE_SUCCESS; @@ -1858,6 +1918,8 @@ int32_t filterInitValFieldData(SFilterInfo *info) { bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right) { int32_t ret = func(left, right); + if(ret == TSDB_DATA_JSON_CAN_NOT_COMPARE) return false; + switch (optr) { case TSDB_RELATION_EQUAL: { return ret == 0; @@ -1883,6 +1945,9 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right) case TSDB_RELATION_MATCH: { return ret == 0; } + case TSDB_RELATION_CONTAINS: { + return ret == 0; + } case TSDB_RELATION_NMATCH: { return ret == 0; } @@ -2569,7 +2634,11 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].colId = FILTER_UNIT_COL_ID(info, unit); if (unit->right.type == FLD_TYPE_VALUE) { - info->cunits[i].valData = FILTER_UNIT_VAL_DATA(info, unit); + if(FILTER_UNIT_DATA_TYPE(unit) == TSDB_DATA_TYPE_JSON){ // json value is tVariant + info->cunits[i].valData = FILTER_UNIT_JSON_VAL_DATA(info, unit); + }else{ + info->cunits[i].valData = FILTER_UNIT_VAL_DATA(info, unit); + } } 
else { info->cunits[i].valData = NULL; } @@ -2647,9 +2716,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t } } - if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL + if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE || cunit->optr == TSDB_RELATION_MATCH - || cunit->optr == TSDB_RELATION_NOT_EQUAL) { + || cunit->optr == TSDB_RELATION_NOT_EQUAL || cunit->optr == TSDB_RELATION_CONTAINS) { continue; } @@ -2891,7 +2960,18 @@ static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, for (int32_t i = 0; i < numOfRows; ++i) { uint32_t uidx = info->groups[0].unitIdxs[0]; void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i; - (*p)[i] = ((colData == NULL) || isNull(colData, info->cunits[uidx].dataType)); + if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){ + if (!colData){ // for json->'key' is null + (*p)[i] = 1; + }else if( *(char*)colData == TSDB_DATA_TYPE_JSON){ // for json is null + colData = POINTER_SHIFT(colData, CHAR_BYTES); + (*p)[i] = isNull(colData, info->cunits[uidx].dataType); + }else{ + (*p)[i] = 0; + } + }else{ + (*p)[i] = ((colData == NULL) || isNull(colData, info->cunits[uidx].dataType)); + } if ((*p)[i] == 0) { all = false; } @@ -2914,7 +2994,20 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows for (int32_t i = 0; i < numOfRows; ++i) { uint32_t uidx = info->groups[0].unitIdxs[0]; void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i; - (*p)[i] = ((colData != NULL) && !isNull(colData, info->cunits[uidx].dataType)); + + if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){ + if (!colData) { // for json->'key' is not null + (*p)[i] = 0; + }else if( *(char*)colData == TSDB_DATA_TYPE_JSON){ // for json is not null + colData = POINTER_SHIFT(colData, CHAR_BYTES); + (*p)[i] = 
!isNull(colData, info->cunits[uidx].dataType); + }else{ // for json->'key' is not null + (*p)[i] = 1; + } + }else { + (*p)[i] = ((colData != NULL) && !isNull(colData, info->cunits[uidx].dataType)); + } + if ((*p)[i] == 0) { all = false; } @@ -2923,6 +3016,42 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows return all; } +static void doJsonCompare(SFilterComUnit *cunit, int8_t *result, void* colData){ + if(cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH){ + uint8_t jsonType = *(char*)colData; + char* realData = POINTER_SHIFT(colData, CHAR_BYTES); + if (jsonType != TSDB_DATA_TYPE_NCHAR){ + *result = false; + }else{ + char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); + int len = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(newColData)); + varDataSetLen(newColData, len); + tVariant* val = cunit->valData; + char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; + assert(val->nLen <= TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE); + memcpy(varDataVal(newValData), val->pz, val->nLen); + varDataSetLen(newValData, val->nLen); + *result = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, newValData); + tfree(newColData); + } + }else if(cunit->optr == TSDB_RELATION_LIKE){ + uint8_t jsonType = *(char*)colData; + char* realData = POINTER_SHIFT(colData, CHAR_BYTES); + if (jsonType != TSDB_DATA_TYPE_NCHAR){ + *result = false; + }else{ + tVariant* val = cunit->valData; + char* newValData = calloc(val->nLen + VARSTR_HEADER_SIZE, 1); + memcpy(varDataVal(newValData), val->pz, val->nLen); + varDataSetLen(newValData, val->nLen); + *result = filterDoCompare(gDataCompare[cunit->func], cunit->optr, realData, newValData); + tfree(newValData); + } + }else{ + *result = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData); + } +} + bool filterExecuteImplRange(void *pinfo, int32_t 
numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) { SFilterInfo *info = (SFilterInfo *)pinfo; bool all = true; @@ -2988,6 +3117,8 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat varDataSetLen(newColData, len); (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData); tfree(newColData); + }else if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){ + doJsonCompare(&(info->cunits[uidx]), &(*p)[i], colData); }else{ (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData); } @@ -3000,7 +3131,6 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat return all; } - bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) { SFilterInfo *info = (SFilterInfo *)pinfo; bool all = true; @@ -3044,6 +3174,8 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis * varDataSetLen(newColData, len); (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData); tfree(newColData); + }else if(cunit->dataType == TSDB_DATA_TYPE_JSON){ + doJsonCompare(cunit, &(*p)[i], colData); }else{ (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData); } @@ -3175,6 +3307,25 @@ int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from return TSDB_CODE_SUCCESS; } +int32_t filterSetJsonColFieldData(SFilterInfo *info, void *param, filer_get_col_from_name fp) { + CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "info NULL"); + CHK_LRET(info->fields[FLD_TYPE_COLUMN].num <= 0, TSDB_CODE_QRY_APP_ERROR, "no column fileds"); + + if (FILTER_ALL_RES(info) || FILTER_EMPTY_RES(info)) { + return TSDB_CODE_SUCCESS; + } + + for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { + SFilterField* fi = 
&info->fields[FLD_TYPE_COLUMN].fields[i]; + SSchema* sch = fi->desc; + + (*fp)(param, sch->colId, sch->name, &fi->data); + } + + filterUpdateComUnits(info); + + return TSDB_CODE_SUCCESS; +} int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) { int32_t code = TSDB_CODE_SUCCESS; @@ -3195,10 +3346,10 @@ int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) { filterInitUnitsFields(info); code = filterTreeToGroup(tree, info, group); - ERR_JRET(code); filterConvertGroupFromArray(info, group); + taosArrayDestroy(group); ERR_JRET(filterInitValFieldData(info)); @@ -3210,7 +3361,6 @@ int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) { CHK_JMP(FILTER_GET_FLAG(info->status, FI_STATUS_ALL)); if (FILTER_GET_FLAG(info->status, FI_STATUS_EMPTY)) { - taosArrayDestroy(group); return code; } } @@ -3220,15 +3370,11 @@ int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) { filterDumpInfoToString(info, "Final", 0); - taosArrayDestroy(group); - return code; _return: qInfo("No filter, code:%d", code); - taosArrayDestroy(group); - filterFreeInfo(*pinfo); *pinfo = NULL; @@ -3472,7 +3618,7 @@ int32_t filterIsIndexedColumnQuery(SFilterInfo* info, int32_t idxId, bool *res) int32_t optr = FILTER_UNIT_OPTR(info->units); CHK_JMP(optr == TSDB_RELATION_LIKE || optr == TSDB_RELATION_IN || optr == TSDB_RELATION_MATCH - || optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL); + || optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL || optr == TSDB_RELATION_CONTAINS); *res = true; diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index 024ba77de13086b7ff8e32ab2c4c7340d8806b6b..8428c339f4e89d6a5e988448f3aadadf522102b1 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -74,7 +74,7 @@ static int32_t setBoundingBox(MinMaxEntry* range, int16_t type, double minval, d } } else if (IS_UNSIGNED_NUMERIC_TYPE(type)){ range->u64MinVal = (uint64_t) 
minval; - if ((uint64_t)maxval > UINT64_MAX) { + if (maxval > UINT64_MAX) { range->u64MaxVal = UINT64_MAX; } else { range->u64MaxVal = (uint64_t) maxval; @@ -146,7 +146,7 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { } int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) { - int64_t v = 0; + uint64_t v = 0; GET_TYPED_DATA(v, uint64_t, pBucket->type, value); int32_t index = -1; @@ -162,8 +162,8 @@ int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) { index = (int32_t) (delta % pBucket->numOfSlots); } else { double slotSpan = (double)span / pBucket->numOfSlots; - index = (int32_t)((v - pBucket->range.u64MinVal) / slotSpan); - if (v == pBucket->range.u64MaxVal) { + index = (int32_t)(((double)v - pBucket->range.u64MinVal) / slotSpan); + if (index == pBucket->numOfSlots) { index -= 1; } } @@ -194,7 +194,7 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { } else { double slotSpan = span / pBucket->numOfSlots; index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); - if (v == pBucket->range.dMaxVal) { + if (index == pBucket->numOfSlots) { index -= 1; } } diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index 27a22f70832dc9669aa473b03820d84d4736b497..4c61cbb0f730780c6c7106a02c765974b2d21706 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -538,7 +538,7 @@ SArray* createTableScanPlan(SQueryAttr* pQueryAttr) { } else { if (pQueryAttr->queryBlockDist) { op = OP_TableBlockInfoScan; - } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery) { + } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery || pQueryAttr->needTableSeqScan) { op = OP_TableSeqScan; } else if (pQueryAttr->needReverseScan || pQueryAttr->pointInterpQuery) { op = OP_DataBlocksOptScan; @@ -567,6 +567,10 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } else if (pQueryAttr->pointInterpQuery) { op = OP_TimeEvery; taosArrayPush(plan, &op); + if (pQueryAttr->pExpr2 != NULL) { + op = OP_Project; + 
taosArrayPush(plan, &op); + } } else if (pQueryAttr->interval.interval > 0) { if (pQueryAttr->stableQuery) { op = OP_MultiTableTimeInterval; @@ -691,7 +695,6 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { op = OP_Filter; taosArrayPush(plan, &op); } - if (pQueryAttr->pExpr2 != NULL) { op = OP_Project; taosArrayPush(plan, &op); diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c index c43b0b3435b2073d4711bbb8a0ec0d9e347b0d13..a8a6f6732b7eef33cad040c2aadc4b3e1848bde2 100644 --- a/src/query/src/qScript.c +++ b/src/query/src/qScript.c @@ -12,7 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - +#ifdef LUA_EMBEDDED #include "os.h" #include "qScript.h" #include "ttype.h" @@ -444,3 +444,4 @@ bool isValidScript(char *script, int32_t len) { return ret; } +#endif diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 88bd970eb4e5d6a0f5111105628d129744c63db1..166881346fb4bae0bcf97d0c325c52a03c44cb85 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -22,6 +22,7 @@ #include "ttoken.h" #include "ttokendef.h" #include "tutil.h" +#include "tscUtil.h" SSqlInfo qSqlParse(const char *pStr) { void *pParser = ParseAlloc(malloc); @@ -52,7 +53,6 @@ SSqlInfo qSqlParse(const char *pStr) { Parse(pParser, 0, t0, &sqlInfo); goto abort_parse; } - case TK_QUESTION: case TK_ILLEGAL: { snprintf(sqlInfo.msg, tListLen(sqlInfo.msg), "unrecognized token: \"%s\"", t0.z); @@ -143,14 +143,14 @@ tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optr if (optrType == TK_NULL) { if (pToken){ pToken->type = TSDB_DATA_TYPE_NULL; - tVariantCreate(&pSqlExpr->value, pToken, true); + tVariantCreate(&pSqlExpr->value, pToken); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; } else if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) { if (pToken) { toTSDBType(pToken->type); - 
tVariantCreate(&pSqlExpr->value, pToken, true); + tVariantCreate(&pSqlExpr->value, pToken); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; @@ -178,6 +178,14 @@ tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optr pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pSqlExpr->tokenId = TK_TIMESTAMP; pSqlExpr->type = SQL_NODE_VALUE; + } else if (optrType == TK_AS) { + // Here it must be column type + if (pToken != NULL) { + pSqlExpr->dataType = *(TAOS_FIELD *)pToken; + } + + pSqlExpr->tokenId = optrType; + pSqlExpr->type = SQL_NODE_DATA_TYPE; } else { // Here it must be the column name (tk_id) if it is not a number or string. assert(optrType == TK_ID || optrType == TK_ALL); @@ -203,7 +211,7 @@ tSqlExpr *tSqlExprCreateTimestamp(SStrToken *pToken, int32_t optrType) { if (optrType == TK_INTEGER || optrType == TK_STRING) { if (pToken) { toTSDBType(pToken->type); - tVariantCreate(&pSqlExpr->value, pToken, true); + tVariantCreate(&pSqlExpr->value, pToken); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; @@ -275,6 +283,25 @@ tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToke return pExpr; } +tSqlExpr *tSqlExprCreateFuncWithParams(SSqlInfo *pInfo, tSqlExpr* col, TAOS_FIELD *colType, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType) { + if (colType == NULL || col == NULL) { + return NULL; + } + + if (NULL == col) { + return NULL; + } + + tSqlExpr* ctype = tSqlExprCreateIdValue(pInfo, (SStrToken *)colType, TK_AS); + + SArray *exprList = tSqlExprListAppend(0,col,0, 0); + + tSqlExprListAppend(exprList,ctype,0, 0); + + return tSqlExprCreateFunction(exprList, pFuncToken, endToken, optType); +} + + /* * create binary expression in this procedure * if the expr is arithmetic, calculate the result and set it to tSqlExpr Object @@ -387,6 +414,11 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pRSub->Expr.paramList = (SArray *)pRight; pExpr->pRight = 
pRSub; + } else if (optrType == TK_ARROW || optrType == TK_CONTAINS) { + pExpr->tokenId = optrType; + pExpr->pLeft = pLeft; + pExpr->pRight = pRight; + pExpr->type = SQL_NODE_EXPR; } else { pExpr->tokenId = optrType; pExpr->pLeft = pLeft; @@ -522,12 +554,13 @@ void tSqlExprCompact(tSqlExpr** pExpr) { } bool tSqlExprIsLeaf(tSqlExpr* pExpr) { - return (pExpr->pRight == NULL && pExpr->pLeft == NULL) && + return ((pExpr->pRight == NULL && pExpr->pLeft == NULL) && (pExpr->tokenId == 0 || (pExpr->tokenId == TK_ID) || (pExpr->tokenId >= TK_BOOL && pExpr->tokenId <= TK_NCHAR) || (pExpr->tokenId == TK_NULL) || - (pExpr->tokenId == TK_SET)); + (pExpr->tokenId == TK_SET))) || + (pExpr->tokenId == TK_ARROW); } bool tSqlExprIsParentOfLeaf(tSqlExpr* pExpr) { @@ -566,7 +599,7 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order, if (pToken) { tVariantListItem item; - tVariantCreate(&item.pVar, pToken, needRmquoteEscape); + tVariantCreateExt(&item.pVar, pToken, TK_ID, needRmquoteEscape); item.sortOrder = order; taosArrayPush(pList, &item); @@ -575,6 +608,24 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order, return pList; } +SArray *commonItemAppend(SArray *pList, tVariant *pVar, tSqlExpr *jsonExp, bool isJsonExp, uint8_t sortOrder){ + if (pList == NULL) { + pList = taosArrayInit(4, sizeof(CommonItem)); + } + + CommonItem item; + item.sortOrder = sortOrder; + item.isJsonExp = isJsonExp; + if(isJsonExp){ + item.jsonExp = jsonExp; + }else{ + item.pVar = *pVar; + } + + taosArrayPush(pList, &item); + return pList; +} + SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) { if (pList == NULL) { pList = taosArrayInit(4, sizeof(tVariantListItem)); @@ -680,7 +731,7 @@ void tSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) { // column name is too long, set the it to be invalid. 
if ((int32_t) pName->n >= maxLen) { - pName->n = -1; + pField->name[0] = 0; } else { strncpy(pField->name, pName->z, pName->n); pField->name[pName->n] = 0; @@ -801,6 +852,10 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { pField->bytes = (int16_t)bytes; } + } else { + if (type->type > 0) { + pField->type = -1; + } } } @@ -878,6 +933,15 @@ static void freeVariant(void *pItem) { tVariantDestroy(&p->pVar); } +static void freeCommonItem(void *pItem) { + CommonItem* p = (CommonItem *) pItem; + if (p->isJsonExp){ + tSqlExprDestroy(p->jsonExp); + }else{ + tVariantDestroy(&p->pVar); + } +} + void freeCreateTableInfo(void* p) { SCreatedTableInfo* pInfo = (SCreatedTableInfo*) p; taosArrayDestroy(pInfo->pTagNames); @@ -897,10 +961,10 @@ void destroySqlNode(SSqlNode *pSqlNode) { tSqlExprDestroy(pSqlNode->pWhere); pSqlNode->pWhere = NULL; - taosArrayDestroyEx(pSqlNode->pSortOrder, freeVariant); + taosArrayDestroyEx(pSqlNode->pSortOrder, freeCommonItem); pSqlNode->pSortOrder = NULL; - taosArrayDestroyEx(pSqlNode->pGroupby, freeVariant); + taosArrayDestroyEx(pSqlNode->pGroupby, freeCommonItem); pSqlNode->pGroupby = NULL; pSqlNode->from = destroyRelationInfo(pSqlNode->from); @@ -1015,7 +1079,6 @@ void SqlInfoDestroy(SSqlInfo *pInfo) { } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { taosArrayDestroyEx(pInfo->pAlterInfo->varList, freeVariant); taosArrayDestroy(pInfo->pAlterInfo->pAddColumns); - tfree(pInfo->pAlterInfo->tagData.data); tfree(pInfo->pAlterInfo); } else if (pInfo->type == TSDB_SQL_COMPACT_VNODE) { tSqlExprListDestroy(pInfo->list); diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 99572f6e9345b933434e3685ecb79750a04388fc..acbf094555e72cb72fd096014be8e8a89d700f4c 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -5,7 +5,7 @@ #include "queryLog.h" static int32_t getDataStartOffset(); -static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo); +static void TSBufUpdateGroupInfo(STSBuf* 
pTSBuf, int32_t qry_index, STSGroupBlockInfo* pBlockInfo); static STSBuf* allocResForTSBuf(STSBuf* pTSBuf); static int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader); @@ -267,7 +267,8 @@ static void writeDataToDisk(STSBuf* pTSBuf) { metaLen += (int32_t)fwrite(&pBlock->tag.nType, 1, sizeof(pBlock->tag.nType), pTSBuf->f); int32_t trueLen = pBlock->tag.nLen; - if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { + if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || + pBlock->tag.nType == TSDB_DATA_TYPE_JSON) { metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f); } else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) { @@ -349,7 +350,8 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { // NOTE: mix types tags are not supported size_t sz = 0; - if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { + if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || + pBlock->tag.nType == TSDB_DATA_TYPE_JSON) { char* tp = realloc(pBlock->tag.pz, pBlock->tag.nLen + 1); assert(tp != NULL); @@ -697,8 +699,8 @@ bool tsBufNextPos(STSBuf* pTSBuf) { int32_t groupIndex = pTSBuf->numOfGroups - 1; pCur->vgroupIndex = groupIndex; - int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id; - STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id); + // get current vgroupIndex BlockInfo + STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pCur->vgroupIndex].info; int32_t blockIndex = pBlockInfo->numOfBlocks - 1; tsBufGetBlock(pTSBuf, groupIndex, blockIndex); @@ -718,32 +720,43 @@ bool tsBufNextPos(STSBuf* pTSBuf) { while (1) { assert(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE); + // tsIndex is last if ((pCur->order == TSDB_ORDER_ASC && pCur->tsIndex >= 
pTSBuf->block.numOfElem - 1) || (pCur->order == TSDB_ORDER_DESC && pCur->tsIndex <= 0)) { - int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id; - STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id); - if (pBlockInfo == NULL || (pCur->blockIndex >= pBlockInfo->numOfBlocks - 1 && pCur->order == TSDB_ORDER_ASC) || + // get current vgroupIndex BlockInfo + STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pCur->vgroupIndex].info; + if (pBlockInfo == NULL) { + return false; + } + + // blockIndex is last + if ((pCur->blockIndex >= pBlockInfo->numOfBlocks - 1 && pCur->order == TSDB_ORDER_ASC) || (pCur->blockIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) { + + // vgroupIndex is last if ((pCur->vgroupIndex >= pTSBuf->numOfGroups - 1 && pCur->order == TSDB_ORDER_ASC) || (pCur->vgroupIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) { + // this is end. both vgroupIndex and blockindex and tsIndex is last pCur->vgroupIndex = -1; return false; } - if (pBlockInfo == NULL) { - return false; - } - + // blockIndex must match with next group + int32_t nextGroupIdx = pCur->vgroupIndex + step; + pBlockInfo = &pTSBuf->pData[nextGroupIdx].info; int32_t blockIndex = (pCur->order == TSDB_ORDER_ASC) ? 
0 : (pBlockInfo->numOfBlocks - 1); + // vgroupIndex move next and set value in tsBufGetBlock() tsBufGetBlock(pTSBuf, pCur->vgroupIndex + step, blockIndex); break; } else { + // blockIndex move next and set value in tsBufGetBlock() tsBufGetBlock(pTSBuf, pCur->vgroupIndex, pCur->blockIndex + step); break; } } else { + // tsIndex move next pCur->tsIndex += step; break; } @@ -767,7 +780,7 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { } STSCursor* pCur = &pTSBuf->cur; - if (pCur != NULL && pCur->vgroupIndex < 0) { + if (pCur->vgroupIndex < 0) { return elem1; } @@ -796,7 +809,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { return -1; } - // src can only have one vnode index + // src can only have one vnode qry_index assert(pSrcBuf->numOfGroups == 1); // there are data in buffer, flush to disk first @@ -819,7 +832,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { pDestBuf->pData = tmp; } - // directly copy the vnode index information + // directly copy the vnode qry_index information memcpy(&pDestBuf->pData[oldSize], pSrcBuf->pData, (size_t)pSrcBuf->numOfGroups * sizeof(STSGroupBlockInfoEx)); // set the new offset value @@ -1012,8 +1025,8 @@ static int32_t getDataStartOffset() { } // update prev vnode length info in file -static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo) { - int32_t offset = sizeof(STSBufFileHeader) + index * sizeof(STSGroupBlockInfo); +static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t qry_index, STSGroupBlockInfo* pBlockInfo) { + int32_t offset = sizeof(STSBufFileHeader) + qry_index * sizeof(STSGroupBlockInfo); doUpdateGroupInfo(pTSBuf, offset, pBlockInfo); } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index a150f3a717afaa0ddd79a33a9c8be5285c327574..4da6f52d7ae08dcdbc7192c7b89a6fb2733995fe 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -23,6 +23,8 @@ #include "tlosertree.h" #include "queryLog.h" #include "tscompression.h" +#include 
"tscUtil.h" +#include "cJSON.h" typedef struct SCompSupporter { STableQueryInfo **pTableQueryInfo; @@ -587,4 +589,3 @@ void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDi tfree(outputBuf); } } - diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index fce7f649892f87d075c8dd64e4d1160e5d05bf77..2a2ccf9cae0f9e2aab60bddca7c27a8ceb719239 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -279,6 +279,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { if (isQueryKilled(pQInfo)) { qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); + pQInfo->runtimeEnv.outputBuf = NULL; return doBuildResCheck(pQInfo); } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index b06e430139339e7fbe335a3dbb3683bc470b2994..c2d150911dc694ae6b9ab6e35f34207e86219a85 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -1,3 +1,5 @@ +/* This file is automatically generated by Lemon from input grammar +** source file "sql.y". */ /* ** 2000-05-29 ** @@ -22,10 +24,7 @@ ** The following is the concatenation of all %include directives from the ** input grammar file: */ -#include -#include /************ Begin %include sections from the grammar ************************/ - #include #include #include @@ -38,11 +37,211 @@ #include "tutil.h" #include "tvariant.h" /**************** End of %include directives **********************************/ -/* These constants specify the various numeric values for terminal symbols -** in a format understandable to "makeheaders". This section is blank unless -** "lemon" is run with the "-m" command-line option. -***************** Begin makeheaders token definitions *************************/ -/**************** End makeheaders token definitions ***************************/ +/* These constants specify the various numeric values for terminal symbols. 
+***************** Begin token definitions *************************************/ +#ifndef TK_ID +#define TK_ID 1 +#define TK_BOOL 2 +#define TK_TINYINT 3 +#define TK_SMALLINT 4 +#define TK_INTEGER 5 +#define TK_BIGINT 6 +#define TK_FLOAT 7 +#define TK_DOUBLE 8 +#define TK_STRING 9 +#define TK_TIMESTAMP 10 +#define TK_BINARY 11 +#define TK_NCHAR 12 +#define TK_JSON 13 +#define TK_OR 14 +#define TK_AND 15 +#define TK_NOT 16 +#define TK_EQ 17 +#define TK_NE 18 +#define TK_ISNULL 19 +#define TK_NOTNULL 20 +#define TK_IS 21 +#define TK_LIKE 22 +#define TK_MATCH 23 +#define TK_NMATCH 24 +#define TK_CONTAINS 25 +#define TK_GLOB 26 +#define TK_BETWEEN 27 +#define TK_IN 28 +#define TK_GT 29 +#define TK_GE 30 +#define TK_LT 31 +#define TK_LE 32 +#define TK_BITAND 33 +#define TK_BITOR 34 +#define TK_LSHIFT 35 +#define TK_RSHIFT 36 +#define TK_PLUS 37 +#define TK_MINUS 38 +#define TK_DIVIDE 39 +#define TK_TIMES 40 +#define TK_STAR 41 +#define TK_SLASH 42 +#define TK_REM 43 +#define TK_UMINUS 44 +#define TK_UPLUS 45 +#define TK_BITNOT 46 +#define TK_ARROW 47 +#define TK_SHOW 48 +#define TK_DATABASES 49 +#define TK_TOPICS 50 +#define TK_FUNCTIONS 51 +#define TK_MNODES 52 +#define TK_DNODES 53 +#define TK_ACCOUNTS 54 +#define TK_USERS 55 +#define TK_MODULES 56 +#define TK_QUERIES 57 +#define TK_CONNECTIONS 58 +#define TK_STREAMS 59 +#define TK_VARIABLES 60 +#define TK_SCORES 61 +#define TK_GRANTS 62 +#define TK_VNODES 63 +#define TK_DOT 64 +#define TK_CREATE 65 +#define TK_TABLE 66 +#define TK_STABLE 67 +#define TK_DATABASE 68 +#define TK_TABLES 69 +#define TK_STABLES 70 +#define TK_VGROUPS 71 +#define TK_DROP 72 +#define TK_TOPIC 73 +#define TK_FUNCTION 74 +#define TK_DNODE 75 +#define TK_USER 76 +#define TK_ACCOUNT 77 +#define TK_USE 78 +#define TK_DESCRIBE 79 +#define TK_DESC 80 +#define TK_ALTER 81 +#define TK_PASS 82 +#define TK_PRIVILEGE 83 +#define TK_LOCAL 84 +#define TK_COMPACT 85 +#define TK_LP 86 +#define TK_RP 87 +#define TK_IF 88 +#define TK_EXISTS 89 +#define TK_AS 
90 +#define TK_OUTPUTTYPE 91 +#define TK_AGGREGATE 92 +#define TK_BUFSIZE 93 +#define TK_PPS 94 +#define TK_TSERIES 95 +#define TK_DBS 96 +#define TK_STORAGE 97 +#define TK_QTIME 98 +#define TK_CONNS 99 +#define TK_STATE 100 +#define TK_COMMA 101 +#define TK_KEEP 102 +#define TK_CACHE 103 +#define TK_REPLICA 104 +#define TK_QUORUM 105 +#define TK_DAYS 106 +#define TK_MINROWS 107 +#define TK_MAXROWS 108 +#define TK_BLOCKS 109 +#define TK_CTIME 110 +#define TK_WAL 111 +#define TK_FSYNC 112 +#define TK_COMP 113 +#define TK_PRECISION 114 +#define TK_UPDATE 115 +#define TK_CACHELAST 116 +#define TK_PARTITIONS 117 +#define TK_UNSIGNED 118 +#define TK_TAGS 119 +#define TK_USING 120 +#define TK_NULL 121 +#define TK_NOW 122 +#define TK_VARIABLE 123 +#define TK_SELECT 124 +#define TK_UNION 125 +#define TK_ALL 126 +#define TK_DISTINCT 127 +#define TK_FROM 128 +#define TK_RANGE 129 +#define TK_INTERVAL 130 +#define TK_EVERY 131 +#define TK_SESSION 132 +#define TK_STATE_WINDOW 133 +#define TK_FILL 134 +#define TK_SLIDING 135 +#define TK_ORDER 136 +#define TK_BY 137 +#define TK_ASC 138 +#define TK_GROUP 139 +#define TK_HAVING 140 +#define TK_LIMIT 141 +#define TK_OFFSET 142 +#define TK_SLIMIT 143 +#define TK_SOFFSET 144 +#define TK_WHERE 145 +#define TK_RESET 146 +#define TK_QUERY 147 +#define TK_SYNCDB 148 +#define TK_ADD 149 +#define TK_COLUMN 150 +#define TK_MODIFY 151 +#define TK_TAG 152 +#define TK_CHANGE 153 +#define TK_SET 154 +#define TK_KILL 155 +#define TK_CONNECTION 156 +#define TK_STREAM 157 +#define TK_COLON 158 +#define TK_ABORT 159 +#define TK_AFTER 160 +#define TK_ATTACH 161 +#define TK_BEFORE 162 +#define TK_BEGIN 163 +#define TK_CASCADE 164 +#define TK_CLUSTER 165 +#define TK_CONFLICT 166 +#define TK_COPY 167 +#define TK_DEFERRED 168 +#define TK_DELIMITERS 169 +#define TK_DETACH 170 +#define TK_EACH 171 +#define TK_END 172 +#define TK_EXPLAIN 173 +#define TK_FAIL 174 +#define TK_FOR 175 +#define TK_IGNORE 176 +#define TK_IMMEDIATE 177 +#define TK_INITIALLY 178 
+#define TK_INSTEAD 179 +#define TK_KEY 180 +#define TK_OF 181 +#define TK_RAISE 182 +#define TK_REPLACE 183 +#define TK_RESTRICT 184 +#define TK_ROW 185 +#define TK_STATEMENT 186 +#define TK_TRIGGER 187 +#define TK_VIEW 188 +#define TK_IPTOKEN 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_TBNAME 195 +#define TK_JOIN 196 +#define TK_INSERT 197 +#define TK_INTO 198 +#define TK_VALUES 199 +#define TK_FILE 200 +#endif +/**************** End token definitions ***************************************/ /* The next sections is a series of control #defines. ** various aspects of the generated parser. @@ -100,30 +299,30 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 282 +#define YYNOCODE 284 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - tVariant yy2; - SCreateDbInfo yy10; - int32_t yy40; - SSqlNode* yy68; - SCreatedTableInfo yy72; - SLimitVal yy114; - SRangeVal yy144; - SCreateTableSql* yy170; - SIntervalVal yy280; - int yy281; - SSessionWindowVal yy295; - SArray* yy345; - tSqlExpr* yy418; - SCreateAcctInfo yy427; - SWindowStateVal yy432; - SRelationInfo* yy484; - TAOS_FIELD yy487; - int64_t yy525; + SCreateTableSql* yy6; + SSqlNode* yy16; + tSqlExpr* yy18; + SIntervalVal yy32; + SRelationInfo* yy36; + SLimitVal yy38; + SCreateAcctInfo yy51; + int64_t yy69; + SRangeVal yy124; + SSessionWindowVal yy155; + tVariant yy162; + SArray* yy189; + SCreatedTableInfo yy208; + TAOS_FIELD yy279; + SWindowStateVal yy336; + int yy420; + SCreateDbInfo yy470; + int32_t yy516; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -139,18 +338,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 378 -#define YYNRULE 302 -#define YYNRULE_WITH_ACTION 302 -#define YYNTOKEN 199 
-#define YY_MAX_SHIFT 377 -#define YY_MIN_SHIFTREDUCE 593 -#define YY_MAX_SHIFTREDUCE 894 -#define YY_ERROR_ACTION 895 -#define YY_ACCEPT_ACTION 896 -#define YY_NO_ACTION 897 -#define YY_MIN_REDUCE 898 -#define YY_MAX_REDUCE 1199 +#define YYNSTATE 393 +#define YYNRULE 315 +#define YYNRULE_WITH_ACTION 315 +#define YYNTOKEN 201 +#define YY_MAX_SHIFT 392 +#define YY_MIN_SHIFTREDUCE 617 +#define YY_MAX_SHIFTREDUCE 931 +#define YY_ERROR_ACTION 932 +#define YY_ACCEPT_ACTION 933 +#define YY_NO_ACTION 934 +#define YY_MIN_REDUCE 935 +#define YY_MAX_REDUCE 1249 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -217,297 +416,316 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (790) +#define YY_ACTTAB_COUNT (858) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 244, 644, 376, 237, 1053, 23, 213, 728, 1075, 645, - /* 10 */ 680, 896, 377, 59, 60, 250, 63, 64, 1175, 1053, - /* 20 */ 258, 53, 52, 51, 644, 62, 334, 67, 65, 68, - /* 30 */ 66, 158, 645, 336, 174, 58, 57, 354, 353, 56, - /* 40 */ 55, 54, 59, 60, 252, 63, 64, 1052, 1053, 258, - /* 50 */ 53, 52, 51, 296, 62, 334, 67, 65, 68, 66, - /* 60 */ 1023, 1066, 1021, 1022, 58, 57, 1195, 1024, 56, 55, - /* 70 */ 54, 1025, 255, 1026, 1027, 58, 57, 1072, 280, 56, - /* 80 */ 55, 54, 59, 60, 165, 63, 64, 38, 84, 258, - /* 90 */ 53, 52, 51, 90, 62, 334, 67, 65, 68, 66, - /* 100 */ 1066, 287, 286, 644, 58, 57, 332, 29, 56, 55, - /* 110 */ 54, 645, 59, 61, 831, 63, 64, 240, 1039, 258, - /* 120 */ 53, 52, 51, 644, 62, 334, 67, 65, 68, 66, - /* 130 */ 45, 645, 239, 213, 58, 57, 1050, 850, 56, 55, - /* 140 */ 54, 60, 1047, 63, 64, 1176, 281, 258, 53, 52, - /* 150 */ 51, 165, 62, 334, 67, 65, 68, 66, 38, 308, - /* 160 */ 39, 95, 58, 57, 796, 797, 56, 55, 54, 594, - /* 170 */ 595, 596, 597, 598, 599, 600, 
601, 602, 603, 604, - /* 180 */ 605, 606, 607, 156, 1066, 238, 63, 64, 768, 251, - /* 190 */ 258, 53, 52, 51, 254, 62, 334, 67, 65, 68, - /* 200 */ 66, 241, 364, 248, 332, 58, 57, 1050, 210, 56, - /* 210 */ 55, 54, 256, 44, 330, 371, 370, 329, 328, 327, - /* 220 */ 369, 326, 325, 324, 368, 323, 367, 366, 1123, 16, - /* 230 */ 306, 15, 165, 24, 6, 1015, 1003, 1004, 1005, 1006, - /* 240 */ 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1016, 1017, - /* 250 */ 216, 165, 257, 846, 211, 213, 835, 224, 838, 837, - /* 260 */ 841, 840, 99, 141, 140, 139, 223, 1176, 257, 846, - /* 270 */ 339, 90, 835, 772, 838, 272, 841, 56, 55, 54, - /* 280 */ 67, 65, 68, 66, 276, 275, 235, 236, 58, 57, - /* 290 */ 335, 765, 56, 55, 54, 1036, 1037, 35, 1040, 259, - /* 300 */ 372, 984, 235, 236, 5, 41, 184, 267, 45, 1122, - /* 310 */ 38, 183, 108, 113, 104, 112, 752, 9, 180, 749, - /* 320 */ 261, 750, 784, 751, 38, 102, 787, 266, 96, 38, - /* 330 */ 319, 279, 836, 82, 839, 69, 125, 119, 130, 217, - /* 340 */ 231, 946, 118, 129, 117, 135, 138, 128, 194, 263, - /* 350 */ 264, 69, 292, 293, 132, 204, 202, 200, 38, 1049, - /* 360 */ 213, 1041, 199, 145, 144, 143, 142, 127, 38, 249, - /* 370 */ 847, 842, 1176, 1050, 343, 38, 38, 843, 1050, 364, - /* 380 */ 844, 44, 38, 371, 370, 83, 847, 842, 369, 375, - /* 390 */ 374, 621, 368, 843, 367, 366, 38, 262, 38, 260, - /* 400 */ 267, 342, 341, 344, 268, 218, 265, 1050, 349, 348, - /* 410 */ 813, 181, 14, 345, 219, 267, 98, 1050, 87, 1038, - /* 420 */ 346, 350, 88, 97, 1050, 1050, 1051, 351, 155, 153, - /* 430 */ 152, 1050, 956, 753, 754, 947, 34, 242, 85, 194, - /* 440 */ 793, 352, 194, 356, 803, 1050, 101, 1050, 804, 1, - /* 450 */ 182, 3, 195, 845, 160, 283, 291, 290, 70, 283, - /* 460 */ 75, 78, 26, 738, 311, 740, 313, 739, 812, 314, - /* 470 */ 869, 848, 833, 643, 18, 81, 17, 39, 39, 70, - /* 480 */ 100, 70, 137, 136, 25, 25, 757, 25, 758, 20, - /* 490 */ 755, 19, 756, 124, 22, 123, 21, 288, 1170, 1169, - /* 500 */ 1168, 233, 79, 76, 234, 214, 
215, 727, 289, 1187, - /* 510 */ 834, 220, 212, 221, 222, 1133, 226, 227, 228, 1132, - /* 520 */ 246, 225, 277, 1129, 209, 1128, 247, 355, 48, 1067, - /* 530 */ 157, 1074, 1085, 1064, 154, 1082, 1083, 284, 1115, 1087, - /* 540 */ 159, 164, 1114, 302, 1048, 176, 282, 86, 177, 1046, - /* 550 */ 178, 179, 961, 783, 316, 317, 295, 318, 321, 322, - /* 560 */ 166, 46, 243, 297, 309, 80, 207, 42, 333, 955, - /* 570 */ 340, 1194, 115, 1193, 1190, 185, 347, 1186, 121, 299, - /* 580 */ 77, 167, 50, 307, 1185, 1182, 168, 305, 186, 303, - /* 590 */ 981, 43, 301, 40, 47, 208, 943, 131, 941, 133, - /* 600 */ 134, 939, 938, 298, 269, 197, 198, 935, 934, 933, - /* 610 */ 932, 931, 930, 929, 201, 203, 925, 923, 921, 205, - /* 620 */ 918, 206, 294, 914, 320, 49, 91, 300, 1116, 365, - /* 630 */ 126, 357, 358, 359, 360, 361, 362, 232, 363, 253, - /* 640 */ 315, 373, 894, 270, 271, 893, 273, 274, 229, 892, - /* 650 */ 875, 230, 109, 960, 959, 874, 110, 146, 278, 283, - /* 660 */ 310, 10, 285, 89, 92, 760, 937, 936, 189, 147, - /* 670 */ 188, 982, 187, 190, 191, 193, 928, 192, 148, 927, - /* 680 */ 4, 149, 1019, 920, 30, 983, 919, 175, 171, 169, - /* 690 */ 172, 170, 173, 33, 2, 792, 1029, 73, 790, 789, - /* 700 */ 786, 785, 74, 163, 794, 161, 245, 805, 162, 11, - /* 710 */ 799, 93, 31, 801, 94, 304, 32, 12, 13, 27, - /* 720 */ 312, 103, 28, 101, 106, 36, 658, 693, 691, 690, - /* 730 */ 689, 105, 687, 686, 37, 107, 685, 682, 648, 111, - /* 740 */ 7, 331, 849, 337, 8, 851, 338, 114, 39, 71, - /* 750 */ 72, 116, 120, 730, 729, 726, 122, 674, 672, 664, - /* 760 */ 670, 666, 668, 662, 660, 696, 695, 694, 692, 688, - /* 770 */ 684, 683, 196, 646, 611, 898, 897, 897, 897, 897, - /* 780 */ 897, 897, 897, 897, 897, 897, 897, 897, 150, 151, + /* 0 */ 102, 668, 668, 1166, 161, 1167, 312, 810, 260, 669, + /* 10 */ 669, 813, 391, 241, 37, 38, 24, 41, 42, 1084, + /* 20 */ 1076, 263, 31, 30, 29, 1089, 1223, 40, 344, 45, + /* 30 */ 43, 46, 44, 1073, 1074, 55, 1077, 36, 35, 298, + /* 40 */ 299, 34, 33, 
32, 37, 38, 213, 41, 42, 250, + /* 50 */ 84, 263, 31, 30, 29, 214, 1223, 40, 344, 45, + /* 60 */ 43, 46, 44, 933, 392, 1223, 256, 36, 35, 211, + /* 70 */ 215, 34, 33, 32, 293, 292, 128, 122, 133, 1223, + /* 80 */ 1223, 1226, 1225, 132, 1075, 138, 141, 131, 37, 38, + /* 90 */ 85, 41, 42, 983, 135, 263, 31, 30, 29, 668, + /* 100 */ 196, 40, 344, 45, 43, 46, 44, 669, 340, 287, + /* 110 */ 13, 36, 35, 1105, 101, 34, 33, 32, 37, 38, + /* 120 */ 58, 41, 42, 60, 246, 263, 31, 30, 29, 220, + /* 130 */ 286, 40, 344, 45, 43, 46, 44, 316, 97, 1223, + /* 140 */ 96, 36, 35, 668, 104, 34, 33, 32, 340, 37, + /* 150 */ 39, 669, 41, 42, 1105, 176, 263, 31, 30, 29, + /* 160 */ 1114, 862, 40, 344, 45, 43, 46, 44, 34, 33, + /* 170 */ 32, 244, 36, 35, 302, 221, 34, 33, 32, 206, + /* 180 */ 204, 202, 379, 59, 51, 1223, 201, 148, 147, 146, + /* 190 */ 145, 618, 619, 620, 621, 622, 623, 624, 625, 626, + /* 200 */ 627, 628, 629, 630, 631, 159, 993, 242, 38, 278, + /* 210 */ 41, 42, 59, 196, 263, 31, 30, 29, 282, 281, + /* 220 */ 40, 344, 45, 43, 46, 44, 984, 222, 243, 1111, + /* 230 */ 36, 35, 1087, 196, 34, 33, 32, 1223, 41, 42, + /* 240 */ 387, 1021, 263, 31, 30, 29, 822, 823, 40, 344, + /* 250 */ 45, 43, 46, 44, 390, 389, 645, 253, 36, 35, + /* 260 */ 704, 1087, 34, 33, 32, 67, 338, 386, 385, 337, + /* 270 */ 336, 335, 384, 334, 333, 332, 383, 331, 382, 381, + /* 280 */ 1052, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, + /* 290 */ 1049, 1050, 1051, 1053, 1054, 234, 877, 25, 1215, 866, + /* 300 */ 1164, 869, 1165, 872, 776, 59, 59, 773, 1223, 774, + /* 310 */ 868, 775, 871, 867, 219, 870, 234, 877, 59, 1245, + /* 320 */ 866, 227, 869, 1214, 872, 342, 266, 144, 143, 142, + /* 330 */ 226, 239, 240, 1223, 352, 91, 5, 62, 186, 268, + /* 340 */ 269, 259, 315, 185, 111, 116, 107, 115, 272, 59, + /* 350 */ 254, 356, 239, 240, 1087, 1087, 346, 45, 43, 46, + /* 360 */ 44, 67, 327, 386, 385, 36, 35, 1086, 384, 34, + /* 370 */ 33, 32, 383, 68, 382, 381, 100, 59, 264, 1060, + /* 380 */ 47, 
1058, 1059, 36, 35, 1237, 1061, 34, 33, 32, + /* 390 */ 1062, 86, 1063, 1064, 357, 285, 343, 83, 1087, 59, + /* 400 */ 752, 47, 215, 267, 235, 265, 248, 355, 354, 59, + /* 410 */ 1090, 59, 1223, 294, 1226, 878, 873, 874, 342, 59, + /* 420 */ 59, 91, 358, 777, 270, 274, 1087, 271, 875, 364, + /* 430 */ 363, 876, 369, 368, 273, 262, 878, 873, 874, 215, + /* 440 */ 158, 156, 155, 255, 359, 182, 257, 1090, 1087, 1223, + /* 450 */ 1090, 1226, 1213, 237, 365, 130, 366, 6, 1087, 68, + /* 460 */ 1087, 238, 1223, 1223, 367, 371, 217, 379, 1087, 1087, + /* 470 */ 218, 1223, 223, 216, 224, 225, 1223, 1105, 105, 794, + /* 480 */ 1223, 229, 1223, 1223, 1223, 1223, 230, 273, 842, 231, + /* 490 */ 228, 1223, 791, 212, 245, 273, 1223, 88, 183, 1223, + /* 500 */ 1223, 273, 99, 1223, 98, 89, 345, 1, 184, 819, + /* 510 */ 3, 197, 1088, 829, 1078, 297, 296, 830, 76, 10, + /* 520 */ 762, 79, 319, 163, 764, 349, 321, 71, 763, 261, + /* 530 */ 54, 48, 348, 906, 60, 289, 60, 879, 71, 667, + /* 540 */ 103, 295, 71, 289, 1177, 82, 841, 9, 15, 1176, + /* 550 */ 14, 9, 251, 9, 347, 121, 17, 120, 16, 361, + /* 560 */ 360, 77, 80, 322, 783, 798, 784, 1173, 781, 865, + /* 570 */ 782, 19, 1172, 18, 127, 252, 126, 751, 21, 370, + /* 580 */ 20, 140, 139, 283, 160, 1113, 26, 1124, 1121, 1122, + /* 590 */ 1106, 290, 1126, 162, 1156, 167, 308, 1155, 1154, 1153, + /* 600 */ 178, 1085, 179, 1083, 180, 181, 998, 324, 157, 809, + /* 610 */ 325, 1103, 326, 301, 329, 330, 69, 209, 65, 341, + /* 620 */ 168, 169, 992, 247, 303, 317, 353, 305, 1244, 81, + /* 630 */ 882, 28, 118, 78, 1243, 1240, 170, 187, 171, 362, + /* 640 */ 313, 172, 311, 1236, 309, 124, 1235, 307, 1232, 173, + /* 650 */ 188, 1018, 66, 304, 61, 175, 70, 210, 980, 134, + /* 660 */ 978, 136, 300, 137, 976, 975, 275, 199, 200, 972, + /* 670 */ 971, 970, 969, 968, 967, 966, 203, 205, 962, 960, + /* 680 */ 958, 27, 207, 955, 208, 951, 328, 174, 288, 87, + /* 690 */ 92, 380, 306, 373, 129, 372, 374, 375, 236, 376, + /* 700 */ 258, 323, 377, 378, 
388, 931, 276, 232, 277, 930, + /* 710 */ 233, 279, 997, 996, 112, 113, 280, 929, 912, 284, + /* 720 */ 911, 318, 289, 11, 90, 291, 786, 52, 974, 973, + /* 730 */ 1019, 191, 149, 189, 190, 192, 193, 195, 150, 194, + /* 740 */ 965, 2, 1056, 151, 964, 4, 152, 1020, 957, 53, + /* 750 */ 177, 956, 93, 818, 74, 816, 815, 812, 1066, 811, + /* 760 */ 75, 166, 820, 164, 249, 831, 165, 22, 825, 94, + /* 770 */ 63, 827, 95, 310, 347, 314, 12, 64, 49, 23, + /* 780 */ 320, 50, 104, 106, 56, 682, 108, 109, 57, 110, + /* 790 */ 717, 715, 714, 713, 711, 710, 709, 706, 672, 339, + /* 800 */ 114, 7, 903, 901, 881, 904, 880, 902, 8, 883, + /* 810 */ 350, 351, 72, 754, 780, 117, 119, 60, 73, 123, + /* 820 */ 125, 779, 753, 750, 698, 696, 688, 694, 690, 692, + /* 830 */ 686, 684, 720, 719, 718, 716, 712, 708, 707, 198, + /* 840 */ 670, 635, 935, 934, 934, 934, 934, 934, 934, 934, + /* 850 */ 934, 934, 934, 934, 934, 934, 153, 154, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 247, 1, 201, 202, 251, 269, 269, 5, 201, 9, - /* 10 */ 5, 199, 200, 13, 14, 247, 16, 17, 281, 251, - /* 20 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29, - /* 30 */ 30, 201, 9, 15, 256, 35, 36, 35, 36, 39, - /* 40 */ 40, 41, 13, 14, 247, 16, 17, 251, 251, 20, - /* 50 */ 21, 22, 23, 275, 25, 26, 27, 28, 29, 30, - /* 60 */ 225, 249, 227, 228, 35, 36, 251, 232, 39, 40, - /* 70 */ 41, 236, 208, 238, 239, 35, 36, 270, 266, 39, - /* 80 */ 40, 41, 13, 14, 201, 16, 17, 201, 88, 20, - /* 90 */ 21, 22, 23, 84, 25, 26, 27, 28, 29, 30, - /* 100 */ 249, 271, 272, 1, 35, 36, 86, 84, 39, 40, - /* 110 */ 41, 9, 13, 14, 85, 16, 17, 266, 0, 20, - /* 120 */ 21, 22, 23, 1, 25, 26, 27, 28, 29, 30, - /* 130 */ 121, 9, 246, 269, 35, 36, 250, 119, 39, 40, - /* 140 */ 41, 14, 201, 16, 17, 281, 85, 20, 21, 22, - /* 150 */ 23, 201, 25, 26, 27, 28, 29, 30, 201, 276, - /* 160 */ 99, 278, 35, 36, 128, 129, 39, 40, 41, 47, - /* 170 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 180 */ 58, 59, 60, 61, 249, 63, 16, 17, 39, 248, - /* 190 */ 
20, 21, 22, 23, 208, 25, 26, 27, 28, 29, - /* 200 */ 30, 266, 92, 246, 86, 35, 36, 250, 269, 39, - /* 210 */ 40, 41, 62, 100, 101, 102, 103, 104, 105, 106, - /* 220 */ 107, 108, 109, 110, 111, 112, 113, 114, 278, 148, - /* 230 */ 280, 150, 201, 46, 84, 225, 226, 227, 228, 229, - /* 240 */ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - /* 250 */ 63, 201, 1, 2, 269, 269, 5, 70, 7, 5, - /* 260 */ 9, 7, 209, 76, 77, 78, 79, 281, 1, 2, - /* 270 */ 83, 84, 5, 124, 7, 145, 9, 39, 40, 41, - /* 280 */ 27, 28, 29, 30, 154, 155, 35, 36, 35, 36, - /* 290 */ 39, 99, 39, 40, 41, 242, 243, 244, 245, 208, - /* 300 */ 223, 224, 35, 36, 64, 65, 66, 201, 121, 278, - /* 310 */ 201, 71, 72, 73, 74, 75, 2, 125, 212, 5, - /* 320 */ 70, 7, 5, 9, 201, 209, 9, 70, 278, 201, - /* 330 */ 90, 144, 5, 146, 7, 84, 64, 65, 66, 269, - /* 340 */ 153, 207, 148, 71, 150, 73, 74, 75, 214, 35, - /* 350 */ 36, 84, 35, 36, 82, 64, 65, 66, 201, 250, - /* 360 */ 269, 245, 71, 72, 73, 74, 75, 80, 201, 246, - /* 370 */ 119, 120, 281, 250, 246, 201, 201, 126, 250, 92, - /* 380 */ 126, 100, 201, 102, 103, 209, 119, 120, 107, 67, - /* 390 */ 68, 69, 111, 126, 113, 114, 201, 147, 201, 149, - /* 400 */ 201, 151, 152, 246, 147, 269, 149, 250, 151, 152, - /* 410 */ 78, 212, 84, 246, 269, 201, 88, 250, 85, 243, - /* 420 */ 246, 246, 85, 252, 250, 250, 212, 246, 64, 65, - /* 430 */ 66, 250, 207, 119, 120, 207, 84, 120, 267, 214, - /* 440 */ 85, 246, 214, 246, 85, 250, 118, 250, 85, 210, - /* 450 */ 211, 205, 206, 126, 99, 122, 35, 36, 99, 122, - /* 460 */ 99, 99, 99, 85, 85, 85, 85, 85, 136, 117, - /* 470 */ 85, 85, 1, 85, 148, 84, 150, 99, 99, 99, - /* 480 */ 99, 99, 80, 81, 99, 99, 5, 99, 7, 148, - /* 490 */ 5, 150, 7, 148, 148, 150, 150, 274, 269, 269, - /* 500 */ 269, 269, 140, 142, 269, 269, 269, 116, 274, 251, - /* 510 */ 39, 269, 269, 269, 269, 241, 269, 269, 269, 241, - /* 520 */ 241, 269, 201, 241, 269, 241, 241, 241, 268, 249, - /* 530 */ 201, 201, 201, 265, 62, 201, 201, 249, 279, 201, - /* 540 */ 
201, 201, 279, 201, 249, 253, 203, 203, 201, 201, - /* 550 */ 201, 201, 201, 126, 201, 201, 273, 201, 201, 201, - /* 560 */ 264, 201, 273, 273, 134, 139, 201, 201, 201, 201, - /* 570 */ 201, 201, 201, 201, 201, 201, 201, 201, 201, 273, - /* 580 */ 141, 263, 138, 137, 201, 201, 262, 132, 201, 131, - /* 590 */ 201, 201, 130, 201, 201, 201, 201, 201, 201, 201, - /* 600 */ 201, 201, 201, 133, 201, 201, 201, 201, 201, 201, - /* 610 */ 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, - /* 620 */ 201, 201, 127, 201, 91, 143, 203, 203, 203, 115, - /* 630 */ 98, 97, 53, 94, 96, 57, 95, 203, 93, 203, - /* 640 */ 203, 86, 5, 156, 5, 5, 156, 5, 203, 5, - /* 650 */ 102, 203, 209, 213, 213, 101, 209, 204, 145, 122, - /* 660 */ 117, 84, 99, 123, 99, 85, 203, 203, 216, 204, - /* 670 */ 220, 222, 221, 219, 217, 215, 203, 218, 204, 203, - /* 680 */ 205, 204, 240, 203, 84, 224, 203, 254, 259, 261, - /* 690 */ 258, 260, 257, 255, 210, 85, 240, 99, 126, 126, - /* 700 */ 5, 5, 84, 99, 85, 84, 1, 85, 84, 135, - /* 710 */ 85, 84, 99, 85, 84, 84, 99, 135, 84, 84, - /* 720 */ 117, 80, 84, 118, 72, 89, 5, 9, 5, 5, - /* 730 */ 5, 88, 5, 5, 89, 88, 5, 5, 87, 80, - /* 740 */ 84, 15, 85, 26, 84, 119, 61, 150, 99, 16, - /* 750 */ 16, 150, 150, 5, 5, 85, 150, 5, 5, 5, - /* 760 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 770 */ 5, 5, 99, 87, 62, 0, 282, 282, 282, 282, - /* 780 */ 282, 282, 282, 282, 282, 282, 282, 282, 21, 21, - /* 790 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 800 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 810 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 820 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 830 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 840 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 850 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 860 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 870 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 880 */ 282, 282, 282, 
282, 282, 282, 282, 282, 282, 282, - /* 890 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 900 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 910 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 920 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 930 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 940 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 950 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 960 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 970 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 980 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, + /* 0 */ 211, 1, 1, 279, 203, 281, 282, 5, 210, 9, + /* 10 */ 9, 9, 203, 204, 14, 15, 271, 17, 18, 203, + /* 20 */ 0, 21, 22, 23, 24, 253, 281, 27, 28, 29, + /* 30 */ 30, 31, 32, 244, 245, 246, 247, 37, 38, 37, + /* 40 */ 38, 41, 42, 43, 14, 15, 271, 17, 18, 1, + /* 50 */ 211, 21, 22, 23, 24, 271, 281, 27, 28, 29, + /* 60 */ 30, 31, 32, 201, 202, 281, 250, 37, 38, 271, + /* 70 */ 271, 41, 42, 43, 273, 274, 66, 67, 68, 281, + /* 80 */ 281, 283, 283, 73, 245, 75, 76, 77, 14, 15, + /* 90 */ 90, 17, 18, 209, 84, 21, 22, 23, 24, 1, + /* 100 */ 216, 27, 28, 29, 30, 31, 32, 9, 88, 87, + /* 110 */ 86, 37, 38, 251, 90, 41, 42, 43, 14, 15, + /* 120 */ 90, 17, 18, 101, 122, 21, 22, 23, 24, 271, + /* 130 */ 268, 27, 28, 29, 30, 31, 32, 278, 279, 281, + /* 140 */ 281, 37, 38, 1, 120, 41, 42, 43, 88, 14, + /* 150 */ 15, 9, 17, 18, 251, 258, 21, 22, 23, 24, + /* 160 */ 203, 87, 27, 28, 29, 30, 31, 32, 41, 42, + /* 170 */ 43, 268, 37, 38, 277, 271, 41, 42, 43, 66, + /* 180 */ 67, 68, 94, 203, 86, 281, 73, 74, 75, 76, + /* 190 */ 77, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 200 */ 58, 59, 60, 61, 62, 63, 209, 65, 15, 147, + /* 210 */ 17, 18, 203, 216, 21, 22, 23, 24, 156, 157, + /* 220 */ 27, 28, 29, 30, 31, 32, 209, 271, 248, 272, + /* 230 */ 37, 38, 252, 216, 41, 42, 43, 281, 17, 18, + /* 240 */ 225, 226, 21, 22, 23, 24, 130, 131, 27, 28, + 
/* 250 */ 29, 30, 31, 32, 69, 70, 71, 248, 37, 38, + /* 260 */ 5, 252, 41, 42, 43, 102, 103, 104, 105, 106, + /* 270 */ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + /* 280 */ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + /* 290 */ 237, 238, 239, 240, 241, 1, 2, 48, 271, 5, + /* 300 */ 279, 7, 281, 9, 2, 203, 203, 5, 281, 7, + /* 310 */ 5, 9, 7, 5, 65, 7, 1, 2, 203, 253, + /* 320 */ 5, 72, 7, 271, 9, 47, 72, 78, 79, 80, + /* 330 */ 81, 37, 38, 281, 85, 86, 66, 67, 68, 37, + /* 340 */ 38, 210, 64, 73, 74, 75, 76, 77, 72, 203, + /* 350 */ 248, 248, 37, 38, 252, 252, 41, 29, 30, 31, + /* 360 */ 32, 102, 92, 104, 105, 37, 38, 252, 109, 41, + /* 370 */ 42, 43, 113, 124, 115, 116, 254, 203, 210, 227, + /* 380 */ 86, 229, 230, 37, 38, 253, 234, 41, 42, 43, + /* 390 */ 238, 269, 240, 241, 248, 146, 25, 148, 252, 203, + /* 400 */ 5, 86, 271, 149, 155, 151, 249, 153, 154, 203, + /* 410 */ 253, 203, 281, 276, 283, 121, 122, 123, 47, 203, + /* 420 */ 203, 86, 248, 121, 122, 149, 252, 151, 123, 153, + /* 430 */ 154, 123, 37, 38, 203, 64, 121, 122, 123, 271, + /* 440 */ 66, 67, 68, 249, 248, 214, 249, 253, 252, 281, + /* 450 */ 253, 283, 271, 271, 248, 82, 248, 86, 252, 124, + /* 460 */ 252, 271, 281, 281, 248, 248, 271, 94, 252, 252, + /* 470 */ 271, 281, 271, 271, 271, 271, 281, 251, 211, 41, + /* 480 */ 281, 271, 281, 281, 281, 281, 271, 203, 80, 271, + /* 490 */ 271, 281, 101, 271, 268, 203, 281, 87, 214, 281, + /* 500 */ 281, 203, 279, 281, 281, 87, 214, 212, 213, 87, + /* 510 */ 207, 208, 214, 87, 247, 37, 38, 87, 101, 128, + /* 520 */ 87, 101, 87, 101, 87, 16, 87, 101, 87, 1, + /* 530 */ 86, 101, 25, 87, 101, 125, 101, 87, 101, 87, + /* 540 */ 101, 276, 101, 125, 243, 86, 138, 101, 150, 243, + /* 550 */ 152, 101, 243, 101, 47, 150, 150, 152, 152, 37, + /* 560 */ 38, 144, 142, 119, 5, 127, 7, 243, 5, 41, + /* 570 */ 7, 150, 243, 152, 150, 243, 152, 118, 150, 243, + /* 580 */ 152, 82, 83, 203, 203, 203, 270, 203, 203, 203, + /* 590 */ 251, 251, 203, 203, 280, 
203, 203, 280, 280, 280, + /* 600 */ 255, 251, 203, 203, 203, 203, 203, 203, 64, 123, + /* 610 */ 203, 267, 203, 275, 203, 203, 203, 203, 203, 203, + /* 620 */ 266, 265, 203, 275, 275, 136, 203, 275, 203, 141, + /* 630 */ 121, 140, 203, 143, 203, 203, 264, 203, 263, 203, + /* 640 */ 139, 262, 134, 203, 133, 203, 203, 132, 203, 261, + /* 650 */ 203, 203, 203, 135, 203, 259, 203, 203, 203, 203, + /* 660 */ 203, 203, 129, 203, 203, 203, 203, 203, 203, 203, + /* 670 */ 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, + /* 680 */ 203, 145, 203, 203, 203, 203, 93, 260, 205, 205, + /* 690 */ 205, 117, 205, 55, 100, 99, 96, 98, 205, 59, + /* 700 */ 205, 205, 97, 95, 88, 5, 158, 205, 5, 5, + /* 710 */ 205, 158, 215, 215, 211, 211, 5, 5, 104, 147, + /* 720 */ 103, 119, 125, 86, 126, 101, 87, 86, 205, 205, + /* 730 */ 224, 218, 206, 223, 222, 221, 219, 217, 206, 220, + /* 740 */ 205, 212, 242, 206, 205, 207, 206, 226, 205, 257, + /* 750 */ 256, 205, 101, 87, 101, 123, 123, 5, 242, 5, + /* 760 */ 86, 101, 87, 86, 1, 87, 86, 137, 87, 86, + /* 770 */ 101, 87, 86, 86, 47, 1, 86, 101, 86, 137, + /* 780 */ 119, 86, 120, 82, 91, 5, 90, 74, 91, 90, + /* 790 */ 9, 5, 5, 5, 5, 5, 5, 5, 89, 16, + /* 800 */ 82, 86, 9, 9, 87, 9, 87, 9, 86, 121, + /* 810 */ 28, 63, 17, 5, 123, 152, 152, 101, 17, 152, + /* 820 */ 152, 123, 5, 87, 5, 5, 5, 5, 5, 5, + /* 830 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 101, + /* 840 */ 89, 64, 0, 284, 284, 284, 284, 284, 284, 284, + /* 850 */ 284, 284, 284, 284, 284, 284, 22, 22, 284, 284, + /* 860 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 870 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 880 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 890 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 900 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 910 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 920 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 930 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 
284, + /* 940 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 950 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 960 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 970 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 980 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 990 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1000 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1010 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1020 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1030 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1040 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + /* 1050 */ 284, 284, 284, 284, 284, 284, 284, 284, 284, }; -#define YY_SHIFT_COUNT (377) +#define YY_SHIFT_COUNT (392) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (775) +#define YY_SHIFT_MAX (842) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 187, 113, 113, 281, 281, 20, 251, 267, 267, 23, - /* 10 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 20 */ 102, 102, 102, 0, 122, 267, 314, 314, 314, 9, - /* 30 */ 9, 102, 102, 36, 102, 118, 102, 102, 102, 102, - /* 40 */ 287, 20, 110, 110, 5, 790, 790, 790, 267, 267, - /* 50 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, - /* 60 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, - /* 70 */ 314, 314, 314, 317, 317, 2, 2, 2, 2, 2, - /* 80 */ 2, 2, 102, 102, 102, 149, 102, 102, 102, 9, - /* 90 */ 9, 102, 102, 102, 102, 332, 332, 192, 9, 102, - /* 100 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 110 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 120 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 130 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 140 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 150 */ 102, 102, 102, 102, 102, 102, 102, 472, 472, 472, - /* 160 */ 427, 427, 427, 427, 472, 472, 426, 439, 430, 444, - /* 170 */ 446, 455, 458, 462, 470, 495, 
482, 472, 472, 472, - /* 180 */ 533, 533, 514, 20, 20, 472, 472, 532, 534, 579, - /* 190 */ 539, 538, 578, 541, 545, 514, 5, 472, 472, 555, - /* 200 */ 555, 472, 555, 472, 555, 472, 472, 790, 790, 29, - /* 210 */ 69, 69, 99, 69, 127, 170, 240, 253, 253, 253, - /* 220 */ 253, 253, 253, 272, 291, 40, 40, 40, 40, 250, - /* 230 */ 257, 130, 328, 238, 238, 254, 327, 322, 364, 61, - /* 240 */ 333, 337, 421, 355, 359, 363, 361, 362, 378, 379, - /* 250 */ 380, 381, 382, 352, 385, 386, 471, 150, 18, 388, - /* 260 */ 81, 194, 326, 481, 485, 341, 345, 391, 346, 402, - /* 270 */ 637, 487, 639, 640, 490, 642, 644, 548, 554, 513, - /* 280 */ 537, 543, 577, 540, 580, 600, 563, 565, 610, 598, - /* 290 */ 572, 573, 695, 696, 618, 619, 621, 622, 624, 625, - /* 300 */ 604, 627, 628, 630, 705, 631, 613, 574, 617, 582, - /* 310 */ 634, 543, 635, 603, 638, 605, 641, 636, 643, 652, - /* 320 */ 721, 645, 647, 718, 723, 724, 725, 727, 728, 731, - /* 330 */ 732, 651, 726, 659, 656, 657, 626, 660, 717, 685, - /* 340 */ 733, 597, 601, 649, 649, 649, 649, 734, 602, 606, - /* 350 */ 649, 649, 649, 748, 749, 670, 649, 752, 753, 754, - /* 360 */ 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, - /* 370 */ 765, 766, 673, 686, 767, 768, 712, 775, + /* 0 */ 249, 163, 163, 259, 259, 60, 315, 294, 294, 294, + /* 10 */ 98, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 20 */ 1, 1, 48, 48, 0, 142, 294, 294, 294, 294, + /* 30 */ 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, + /* 40 */ 294, 294, 294, 294, 294, 294, 294, 294, 302, 302, + /* 50 */ 302, 335, 335, 116, 1, 20, 1, 1, 1, 1, + /* 60 */ 1, 373, 60, 48, 48, 88, 88, 255, 858, 858, + /* 70 */ 858, 302, 302, 302, 2, 2, 395, 395, 395, 395, + /* 80 */ 395, 395, 395, 1, 1, 1, 438, 1, 1, 1, + /* 90 */ 335, 335, 1, 1, 1, 1, 408, 408, 408, 408, + /* 100 */ 391, 335, 1, 1, 1, 1, 1, 1, 1, 1, + /* 110 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 120 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 130 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 140 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 150 */ 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 160 */ 544, 544, 544, 486, 486, 486, 486, 544, 488, 490, + /* 170 */ 489, 491, 501, 508, 511, 515, 518, 533, 536, 544, + /* 180 */ 544, 544, 593, 593, 574, 60, 60, 544, 544, 594, + /* 190 */ 596, 638, 600, 599, 640, 605, 608, 574, 255, 544, + /* 200 */ 544, 616, 616, 544, 616, 544, 616, 544, 544, 858, + /* 210 */ 858, 30, 74, 104, 104, 104, 135, 193, 221, 270, + /* 220 */ 328, 328, 328, 328, 328, 328, 10, 113, 346, 346, + /* 230 */ 346, 346, 254, 276, 371, 62, 24, 127, 127, 305, + /* 240 */ 308, 185, 374, 22, 410, 418, 478, 422, 426, 430, + /* 250 */ 278, 417, 420, 433, 435, 437, 439, 441, 444, 446, + /* 260 */ 450, 507, 528, 509, 452, 398, 405, 406, 559, 563, + /* 270 */ 522, 421, 424, 459, 428, 499, 700, 548, 703, 704, + /* 280 */ 553, 711, 712, 614, 617, 572, 597, 602, 637, 598, + /* 290 */ 639, 641, 624, 651, 666, 653, 632, 633, 752, 754, + /* 300 */ 674, 675, 677, 678, 680, 681, 660, 683, 684, 686, + /* 310 */ 763, 687, 669, 630, 727, 774, 676, 642, 690, 602, + /* 320 */ 692, 661, 695, 662, 701, 693, 696, 713, 780, 697, + /* 330 */ 699, 781, 786, 787, 788, 789, 790, 791, 792, 709, + /* 340 */ 783, 718, 793, 794, 715, 717, 719, 796, 798, 688, + /* 350 */ 722, 782, 748, 795, 663, 664, 716, 716, 716, 716, + /* 360 */ 691, 698, 801, 667, 668, 716, 716, 716, 808, 817, + /* 370 */ 736, 716, 819, 820, 821, 822, 823, 824, 825, 826, + /* 380 */ 827, 828, 829, 830, 831, 832, 833, 738, 751, 834, + /* 390 */ 835, 777, 842, }; -#define YY_REDUCE_COUNT (208) -#define YY_REDUCE_MIN (-264) -#define YY_REDUCE_MAX (484) +#define YY_REDUCE_COUNT (210) +#define YY_REDUCE_MIN (-276) +#define YY_REDUCE_MAX (546) static const short yy_reduce_ofst[] = { - /* 0 */ -188, 10, 10, -165, -165, 53, -136, -14, 91, -170, - /* 10 */ -114, -50, -117, -43, 123, 128, 157, 167, 174, 175, - /* 20 */ 181, 195, 197, -193, -199, -263, -247, -232, -203, -149, - /* 30 */ -65, 31, 50, -222, -59, 116, 106, 199, 214, 109, - /* 40 */ 134, 176, 225, 228, 77, 171, 239, 246, 
-264, -61, - /* 50 */ -15, 70, 136, 145, 229, 230, 231, 232, 235, 236, - /* 60 */ 237, 242, 243, 244, 245, 247, 248, 249, 252, 255, - /* 70 */ -204, -185, 258, 223, 234, 274, 278, 279, 282, 284, - /* 80 */ 285, 286, 321, 329, 330, 260, 331, 334, 335, 280, - /* 90 */ 288, 338, 339, 340, 342, 259, 263, 292, 295, 347, - /* 100 */ 348, 349, 350, 351, 353, 354, 356, 357, 358, 360, - /* 110 */ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, - /* 120 */ 375, 376, 377, 383, 384, 387, 389, 390, 392, 393, - /* 130 */ 394, 395, 396, 397, 398, 399, 400, 401, 403, 404, - /* 140 */ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, - /* 150 */ 415, 416, 417, 418, 419, 420, 422, 343, 344, 423, - /* 160 */ 283, 289, 290, 306, 424, 425, 268, 296, 318, 324, - /* 170 */ 428, 431, 429, 432, 435, 438, 433, 434, 436, 437, - /* 180 */ 440, 441, 442, 443, 447, 445, 448, 449, 451, 450, - /* 190 */ 452, 454, 457, 459, 460, 456, 461, 463, 464, 453, - /* 200 */ 465, 473, 474, 476, 477, 480, 483, 484, 475, + /* 0 */ -138, 53, 53, 152, 152, -211, -202, 131, 168, -201, + /* 10 */ -199, -20, 9, 102, 103, 146, 174, 196, 206, 208, + /* 20 */ 216, 217, -276, -141, -43, -191, -255, -225, -216, -142, + /* 30 */ -96, -44, 27, 52, 181, 182, 190, 195, 199, 201, + /* 40 */ 202, 203, 204, 210, 215, 218, 219, 222, 157, 194, + /* 50 */ 197, -97, 226, -103, -184, 267, 231, 284, 292, 298, + /* 60 */ 115, -116, -161, 21, 223, -3, 17, 15, 122, 295, + /* 70 */ 303, -228, 66, 132, 137, 265, 301, 306, 309, 324, + /* 80 */ 329, 332, 336, 380, 381, 382, 316, 384, 385, 386, + /* 90 */ 339, 340, 389, 390, 392, 393, 314, 317, 318, 319, + /* 100 */ 345, 350, 399, 400, 401, 402, 403, 404, 407, 409, + /* 110 */ 411, 412, 413, 414, 415, 416, 419, 423, 425, 429, + /* 120 */ 431, 432, 434, 436, 440, 442, 443, 445, 447, 448, + /* 130 */ 449, 451, 453, 454, 455, 456, 457, 458, 460, 461, + /* 140 */ 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, + /* 150 */ 472, 473, 474, 475, 476, 477, 479, 480, 481, 482, + /* 160 */ 483, 
484, 485, 338, 348, 349, 352, 487, 344, 354, + /* 170 */ 356, 372, 375, 379, 388, 427, 396, 492, 494, 493, + /* 180 */ 495, 496, 497, 498, 500, 503, 504, 502, 505, 506, + /* 190 */ 510, 512, 513, 514, 517, 519, 520, 516, 521, 523, + /* 200 */ 524, 526, 532, 535, 537, 539, 540, 543, 546, 529, + /* 210 */ 538, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 895, 1018, 957, 1028, 944, 954, 1178, 1178, 1178, 895, - /* 10 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 20 */ 895, 895, 895, 1076, 915, 1178, 895, 895, 895, 895, - /* 30 */ 895, 895, 895, 1100, 895, 954, 895, 895, 895, 895, - /* 40 */ 964, 954, 964, 964, 895, 1071, 1002, 1020, 895, 895, - /* 50 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 60 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 70 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 80 */ 895, 895, 895, 895, 895, 1078, 1084, 1081, 895, 895, - /* 90 */ 895, 1086, 895, 895, 895, 1119, 1119, 1069, 895, 895, - /* 100 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 110 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 120 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 130 */ 895, 942, 895, 940, 895, 895, 895, 895, 895, 895, - /* 140 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 150 */ 895, 895, 895, 895, 895, 895, 913, 917, 917, 917, - /* 160 */ 895, 895, 895, 895, 917, 917, 1126, 1130, 1112, 1124, - /* 170 */ 1120, 1107, 1105, 1103, 1111, 1096, 1134, 917, 917, 917, - /* 180 */ 962, 962, 958, 954, 954, 917, 917, 980, 978, 976, - /* 190 */ 968, 974, 970, 972, 966, 945, 895, 917, 917, 952, - /* 200 */ 952, 917, 952, 917, 952, 917, 917, 1002, 1020, 895, - /* 210 */ 1135, 1125, 895, 1177, 1165, 1164, 895, 1173, 1172, 1171, - /* 220 */ 1163, 1162, 1161, 895, 895, 1157, 1160, 1159, 1158, 895, - /* 230 */ 895, 895, 895, 1167, 1166, 895, 895, 895, 895, 895, - /* 240 */ 895, 895, 1093, 895, 895, 895, 1131, 1127, 895, 895, - /* 250 */ 895, 895, 895, 895, 895, 895, 895, 1137, 
895, 895, - /* 260 */ 895, 895, 895, 895, 895, 895, 895, 1030, 895, 895, - /* 270 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 280 */ 1068, 895, 895, 895, 895, 895, 1080, 1079, 895, 895, - /* 290 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 300 */ 895, 895, 895, 895, 895, 895, 1121, 895, 1113, 895, - /* 310 */ 895, 1042, 895, 895, 895, 895, 895, 895, 895, 895, - /* 320 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 330 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 340 */ 895, 895, 895, 1196, 1191, 1192, 1189, 895, 895, 895, - /* 350 */ 1188, 1183, 1184, 895, 895, 895, 1181, 895, 895, 895, - /* 360 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, - /* 370 */ 895, 895, 986, 895, 924, 922, 895, 895, + /* 0 */ 932, 1055, 994, 1065, 981, 991, 1228, 1228, 1228, 1228, + /* 10 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 20 */ 932, 932, 932, 932, 1115, 952, 932, 932, 932, 932, + /* 30 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 40 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 50 */ 932, 932, 932, 1139, 932, 991, 932, 932, 932, 932, + /* 60 */ 932, 1001, 991, 932, 932, 1001, 1001, 932, 1110, 1039, + /* 70 */ 1057, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 80 */ 932, 932, 932, 932, 932, 932, 1117, 1123, 1120, 932, + /* 90 */ 932, 932, 1125, 932, 932, 932, 1161, 1161, 1161, 1161, + /* 100 */ 1108, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 110 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 120 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 130 */ 932, 932, 932, 932, 979, 932, 977, 932, 932, 932, + /* 140 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 150 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 950, + /* 160 */ 954, 954, 954, 932, 932, 932, 932, 954, 1170, 1174, + /* 170 */ 1151, 1168, 1162, 1146, 1144, 1142, 1150, 1135, 1178, 954, + /* 180 */ 954, 954, 999, 999, 995, 991, 991, 954, 954, 1017, + /* 190 */ 1015, 1013, 1005, 1011, 
1007, 1009, 1003, 982, 932, 954, + /* 200 */ 954, 989, 989, 954, 989, 954, 989, 954, 954, 1039, + /* 210 */ 1057, 1227, 932, 1179, 1169, 1227, 932, 1210, 1209, 932, + /* 220 */ 1218, 1217, 1216, 1208, 1207, 1206, 932, 932, 1202, 1205, + /* 230 */ 1204, 1203, 932, 932, 1181, 932, 932, 1212, 1211, 932, + /* 240 */ 932, 932, 932, 932, 932, 932, 1132, 932, 932, 932, + /* 250 */ 1157, 1175, 1171, 932, 932, 932, 932, 932, 932, 932, + /* 260 */ 932, 1182, 932, 932, 932, 932, 932, 932, 932, 932, + /* 270 */ 1096, 932, 932, 1067, 932, 932, 932, 932, 932, 932, + /* 280 */ 932, 932, 932, 932, 932, 932, 1107, 932, 932, 932, + /* 290 */ 932, 932, 1119, 1118, 932, 932, 932, 932, 932, 932, + /* 300 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 310 */ 932, 932, 1163, 932, 1158, 932, 1152, 932, 932, 1079, + /* 320 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 330 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 340 */ 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + /* 350 */ 932, 932, 932, 932, 932, 932, 1246, 1241, 1242, 1239, + /* 360 */ 932, 932, 932, 932, 932, 1238, 1233, 1234, 932, 932, + /* 370 */ 932, 1231, 932, 932, 932, 932, 932, 932, 932, 932, + /* 380 */ 932, 932, 932, 932, 932, 932, 932, 1023, 932, 961, + /* 390 */ 959, 932, 932, }; /********** End of lemon-generated parsing tables *****************************/ @@ -540,6 +758,7 @@ static const YYCODETYPE yyFallback[] = { 1, /* TIMESTAMP => ID */ 1, /* BINARY => ID */ 1, /* NCHAR => ID */ + 1, /* JSON => ID */ 0, /* OR => nothing */ 0, /* AND => nothing */ 0, /* NOT => nothing */ @@ -551,6 +770,7 @@ static const YYCODETYPE yyFallback[] = { 1, /* LIKE => ID */ 1, /* MATCH => ID */ 1, /* NMATCH => ID */ + 0, /* CONTAINS => nothing */ 1, /* GLOB => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ @@ -569,10 +789,10 @@ static const YYCODETYPE yyFallback[] = { 0, /* STAR => nothing */ 0, /* SLASH => nothing */ 0, /* REM => nothing */ - 0, /* CONCAT => nothing */ 0, /* UMINUS => 
nothing */ 0, /* UPLUS => nothing */ 0, /* BITNOT => nothing */ + 0, /* ARROW => nothing */ 0, /* SHOW => nothing */ 0, /* DATABASES => nothing */ 0, /* TOPICS => nothing */ @@ -648,12 +868,12 @@ static const YYCODETYPE yyFallback[] = { 0, /* USING => nothing */ 1, /* NULL => ID */ 1, /* NOW => ID */ + 0, /* VARIABLE => nothing */ 0, /* SELECT => nothing */ 0, /* UNION => nothing */ 1, /* ALL => ID */ 0, /* DISTINCT => nothing */ 0, /* FROM => nothing */ - 0, /* VARIABLE => nothing */ 0, /* RANGE => nothing */ 0, /* INTERVAL => nothing */ 0, /* EVERY => nothing */ @@ -779,6 +999,7 @@ typedef struct yyParser yyParser; #ifndef NDEBUG #include +#include static FILE *yyTraceFILE = 0; static char *yyTracePrompt = 0; #endif /* NDEBUG */ @@ -826,275 +1047,277 @@ static const char *const yyTokenName[] = { /* 10 */ "TIMESTAMP", /* 11 */ "BINARY", /* 12 */ "NCHAR", - /* 13 */ "OR", - /* 14 */ "AND", - /* 15 */ "NOT", - /* 16 */ "EQ", - /* 17 */ "NE", - /* 18 */ "ISNULL", - /* 19 */ "NOTNULL", - /* 20 */ "IS", - /* 21 */ "LIKE", - /* 22 */ "MATCH", - /* 23 */ "NMATCH", - /* 24 */ "GLOB", - /* 25 */ "BETWEEN", - /* 26 */ "IN", - /* 27 */ "GT", - /* 28 */ "GE", - /* 29 */ "LT", - /* 30 */ "LE", - /* 31 */ "BITAND", - /* 32 */ "BITOR", - /* 33 */ "LSHIFT", - /* 34 */ "RSHIFT", - /* 35 */ "PLUS", - /* 36 */ "MINUS", - /* 37 */ "DIVIDE", - /* 38 */ "TIMES", - /* 39 */ "STAR", - /* 40 */ "SLASH", - /* 41 */ "REM", - /* 42 */ "CONCAT", - /* 43 */ "UMINUS", - /* 44 */ "UPLUS", - /* 45 */ "BITNOT", - /* 46 */ "SHOW", - /* 47 */ "DATABASES", - /* 48 */ "TOPICS", - /* 49 */ "FUNCTIONS", - /* 50 */ "MNODES", - /* 51 */ "DNODES", - /* 52 */ "ACCOUNTS", - /* 53 */ "USERS", - /* 54 */ "MODULES", - /* 55 */ "QUERIES", - /* 56 */ "CONNECTIONS", - /* 57 */ "STREAMS", - /* 58 */ "VARIABLES", - /* 59 */ "SCORES", - /* 60 */ "GRANTS", - /* 61 */ "VNODES", - /* 62 */ "DOT", - /* 63 */ "CREATE", - /* 64 */ "TABLE", - /* 65 */ "STABLE", - /* 66 */ "DATABASE", - /* 67 */ "TABLES", - /* 68 */ 
"STABLES", - /* 69 */ "VGROUPS", - /* 70 */ "DROP", - /* 71 */ "TOPIC", - /* 72 */ "FUNCTION", - /* 73 */ "DNODE", - /* 74 */ "USER", - /* 75 */ "ACCOUNT", - /* 76 */ "USE", - /* 77 */ "DESCRIBE", - /* 78 */ "DESC", - /* 79 */ "ALTER", - /* 80 */ "PASS", - /* 81 */ "PRIVILEGE", - /* 82 */ "LOCAL", - /* 83 */ "COMPACT", - /* 84 */ "LP", - /* 85 */ "RP", - /* 86 */ "IF", - /* 87 */ "EXISTS", - /* 88 */ "AS", - /* 89 */ "OUTPUTTYPE", - /* 90 */ "AGGREGATE", - /* 91 */ "BUFSIZE", - /* 92 */ "PPS", - /* 93 */ "TSERIES", - /* 94 */ "DBS", - /* 95 */ "STORAGE", - /* 96 */ "QTIME", - /* 97 */ "CONNS", - /* 98 */ "STATE", - /* 99 */ "COMMA", - /* 100 */ "KEEP", - /* 101 */ "CACHE", - /* 102 */ "REPLICA", - /* 103 */ "QUORUM", - /* 104 */ "DAYS", - /* 105 */ "MINROWS", - /* 106 */ "MAXROWS", - /* 107 */ "BLOCKS", - /* 108 */ "CTIME", - /* 109 */ "WAL", - /* 110 */ "FSYNC", - /* 111 */ "COMP", - /* 112 */ "PRECISION", - /* 113 */ "UPDATE", - /* 114 */ "CACHELAST", - /* 115 */ "PARTITIONS", - /* 116 */ "UNSIGNED", - /* 117 */ "TAGS", - /* 118 */ "USING", - /* 119 */ "NULL", - /* 120 */ "NOW", - /* 121 */ "SELECT", - /* 122 */ "UNION", - /* 123 */ "ALL", - /* 124 */ "DISTINCT", - /* 125 */ "FROM", - /* 126 */ "VARIABLE", - /* 127 */ "RANGE", - /* 128 */ "INTERVAL", - /* 129 */ "EVERY", - /* 130 */ "SESSION", - /* 131 */ "STATE_WINDOW", - /* 132 */ "FILL", - /* 133 */ "SLIDING", - /* 134 */ "ORDER", - /* 135 */ "BY", - /* 136 */ "ASC", - /* 137 */ "GROUP", - /* 138 */ "HAVING", - /* 139 */ "LIMIT", - /* 140 */ "OFFSET", - /* 141 */ "SLIMIT", - /* 142 */ "SOFFSET", - /* 143 */ "WHERE", - /* 144 */ "RESET", - /* 145 */ "QUERY", - /* 146 */ "SYNCDB", - /* 147 */ "ADD", - /* 148 */ "COLUMN", - /* 149 */ "MODIFY", - /* 150 */ "TAG", - /* 151 */ "CHANGE", - /* 152 */ "SET", - /* 153 */ "KILL", - /* 154 */ "CONNECTION", - /* 155 */ "STREAM", - /* 156 */ "COLON", - /* 157 */ "ABORT", - /* 158 */ "AFTER", - /* 159 */ "ATTACH", - /* 160 */ "BEFORE", - /* 161 */ "BEGIN", - /* 162 */ 
"CASCADE", - /* 163 */ "CLUSTER", - /* 164 */ "CONFLICT", - /* 165 */ "COPY", - /* 166 */ "DEFERRED", - /* 167 */ "DELIMITERS", - /* 168 */ "DETACH", - /* 169 */ "EACH", - /* 170 */ "END", - /* 171 */ "EXPLAIN", - /* 172 */ "FAIL", - /* 173 */ "FOR", - /* 174 */ "IGNORE", - /* 175 */ "IMMEDIATE", - /* 176 */ "INITIALLY", - /* 177 */ "INSTEAD", - /* 178 */ "KEY", - /* 179 */ "OF", - /* 180 */ "RAISE", - /* 181 */ "REPLACE", - /* 182 */ "RESTRICT", - /* 183 */ "ROW", - /* 184 */ "STATEMENT", - /* 185 */ "TRIGGER", - /* 186 */ "VIEW", - /* 187 */ "IPTOKEN", - /* 188 */ "SEMI", - /* 189 */ "NONE", - /* 190 */ "PREV", - /* 191 */ "LINEAR", - /* 192 */ "IMPORT", - /* 193 */ "TBNAME", - /* 194 */ "JOIN", - /* 195 */ "INSERT", - /* 196 */ "INTO", - /* 197 */ "VALUES", - /* 198 */ "FILE", - /* 199 */ "program", - /* 200 */ "cmd", - /* 201 */ "ids", - /* 202 */ "dbPrefix", - /* 203 */ "cpxName", - /* 204 */ "ifexists", - /* 205 */ "alter_db_optr", - /* 206 */ "alter_topic_optr", - /* 207 */ "acct_optr", - /* 208 */ "exprlist", - /* 209 */ "ifnotexists", - /* 210 */ "db_optr", - /* 211 */ "topic_optr", - /* 212 */ "typename", - /* 213 */ "bufsize", - /* 214 */ "pps", - /* 215 */ "tseries", - /* 216 */ "dbs", - /* 217 */ "streams", - /* 218 */ "storage", - /* 219 */ "qtime", - /* 220 */ "users", - /* 221 */ "conns", - /* 222 */ "state", - /* 223 */ "intitemlist", - /* 224 */ "intitem", - /* 225 */ "keep", - /* 226 */ "cache", - /* 227 */ "replica", - /* 228 */ "quorum", - /* 229 */ "days", - /* 230 */ "minrows", - /* 231 */ "maxrows", - /* 232 */ "blocks", - /* 233 */ "ctime", - /* 234 */ "wal", - /* 235 */ "fsync", - /* 236 */ "comp", - /* 237 */ "prec", - /* 238 */ "update", - /* 239 */ "cachelast", - /* 240 */ "partitions", - /* 241 */ "signed", - /* 242 */ "create_table_args", - /* 243 */ "create_stable_args", - /* 244 */ "create_table_list", - /* 245 */ "create_from_stable", - /* 246 */ "columnlist", - /* 247 */ "tagitemlist", - /* 248 */ "tagNamelist", - /* 249 */ 
"select", - /* 250 */ "column", - /* 251 */ "tagitem", - /* 252 */ "selcollist", - /* 253 */ "from", - /* 254 */ "where_opt", - /* 255 */ "range_option", - /* 256 */ "interval_option", - /* 257 */ "sliding_opt", - /* 258 */ "session_option", - /* 259 */ "windowstate_option", - /* 260 */ "fill_opt", - /* 261 */ "groupby_opt", - /* 262 */ "having_opt", - /* 263 */ "orderby_opt", - /* 264 */ "slimit_opt", - /* 265 */ "limit_opt", - /* 266 */ "union", - /* 267 */ "sclp", - /* 268 */ "distinct", - /* 269 */ "expr", - /* 270 */ "as", - /* 271 */ "tablelist", - /* 272 */ "sub", - /* 273 */ "tmvar", - /* 274 */ "timestamp", - /* 275 */ "intervalKey", - /* 276 */ "sortlist", - /* 277 */ "sortitem", - /* 278 */ "item", - /* 279 */ "sortorder", - /* 280 */ "grouplist", - /* 281 */ "expritem", + /* 13 */ "JSON", + /* 14 */ "OR", + /* 15 */ "AND", + /* 16 */ "NOT", + /* 17 */ "EQ", + /* 18 */ "NE", + /* 19 */ "ISNULL", + /* 20 */ "NOTNULL", + /* 21 */ "IS", + /* 22 */ "LIKE", + /* 23 */ "MATCH", + /* 24 */ "NMATCH", + /* 25 */ "CONTAINS", + /* 26 */ "GLOB", + /* 27 */ "BETWEEN", + /* 28 */ "IN", + /* 29 */ "GT", + /* 30 */ "GE", + /* 31 */ "LT", + /* 32 */ "LE", + /* 33 */ "BITAND", + /* 34 */ "BITOR", + /* 35 */ "LSHIFT", + /* 36 */ "RSHIFT", + /* 37 */ "PLUS", + /* 38 */ "MINUS", + /* 39 */ "DIVIDE", + /* 40 */ "TIMES", + /* 41 */ "STAR", + /* 42 */ "SLASH", + /* 43 */ "REM", + /* 44 */ "UMINUS", + /* 45 */ "UPLUS", + /* 46 */ "BITNOT", + /* 47 */ "ARROW", + /* 48 */ "SHOW", + /* 49 */ "DATABASES", + /* 50 */ "TOPICS", + /* 51 */ "FUNCTIONS", + /* 52 */ "MNODES", + /* 53 */ "DNODES", + /* 54 */ "ACCOUNTS", + /* 55 */ "USERS", + /* 56 */ "MODULES", + /* 57 */ "QUERIES", + /* 58 */ "CONNECTIONS", + /* 59 */ "STREAMS", + /* 60 */ "VARIABLES", + /* 61 */ "SCORES", + /* 62 */ "GRANTS", + /* 63 */ "VNODES", + /* 64 */ "DOT", + /* 65 */ "CREATE", + /* 66 */ "TABLE", + /* 67 */ "STABLE", + /* 68 */ "DATABASE", + /* 69 */ "TABLES", + /* 70 */ "STABLES", + /* 71 */ "VGROUPS", + /* 72 
*/ "DROP", + /* 73 */ "TOPIC", + /* 74 */ "FUNCTION", + /* 75 */ "DNODE", + /* 76 */ "USER", + /* 77 */ "ACCOUNT", + /* 78 */ "USE", + /* 79 */ "DESCRIBE", + /* 80 */ "DESC", + /* 81 */ "ALTER", + /* 82 */ "PASS", + /* 83 */ "PRIVILEGE", + /* 84 */ "LOCAL", + /* 85 */ "COMPACT", + /* 86 */ "LP", + /* 87 */ "RP", + /* 88 */ "IF", + /* 89 */ "EXISTS", + /* 90 */ "AS", + /* 91 */ "OUTPUTTYPE", + /* 92 */ "AGGREGATE", + /* 93 */ "BUFSIZE", + /* 94 */ "PPS", + /* 95 */ "TSERIES", + /* 96 */ "DBS", + /* 97 */ "STORAGE", + /* 98 */ "QTIME", + /* 99 */ "CONNS", + /* 100 */ "STATE", + /* 101 */ "COMMA", + /* 102 */ "KEEP", + /* 103 */ "CACHE", + /* 104 */ "REPLICA", + /* 105 */ "QUORUM", + /* 106 */ "DAYS", + /* 107 */ "MINROWS", + /* 108 */ "MAXROWS", + /* 109 */ "BLOCKS", + /* 110 */ "CTIME", + /* 111 */ "WAL", + /* 112 */ "FSYNC", + /* 113 */ "COMP", + /* 114 */ "PRECISION", + /* 115 */ "UPDATE", + /* 116 */ "CACHELAST", + /* 117 */ "PARTITIONS", + /* 118 */ "UNSIGNED", + /* 119 */ "TAGS", + /* 120 */ "USING", + /* 121 */ "NULL", + /* 122 */ "NOW", + /* 123 */ "VARIABLE", + /* 124 */ "SELECT", + /* 125 */ "UNION", + /* 126 */ "ALL", + /* 127 */ "DISTINCT", + /* 128 */ "FROM", + /* 129 */ "RANGE", + /* 130 */ "INTERVAL", + /* 131 */ "EVERY", + /* 132 */ "SESSION", + /* 133 */ "STATE_WINDOW", + /* 134 */ "FILL", + /* 135 */ "SLIDING", + /* 136 */ "ORDER", + /* 137 */ "BY", + /* 138 */ "ASC", + /* 139 */ "GROUP", + /* 140 */ "HAVING", + /* 141 */ "LIMIT", + /* 142 */ "OFFSET", + /* 143 */ "SLIMIT", + /* 144 */ "SOFFSET", + /* 145 */ "WHERE", + /* 146 */ "RESET", + /* 147 */ "QUERY", + /* 148 */ "SYNCDB", + /* 149 */ "ADD", + /* 150 */ "COLUMN", + /* 151 */ "MODIFY", + /* 152 */ "TAG", + /* 153 */ "CHANGE", + /* 154 */ "SET", + /* 155 */ "KILL", + /* 156 */ "CONNECTION", + /* 157 */ "STREAM", + /* 158 */ "COLON", + /* 159 */ "ABORT", + /* 160 */ "AFTER", + /* 161 */ "ATTACH", + /* 162 */ "BEFORE", + /* 163 */ "BEGIN", + /* 164 */ "CASCADE", + /* 165 */ "CLUSTER", + /* 166 */ 
"CONFLICT", + /* 167 */ "COPY", + /* 168 */ "DEFERRED", + /* 169 */ "DELIMITERS", + /* 170 */ "DETACH", + /* 171 */ "EACH", + /* 172 */ "END", + /* 173 */ "EXPLAIN", + /* 174 */ "FAIL", + /* 175 */ "FOR", + /* 176 */ "IGNORE", + /* 177 */ "IMMEDIATE", + /* 178 */ "INITIALLY", + /* 179 */ "INSTEAD", + /* 180 */ "KEY", + /* 181 */ "OF", + /* 182 */ "RAISE", + /* 183 */ "REPLACE", + /* 184 */ "RESTRICT", + /* 185 */ "ROW", + /* 186 */ "STATEMENT", + /* 187 */ "TRIGGER", + /* 188 */ "VIEW", + /* 189 */ "IPTOKEN", + /* 190 */ "SEMI", + /* 191 */ "NONE", + /* 192 */ "PREV", + /* 193 */ "LINEAR", + /* 194 */ "IMPORT", + /* 195 */ "TBNAME", + /* 196 */ "JOIN", + /* 197 */ "INSERT", + /* 198 */ "INTO", + /* 199 */ "VALUES", + /* 200 */ "FILE", + /* 201 */ "program", + /* 202 */ "cmd", + /* 203 */ "ids", + /* 204 */ "dbPrefix", + /* 205 */ "cpxName", + /* 206 */ "ifexists", + /* 207 */ "alter_db_optr", + /* 208 */ "alter_topic_optr", + /* 209 */ "acct_optr", + /* 210 */ "exprlist", + /* 211 */ "ifnotexists", + /* 212 */ "db_optr", + /* 213 */ "topic_optr", + /* 214 */ "typename", + /* 215 */ "bufsize", + /* 216 */ "pps", + /* 217 */ "tseries", + /* 218 */ "dbs", + /* 219 */ "streams", + /* 220 */ "storage", + /* 221 */ "qtime", + /* 222 */ "users", + /* 223 */ "conns", + /* 224 */ "state", + /* 225 */ "intitemlist", + /* 226 */ "intitem", + /* 227 */ "keep", + /* 228 */ "cache", + /* 229 */ "replica", + /* 230 */ "quorum", + /* 231 */ "days", + /* 232 */ "minrows", + /* 233 */ "maxrows", + /* 234 */ "blocks", + /* 235 */ "ctime", + /* 236 */ "wal", + /* 237 */ "fsync", + /* 238 */ "comp", + /* 239 */ "prec", + /* 240 */ "update", + /* 241 */ "cachelast", + /* 242 */ "partitions", + /* 243 */ "signed", + /* 244 */ "create_table_args", + /* 245 */ "create_stable_args", + /* 246 */ "create_table_list", + /* 247 */ "create_from_stable", + /* 248 */ "columnlist", + /* 249 */ "tagitemlist", + /* 250 */ "tagNamelist", + /* 251 */ "select", + /* 252 */ "column", + /* 253 */ 
"tagitem", + /* 254 */ "selcollist", + /* 255 */ "from", + /* 256 */ "where_opt", + /* 257 */ "range_option", + /* 258 */ "interval_option", + /* 259 */ "sliding_opt", + /* 260 */ "session_option", + /* 261 */ "windowstate_option", + /* 262 */ "fill_opt", + /* 263 */ "groupby_opt", + /* 264 */ "having_opt", + /* 265 */ "orderby_opt", + /* 266 */ "slimit_opt", + /* 267 */ "limit_opt", + /* 268 */ "union", + /* 269 */ "sclp", + /* 270 */ "distinct", + /* 271 */ "expr", + /* 272 */ "as", + /* 273 */ "tablelist", + /* 274 */ "sub", + /* 275 */ "tmvar", + /* 276 */ "timestamp", + /* 277 */ "intervalKey", + /* 278 */ "sortlist", + /* 279 */ "item", + /* 280 */ "sortorder", + /* 281 */ "arrow", + /* 282 */ "grouplist", + /* 283 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1264,146 +1487,159 @@ static const char *const yyRuleName[] = { /* 159 */ "tagitem ::= BOOL", /* 160 */ "tagitem ::= NULL", /* 161 */ "tagitem ::= NOW", - /* 162 */ "tagitem ::= MINUS INTEGER", - /* 163 */ "tagitem ::= MINUS FLOAT", - /* 164 */ "tagitem ::= PLUS INTEGER", - /* 165 */ "tagitem ::= PLUS FLOAT", - /* 166 */ "select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", - /* 167 */ "select ::= LP select RP", - /* 168 */ "union ::= select", - /* 169 */ "union ::= union UNION ALL select", - /* 170 */ "cmd ::= union", - /* 171 */ "select ::= SELECT selcollist", - /* 172 */ "sclp ::= selcollist COMMA", - /* 173 */ "sclp ::=", - /* 174 */ "selcollist ::= sclp distinct expr as", - /* 175 */ "selcollist ::= sclp STAR", - /* 176 */ "as ::= AS ids", - /* 177 */ "as ::= ids", - /* 178 */ "as ::=", - /* 179 */ "distinct ::= DISTINCT", - /* 180 */ "distinct ::=", - /* 181 */ "from ::= FROM tablelist", - /* 182 */ "from ::= FROM sub", - /* 183 */ "sub ::= LP union RP", - /* 184 */ "sub ::= LP union RP ids", - /* 185 */ "sub ::= sub COMMA LP union RP 
ids", - /* 186 */ "tablelist ::= ids cpxName", - /* 187 */ "tablelist ::= ids cpxName ids", - /* 188 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 189 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 190 */ "tmvar ::= VARIABLE", - /* 191 */ "timestamp ::= INTEGER", - /* 192 */ "timestamp ::= MINUS INTEGER", - /* 193 */ "timestamp ::= PLUS INTEGER", - /* 194 */ "timestamp ::= STRING", - /* 195 */ "timestamp ::= NOW", - /* 196 */ "timestamp ::= NOW PLUS VARIABLE", - /* 197 */ "timestamp ::= NOW MINUS VARIABLE", - /* 198 */ "range_option ::=", - /* 199 */ "range_option ::= RANGE LP timestamp COMMA timestamp RP", - /* 200 */ "interval_option ::= intervalKey LP tmvar RP", - /* 201 */ "interval_option ::= intervalKey LP tmvar COMMA tmvar RP", - /* 202 */ "interval_option ::=", - /* 203 */ "intervalKey ::= INTERVAL", - /* 204 */ "intervalKey ::= EVERY", - /* 205 */ "session_option ::=", - /* 206 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", - /* 207 */ "windowstate_option ::=", - /* 208 */ "windowstate_option ::= STATE_WINDOW LP ids RP", - /* 209 */ "fill_opt ::=", - /* 210 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 211 */ "fill_opt ::= FILL LP ID RP", - /* 212 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 213 */ "sliding_opt ::=", - /* 214 */ "orderby_opt ::=", - /* 215 */ "orderby_opt ::= ORDER BY sortlist", - /* 216 */ "sortlist ::= sortlist COMMA item sortorder", - /* 217 */ "sortlist ::= item sortorder", - /* 218 */ "item ::= ids cpxName", - /* 219 */ "sortorder ::= ASC", - /* 220 */ "sortorder ::= DESC", - /* 221 */ "sortorder ::=", - /* 222 */ "groupby_opt ::=", - /* 223 */ "groupby_opt ::= GROUP BY grouplist", - /* 224 */ "grouplist ::= grouplist COMMA item", - /* 225 */ "grouplist ::= item", - /* 226 */ "having_opt ::=", - /* 227 */ "having_opt ::= HAVING expr", - /* 228 */ "limit_opt ::=", - /* 229 */ "limit_opt ::= LIMIT signed", - /* 230 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 231 */ "limit_opt ::= 
LIMIT signed COMMA signed", - /* 232 */ "slimit_opt ::=", - /* 233 */ "slimit_opt ::= SLIMIT signed", - /* 234 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 235 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 236 */ "where_opt ::=", - /* 237 */ "where_opt ::= WHERE expr", - /* 238 */ "expr ::= LP expr RP", - /* 239 */ "expr ::= ID", - /* 240 */ "expr ::= ID DOT ID", - /* 241 */ "expr ::= ID DOT STAR", - /* 242 */ "expr ::= INTEGER", - /* 243 */ "expr ::= MINUS INTEGER", - /* 244 */ "expr ::= PLUS INTEGER", - /* 245 */ "expr ::= FLOAT", - /* 246 */ "expr ::= MINUS FLOAT", - /* 247 */ "expr ::= PLUS FLOAT", - /* 248 */ "expr ::= STRING", - /* 249 */ "expr ::= NOW", - /* 250 */ "expr ::= VARIABLE", - /* 251 */ "expr ::= PLUS VARIABLE", - /* 252 */ "expr ::= MINUS VARIABLE", - /* 253 */ "expr ::= BOOL", - /* 254 */ "expr ::= NULL", - /* 255 */ "expr ::= ID LP exprlist RP", - /* 256 */ "expr ::= ID LP STAR RP", - /* 257 */ "expr ::= expr IS NULL", - /* 258 */ "expr ::= expr IS NOT NULL", - /* 259 */ "expr ::= expr LT expr", - /* 260 */ "expr ::= expr GT expr", - /* 261 */ "expr ::= expr LE expr", - /* 262 */ "expr ::= expr GE expr", - /* 263 */ "expr ::= expr NE expr", - /* 264 */ "expr ::= expr EQ expr", - /* 265 */ "expr ::= expr BETWEEN expr AND expr", - /* 266 */ "expr ::= expr AND expr", - /* 267 */ "expr ::= expr OR expr", - /* 268 */ "expr ::= expr PLUS expr", - /* 269 */ "expr ::= expr MINUS expr", - /* 270 */ "expr ::= expr STAR expr", - /* 271 */ "expr ::= expr SLASH expr", - /* 272 */ "expr ::= expr REM expr", - /* 273 */ "expr ::= expr LIKE expr", - /* 274 */ "expr ::= expr MATCH expr", - /* 275 */ "expr ::= expr NMATCH expr", - /* 276 */ "expr ::= expr IN LP exprlist RP", - /* 277 */ "exprlist ::= exprlist COMMA expritem", - /* 278 */ "exprlist ::= expritem", - /* 279 */ "expritem ::= expr", - /* 280 */ "expritem ::=", - /* 281 */ "cmd ::= RESET QUERY CACHE", - /* 282 */ "cmd ::= SYNCDB ids REPLICA", - /* 283 */ "cmd ::= ALTER TABLE ids 
cpxName ADD COLUMN columnlist", - /* 284 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 285 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 286 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 287 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 288 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 289 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 290 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 291 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 292 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 293 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 294 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 295 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 296 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 297 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 298 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 299 */ "cmd ::= KILL CONNECTION INTEGER", - /* 300 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 301 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 162 */ "tagitem ::= NOW PLUS VARIABLE", + /* 163 */ "tagitem ::= NOW MINUS VARIABLE", + /* 164 */ "tagitem ::= MINUS INTEGER", + /* 165 */ "tagitem ::= MINUS FLOAT", + /* 166 */ "tagitem ::= PLUS INTEGER", + /* 167 */ "tagitem ::= PLUS FLOAT", + /* 168 */ "select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", + /* 169 */ "select ::= LP select RP", + /* 170 */ "union ::= select", + /* 171 */ "union ::= union UNION ALL select", + /* 172 */ "cmd ::= union", + /* 173 */ "select ::= SELECT selcollist", + /* 174 */ "sclp ::= selcollist COMMA", + /* 175 */ "sclp ::=", + /* 176 */ "selcollist ::= sclp distinct expr as", + /* 177 */ "selcollist ::= sclp STAR", + 
/* 178 */ "as ::= AS ids", + /* 179 */ "as ::= ids", + /* 180 */ "as ::=", + /* 181 */ "distinct ::= DISTINCT", + /* 182 */ "distinct ::=", + /* 183 */ "from ::= FROM tablelist", + /* 184 */ "from ::= FROM sub", + /* 185 */ "sub ::= LP union RP", + /* 186 */ "sub ::= LP union RP ids", + /* 187 */ "sub ::= sub COMMA LP union RP ids", + /* 188 */ "tablelist ::= ids cpxName", + /* 189 */ "tablelist ::= ids cpxName ids", + /* 190 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 191 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 192 */ "tmvar ::= VARIABLE", + /* 193 */ "timestamp ::= INTEGER", + /* 194 */ "timestamp ::= MINUS INTEGER", + /* 195 */ "timestamp ::= PLUS INTEGER", + /* 196 */ "timestamp ::= STRING", + /* 197 */ "timestamp ::= NOW", + /* 198 */ "timestamp ::= NOW PLUS VARIABLE", + /* 199 */ "timestamp ::= NOW MINUS VARIABLE", + /* 200 */ "range_option ::=", + /* 201 */ "range_option ::= RANGE LP timestamp COMMA timestamp RP", + /* 202 */ "interval_option ::= intervalKey LP tmvar RP", + /* 203 */ "interval_option ::= intervalKey LP tmvar COMMA tmvar RP", + /* 204 */ "interval_option ::=", + /* 205 */ "intervalKey ::= INTERVAL", + /* 206 */ "intervalKey ::= EVERY", + /* 207 */ "session_option ::=", + /* 208 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", + /* 209 */ "windowstate_option ::=", + /* 210 */ "windowstate_option ::= STATE_WINDOW LP ids RP", + /* 211 */ "fill_opt ::=", + /* 212 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 213 */ "fill_opt ::= FILL LP ID RP", + /* 214 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 215 */ "sliding_opt ::=", + /* 216 */ "orderby_opt ::=", + /* 217 */ "orderby_opt ::= ORDER BY sortlist", + /* 218 */ "sortlist ::= sortlist COMMA item sortorder", + /* 219 */ "sortlist ::= sortlist COMMA arrow sortorder", + /* 220 */ "sortlist ::= item sortorder", + /* 221 */ "sortlist ::= arrow sortorder", + /* 222 */ "item ::= ID", + /* 223 */ "item ::= ID DOT ID", + /* 224 */ "sortorder ::= ASC", + 
/* 225 */ "sortorder ::= DESC", + /* 226 */ "sortorder ::=", + /* 227 */ "groupby_opt ::=", + /* 228 */ "groupby_opt ::= GROUP BY grouplist", + /* 229 */ "grouplist ::= grouplist COMMA item", + /* 230 */ "grouplist ::= grouplist COMMA arrow", + /* 231 */ "grouplist ::= item", + /* 232 */ "grouplist ::= arrow", + /* 233 */ "having_opt ::=", + /* 234 */ "having_opt ::= HAVING expr", + /* 235 */ "limit_opt ::=", + /* 236 */ "limit_opt ::= LIMIT signed", + /* 237 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 238 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 239 */ "slimit_opt ::=", + /* 240 */ "slimit_opt ::= SLIMIT signed", + /* 241 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 242 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 243 */ "where_opt ::=", + /* 244 */ "where_opt ::= WHERE expr", + /* 245 */ "expr ::= LP expr RP", + /* 246 */ "expr ::= ID", + /* 247 */ "expr ::= ID DOT ID", + /* 248 */ "expr ::= ID DOT STAR", + /* 249 */ "expr ::= INTEGER", + /* 250 */ "expr ::= MINUS INTEGER", + /* 251 */ "expr ::= PLUS INTEGER", + /* 252 */ "expr ::= FLOAT", + /* 253 */ "expr ::= MINUS FLOAT", + /* 254 */ "expr ::= PLUS FLOAT", + /* 255 */ "expr ::= STRING", + /* 256 */ "expr ::= NOW", + /* 257 */ "expr ::= VARIABLE", + /* 258 */ "expr ::= PLUS VARIABLE", + /* 259 */ "expr ::= MINUS VARIABLE", + /* 260 */ "expr ::= BOOL", + /* 261 */ "expr ::= NULL", + /* 262 */ "expr ::= ID LP exprlist RP", + /* 263 */ "expr ::= ID LP STAR RP", + /* 264 */ "expr ::= ID LP expr AS typename RP", + /* 265 */ "expr ::= expr IS NULL", + /* 266 */ "expr ::= expr IS NOT NULL", + /* 267 */ "expr ::= expr LT expr", + /* 268 */ "expr ::= expr GT expr", + /* 269 */ "expr ::= expr LE expr", + /* 270 */ "expr ::= expr GE expr", + /* 271 */ "expr ::= expr NE expr", + /* 272 */ "expr ::= expr EQ expr", + /* 273 */ "expr ::= expr BETWEEN expr AND expr", + /* 274 */ "expr ::= expr AND expr", + /* 275 */ "expr ::= expr OR expr", + /* 276 */ "expr ::= expr PLUS expr", + /* 277 */ 
"expr ::= expr MINUS expr", + /* 278 */ "expr ::= expr STAR expr", + /* 279 */ "expr ::= expr SLASH expr", + /* 280 */ "expr ::= expr REM expr", + /* 281 */ "expr ::= expr LIKE expr", + /* 282 */ "expr ::= expr MATCH expr", + /* 283 */ "expr ::= expr NMATCH expr", + /* 284 */ "expr ::= ID CONTAINS STRING", + /* 285 */ "expr ::= ID DOT ID CONTAINS STRING", + /* 286 */ "arrow ::= ID ARROW STRING", + /* 287 */ "arrow ::= ID DOT ID ARROW STRING", + /* 288 */ "expr ::= arrow", + /* 289 */ "expr ::= expr IN LP exprlist RP", + /* 290 */ "exprlist ::= exprlist COMMA expritem", + /* 291 */ "exprlist ::= expritem", + /* 292 */ "expritem ::= expr", + /* 293 */ "expritem ::=", + /* 294 */ "cmd ::= RESET QUERY CACHE", + /* 295 */ "cmd ::= SYNCDB ids REPLICA", + /* 296 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 297 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 298 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 299 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 300 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 301 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 302 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 303 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 304 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 305 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 306 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 307 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 308 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 309 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 310 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 311 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 312 */ "cmd ::= KILL CONNECTION INTEGER", + /* 313 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 314 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; 
#endif /* NDEBUG */ @@ -1529,61 +1765,57 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 208: /* exprlist */ - case 252: /* selcollist */ - case 267: /* sclp */ + case 210: /* exprlist */ + case 254: /* selcollist */ + case 269: /* sclp */ { -tSqlExprListDestroy((yypminor->yy345)); -} - break; - case 223: /* intitemlist */ - case 225: /* keep */ - case 246: /* columnlist */ - case 247: /* tagitemlist */ - case 248: /* tagNamelist */ - case 260: /* fill_opt */ - case 261: /* groupby_opt */ - case 263: /* orderby_opt */ - case 276: /* sortlist */ - case 280: /* grouplist */ -{ -taosArrayDestroy((yypminor->yy345)); +tSqlExprListDestroy((yypminor->yy189)); } break; - case 244: /* create_table_list */ + case 225: /* intitemlist */ + case 227: /* keep */ + case 248: /* columnlist */ + case 249: /* tagitemlist */ + case 250: /* tagNamelist */ + case 262: /* fill_opt */ + case 263: /* groupby_opt */ + case 265: /* orderby_opt */ + case 278: /* sortlist */ + case 282: /* grouplist */ { -destroyCreateTableSql((yypminor->yy170)); +taosArrayDestroy((yypminor->yy189)); } break; - case 249: /* select */ + case 246: /* create_table_list */ { -destroySqlNode((yypminor->yy68)); +destroyCreateTableSql((yypminor->yy6)); } break; - case 253: /* from */ - case 271: /* tablelist */ - case 272: /* sub */ + case 251: /* select */ { -destroyRelationInfo((yypminor->yy484)); +destroySqlNode((yypminor->yy16)); } break; - case 254: /* where_opt */ - case 262: /* having_opt */ - case 269: /* expr */ - case 274: /* timestamp */ - case 281: /* expritem */ + case 255: /* from */ + case 273: /* tablelist */ + case 274: /* sub */ { -tSqlExprDestroy((yypminor->yy418)); +destroyRelationInfo((yypminor->yy36)); } break; - case 266: /* union */ + case 256: /* where_opt */ + case 264: /* having_opt */ + case 271: /* expr */ + case 276: /* timestamp */ + case 281: /* arrow */ + case 283: /* expritem */ { 
-destroyAllSqlNode((yypminor->yy345)); +tSqlExprDestroy((yypminor->yy18)); } break; - case 277: /* sortitem */ + case 268: /* union */ { -tVariantDestroy(&(yypminor->yy2)); +destroyAllSqlNode((yypminor->yy189)); } break; /********* End destructor definitions *****************************************/ @@ -1750,7 +1982,7 @@ static YYACTIONTYPE yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ - assert( i>=0 && i=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) ); return yy_action[i]; } }while(1); @@ -1872,308 +2104,321 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 199, /* (0) program ::= cmd */ - 200, /* (1) cmd ::= SHOW DATABASES */ - 200, /* (2) cmd ::= SHOW TOPICS */ - 200, /* (3) cmd ::= SHOW FUNCTIONS */ - 200, /* (4) cmd ::= SHOW MNODES */ - 200, /* (5) cmd ::= SHOW DNODES */ - 200, /* (6) cmd ::= SHOW ACCOUNTS */ - 200, /* (7) cmd ::= SHOW USERS */ - 200, /* (8) cmd ::= SHOW MODULES */ - 200, /* (9) cmd ::= SHOW QUERIES */ - 200, /* (10) cmd ::= SHOW CONNECTIONS */ - 200, /* (11) cmd ::= SHOW STREAMS */ - 200, /* (12) cmd ::= SHOW VARIABLES */ - 200, /* (13) cmd ::= SHOW SCORES */ - 200, /* (14) cmd ::= SHOW GRANTS */ - 200, /* (15) cmd ::= SHOW VNODES */ - 200, /* (16) cmd ::= SHOW VNODES ids */ - 202, /* (17) dbPrefix ::= */ - 202, /* (18) dbPrefix ::= ids DOT */ - 203, /* (19) cpxName ::= */ - 203, /* (20) cpxName ::= DOT ids */ - 200, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ - 200, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ - 200, /* (23) cmd ::= SHOW CREATE DATABASE ids */ - 200, /* (24) cmd ::= SHOW dbPrefix TABLES */ - 200, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - 200, /* (26) cmd ::= SHOW dbPrefix STABLES */ - 200, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - 200, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ - 200, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ - 200, /* 
(30) cmd ::= DROP STABLE ifexists ids cpxName */ - 200, /* (31) cmd ::= DROP DATABASE ifexists ids */ - 200, /* (32) cmd ::= DROP TOPIC ifexists ids */ - 200, /* (33) cmd ::= DROP FUNCTION ids */ - 200, /* (34) cmd ::= DROP DNODE ids */ - 200, /* (35) cmd ::= DROP USER ids */ - 200, /* (36) cmd ::= DROP ACCOUNT ids */ - 200, /* (37) cmd ::= USE ids */ - 200, /* (38) cmd ::= DESCRIBE ids cpxName */ - 200, /* (39) cmd ::= DESC ids cpxName */ - 200, /* (40) cmd ::= ALTER USER ids PASS ids */ - 200, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */ - 200, /* (42) cmd ::= ALTER DNODE ids ids */ - 200, /* (43) cmd ::= ALTER DNODE ids ids ids */ - 200, /* (44) cmd ::= ALTER LOCAL ids */ - 200, /* (45) cmd ::= ALTER LOCAL ids ids */ - 200, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */ - 200, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */ - 200, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */ - 200, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - 200, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */ - 201, /* (51) ids ::= ID */ - 201, /* (52) ids ::= STRING */ - 204, /* (53) ifexists ::= IF EXISTS */ - 204, /* (54) ifexists ::= */ - 209, /* (55) ifnotexists ::= IF NOT EXISTS */ - 209, /* (56) ifnotexists ::= */ - 200, /* (57) cmd ::= CREATE DNODE ids */ - 200, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - 200, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - 200, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - 200, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 200, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 200, /* (63) cmd ::= CREATE USER ids PASS ids */ - 213, /* (64) bufsize ::= */ - 213, /* (65) bufsize ::= BUFSIZE INTEGER */ - 214, /* (66) pps ::= */ - 214, /* (67) pps ::= PPS INTEGER */ - 215, /* (68) tseries ::= */ - 215, /* (69) tseries ::= TSERIES INTEGER */ - 216, /* (70) dbs ::= */ - 216, /* (71) dbs ::= DBS INTEGER */ - 217, /* (72) 
streams ::= */ - 217, /* (73) streams ::= STREAMS INTEGER */ - 218, /* (74) storage ::= */ - 218, /* (75) storage ::= STORAGE INTEGER */ - 219, /* (76) qtime ::= */ - 219, /* (77) qtime ::= QTIME INTEGER */ - 220, /* (78) users ::= */ - 220, /* (79) users ::= USERS INTEGER */ - 221, /* (80) conns ::= */ - 221, /* (81) conns ::= CONNS INTEGER */ - 222, /* (82) state ::= */ - 222, /* (83) state ::= STATE ids */ - 207, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - 223, /* (85) intitemlist ::= intitemlist COMMA intitem */ - 223, /* (86) intitemlist ::= intitem */ - 224, /* (87) intitem ::= INTEGER */ - 225, /* (88) keep ::= KEEP intitemlist */ - 226, /* (89) cache ::= CACHE INTEGER */ - 227, /* (90) replica ::= REPLICA INTEGER */ - 228, /* (91) quorum ::= QUORUM INTEGER */ - 229, /* (92) days ::= DAYS INTEGER */ - 230, /* (93) minrows ::= MINROWS INTEGER */ - 231, /* (94) maxrows ::= MAXROWS INTEGER */ - 232, /* (95) blocks ::= BLOCKS INTEGER */ - 233, /* (96) ctime ::= CTIME INTEGER */ - 234, /* (97) wal ::= WAL INTEGER */ - 235, /* (98) fsync ::= FSYNC INTEGER */ - 236, /* (99) comp ::= COMP INTEGER */ - 237, /* (100) prec ::= PRECISION STRING */ - 238, /* (101) update ::= UPDATE INTEGER */ - 239, /* (102) cachelast ::= CACHELAST INTEGER */ - 240, /* (103) partitions ::= PARTITIONS INTEGER */ - 210, /* (104) db_optr ::= */ - 210, /* (105) db_optr ::= db_optr cache */ - 210, /* (106) db_optr ::= db_optr replica */ - 210, /* (107) db_optr ::= db_optr quorum */ - 210, /* (108) db_optr ::= db_optr days */ - 210, /* (109) db_optr ::= db_optr minrows */ - 210, /* (110) db_optr ::= db_optr maxrows */ - 210, /* (111) db_optr ::= db_optr blocks */ - 210, /* (112) db_optr ::= db_optr ctime */ - 210, /* (113) db_optr ::= db_optr wal */ - 210, /* (114) db_optr ::= db_optr fsync */ - 210, /* (115) db_optr ::= db_optr comp */ - 210, /* (116) db_optr ::= db_optr prec */ - 210, /* (117) db_optr ::= db_optr keep */ - 210, /* (118) db_optr ::= 
db_optr update */ - 210, /* (119) db_optr ::= db_optr cachelast */ - 211, /* (120) topic_optr ::= db_optr */ - 211, /* (121) topic_optr ::= topic_optr partitions */ - 205, /* (122) alter_db_optr ::= */ - 205, /* (123) alter_db_optr ::= alter_db_optr replica */ - 205, /* (124) alter_db_optr ::= alter_db_optr quorum */ - 205, /* (125) alter_db_optr ::= alter_db_optr keep */ - 205, /* (126) alter_db_optr ::= alter_db_optr blocks */ - 205, /* (127) alter_db_optr ::= alter_db_optr comp */ - 205, /* (128) alter_db_optr ::= alter_db_optr update */ - 205, /* (129) alter_db_optr ::= alter_db_optr cachelast */ - 206, /* (130) alter_topic_optr ::= alter_db_optr */ - 206, /* (131) alter_topic_optr ::= alter_topic_optr partitions */ - 212, /* (132) typename ::= ids */ - 212, /* (133) typename ::= ids LP signed RP */ - 212, /* (134) typename ::= ids UNSIGNED */ - 241, /* (135) signed ::= INTEGER */ - 241, /* (136) signed ::= PLUS INTEGER */ - 241, /* (137) signed ::= MINUS INTEGER */ - 200, /* (138) cmd ::= CREATE TABLE create_table_args */ - 200, /* (139) cmd ::= CREATE TABLE create_stable_args */ - 200, /* (140) cmd ::= CREATE STABLE create_stable_args */ - 200, /* (141) cmd ::= CREATE TABLE create_table_list */ - 244, /* (142) create_table_list ::= create_from_stable */ - 244, /* (143) create_table_list ::= create_table_list create_from_stable */ - 242, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - 243, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - 245, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - 245, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - 248, /* (148) tagNamelist ::= tagNamelist COMMA ids */ - 248, /* (149) tagNamelist ::= ids */ - 242, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */ - 246, /* (151) columnlist ::= columnlist COMMA 
column */ - 246, /* (152) columnlist ::= column */ - 250, /* (153) column ::= ids typename */ - 247, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */ - 247, /* (155) tagitemlist ::= tagitem */ - 251, /* (156) tagitem ::= INTEGER */ - 251, /* (157) tagitem ::= FLOAT */ - 251, /* (158) tagitem ::= STRING */ - 251, /* (159) tagitem ::= BOOL */ - 251, /* (160) tagitem ::= NULL */ - 251, /* (161) tagitem ::= NOW */ - 251, /* (162) tagitem ::= MINUS INTEGER */ - 251, /* (163) tagitem ::= MINUS FLOAT */ - 251, /* (164) tagitem ::= PLUS INTEGER */ - 251, /* (165) tagitem ::= PLUS FLOAT */ - 249, /* (166) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - 249, /* (167) select ::= LP select RP */ - 266, /* (168) union ::= select */ - 266, /* (169) union ::= union UNION ALL select */ - 200, /* (170) cmd ::= union */ - 249, /* (171) select ::= SELECT selcollist */ - 267, /* (172) sclp ::= selcollist COMMA */ - 267, /* (173) sclp ::= */ - 252, /* (174) selcollist ::= sclp distinct expr as */ - 252, /* (175) selcollist ::= sclp STAR */ - 270, /* (176) as ::= AS ids */ - 270, /* (177) as ::= ids */ - 270, /* (178) as ::= */ - 268, /* (179) distinct ::= DISTINCT */ - 268, /* (180) distinct ::= */ - 253, /* (181) from ::= FROM tablelist */ - 253, /* (182) from ::= FROM sub */ - 272, /* (183) sub ::= LP union RP */ - 272, /* (184) sub ::= LP union RP ids */ - 272, /* (185) sub ::= sub COMMA LP union RP ids */ - 271, /* (186) tablelist ::= ids cpxName */ - 271, /* (187) tablelist ::= ids cpxName ids */ - 271, /* (188) tablelist ::= tablelist COMMA ids cpxName */ - 271, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */ - 273, /* (190) tmvar ::= VARIABLE */ - 274, /* (191) timestamp ::= INTEGER */ - 274, /* (192) timestamp ::= MINUS INTEGER */ - 274, /* (193) timestamp ::= PLUS INTEGER */ - 274, /* (194) timestamp ::= STRING */ - 274, /* 
(195) timestamp ::= NOW */ - 274, /* (196) timestamp ::= NOW PLUS VARIABLE */ - 274, /* (197) timestamp ::= NOW MINUS VARIABLE */ - 255, /* (198) range_option ::= */ - 255, /* (199) range_option ::= RANGE LP timestamp COMMA timestamp RP */ - 256, /* (200) interval_option ::= intervalKey LP tmvar RP */ - 256, /* (201) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - 256, /* (202) interval_option ::= */ - 275, /* (203) intervalKey ::= INTERVAL */ - 275, /* (204) intervalKey ::= EVERY */ - 258, /* (205) session_option ::= */ - 258, /* (206) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 259, /* (207) windowstate_option ::= */ - 259, /* (208) windowstate_option ::= STATE_WINDOW LP ids RP */ - 260, /* (209) fill_opt ::= */ - 260, /* (210) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - 260, /* (211) fill_opt ::= FILL LP ID RP */ - 257, /* (212) sliding_opt ::= SLIDING LP tmvar RP */ - 257, /* (213) sliding_opt ::= */ - 263, /* (214) orderby_opt ::= */ - 263, /* (215) orderby_opt ::= ORDER BY sortlist */ - 276, /* (216) sortlist ::= sortlist COMMA item sortorder */ - 276, /* (217) sortlist ::= item sortorder */ - 278, /* (218) item ::= ids cpxName */ - 279, /* (219) sortorder ::= ASC */ - 279, /* (220) sortorder ::= DESC */ - 279, /* (221) sortorder ::= */ - 261, /* (222) groupby_opt ::= */ - 261, /* (223) groupby_opt ::= GROUP BY grouplist */ - 280, /* (224) grouplist ::= grouplist COMMA item */ - 280, /* (225) grouplist ::= item */ - 262, /* (226) having_opt ::= */ - 262, /* (227) having_opt ::= HAVING expr */ - 265, /* (228) limit_opt ::= */ - 265, /* (229) limit_opt ::= LIMIT signed */ - 265, /* (230) limit_opt ::= LIMIT signed OFFSET signed */ - 265, /* (231) limit_opt ::= LIMIT signed COMMA signed */ - 264, /* (232) slimit_opt ::= */ - 264, /* (233) slimit_opt ::= SLIMIT signed */ - 264, /* (234) slimit_opt ::= SLIMIT signed SOFFSET signed */ - 264, /* (235) slimit_opt ::= SLIMIT signed COMMA signed */ - 254, /* (236) where_opt ::= */ - 
254, /* (237) where_opt ::= WHERE expr */ - 269, /* (238) expr ::= LP expr RP */ - 269, /* (239) expr ::= ID */ - 269, /* (240) expr ::= ID DOT ID */ - 269, /* (241) expr ::= ID DOT STAR */ - 269, /* (242) expr ::= INTEGER */ - 269, /* (243) expr ::= MINUS INTEGER */ - 269, /* (244) expr ::= PLUS INTEGER */ - 269, /* (245) expr ::= FLOAT */ - 269, /* (246) expr ::= MINUS FLOAT */ - 269, /* (247) expr ::= PLUS FLOAT */ - 269, /* (248) expr ::= STRING */ - 269, /* (249) expr ::= NOW */ - 269, /* (250) expr ::= VARIABLE */ - 269, /* (251) expr ::= PLUS VARIABLE */ - 269, /* (252) expr ::= MINUS VARIABLE */ - 269, /* (253) expr ::= BOOL */ - 269, /* (254) expr ::= NULL */ - 269, /* (255) expr ::= ID LP exprlist RP */ - 269, /* (256) expr ::= ID LP STAR RP */ - 269, /* (257) expr ::= expr IS NULL */ - 269, /* (258) expr ::= expr IS NOT NULL */ - 269, /* (259) expr ::= expr LT expr */ - 269, /* (260) expr ::= expr GT expr */ - 269, /* (261) expr ::= expr LE expr */ - 269, /* (262) expr ::= expr GE expr */ - 269, /* (263) expr ::= expr NE expr */ - 269, /* (264) expr ::= expr EQ expr */ - 269, /* (265) expr ::= expr BETWEEN expr AND expr */ - 269, /* (266) expr ::= expr AND expr */ - 269, /* (267) expr ::= expr OR expr */ - 269, /* (268) expr ::= expr PLUS expr */ - 269, /* (269) expr ::= expr MINUS expr */ - 269, /* (270) expr ::= expr STAR expr */ - 269, /* (271) expr ::= expr SLASH expr */ - 269, /* (272) expr ::= expr REM expr */ - 269, /* (273) expr ::= expr LIKE expr */ - 269, /* (274) expr ::= expr MATCH expr */ - 269, /* (275) expr ::= expr NMATCH expr */ - 269, /* (276) expr ::= expr IN LP exprlist RP */ - 208, /* (277) exprlist ::= exprlist COMMA expritem */ - 208, /* (278) exprlist ::= expritem */ - 281, /* (279) expritem ::= expr */ - 281, /* (280) expritem ::= */ - 200, /* (281) cmd ::= RESET QUERY CACHE */ - 200, /* (282) cmd ::= SYNCDB ids REPLICA */ - 200, /* (283) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - 200, /* (284) cmd ::= ALTER TABLE 
ids cpxName DROP COLUMN ids */ - 200, /* (285) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - 200, /* (286) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - 200, /* (287) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - 200, /* (288) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - 200, /* (289) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 200, /* (290) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - 200, /* (291) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - 200, /* (292) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 200, /* (293) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - 200, /* (294) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 200, /* (295) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 200, /* (296) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 200, /* (297) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - 200, /* (298) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - 200, /* (299) cmd ::= KILL CONNECTION INTEGER */ - 200, /* (300) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 200, /* (301) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 201, /* (0) program ::= cmd */ + 202, /* (1) cmd ::= SHOW DATABASES */ + 202, /* (2) cmd ::= SHOW TOPICS */ + 202, /* (3) cmd ::= SHOW FUNCTIONS */ + 202, /* (4) cmd ::= SHOW MNODES */ + 202, /* (5) cmd ::= SHOW DNODES */ + 202, /* (6) cmd ::= SHOW ACCOUNTS */ + 202, /* (7) cmd ::= SHOW USERS */ + 202, /* (8) cmd ::= SHOW MODULES */ + 202, /* (9) cmd ::= SHOW QUERIES */ + 202, /* (10) cmd ::= SHOW CONNECTIONS */ + 202, /* (11) cmd ::= SHOW STREAMS */ + 202, /* (12) cmd ::= SHOW VARIABLES */ + 202, /* (13) cmd ::= SHOW SCORES */ + 202, /* (14) cmd ::= SHOW GRANTS */ + 202, /* (15) cmd ::= SHOW VNODES */ + 202, /* (16) cmd ::= SHOW VNODES ids */ + 204, /* (17) dbPrefix ::= */ + 204, /* (18) dbPrefix ::= ids DOT */ + 205, /* (19) cpxName ::= */ + 205, /* (20) cpxName ::= DOT ids */ + 
202, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ + 202, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ + 202, /* (23) cmd ::= SHOW CREATE DATABASE ids */ + 202, /* (24) cmd ::= SHOW dbPrefix TABLES */ + 202, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + 202, /* (26) cmd ::= SHOW dbPrefix STABLES */ + 202, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + 202, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ + 202, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + 202, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + 202, /* (31) cmd ::= DROP DATABASE ifexists ids */ + 202, /* (32) cmd ::= DROP TOPIC ifexists ids */ + 202, /* (33) cmd ::= DROP FUNCTION ids */ + 202, /* (34) cmd ::= DROP DNODE ids */ + 202, /* (35) cmd ::= DROP USER ids */ + 202, /* (36) cmd ::= DROP ACCOUNT ids */ + 202, /* (37) cmd ::= USE ids */ + 202, /* (38) cmd ::= DESCRIBE ids cpxName */ + 202, /* (39) cmd ::= DESC ids cpxName */ + 202, /* (40) cmd ::= ALTER USER ids PASS ids */ + 202, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */ + 202, /* (42) cmd ::= ALTER DNODE ids ids */ + 202, /* (43) cmd ::= ALTER DNODE ids ids ids */ + 202, /* (44) cmd ::= ALTER LOCAL ids */ + 202, /* (45) cmd ::= ALTER LOCAL ids ids */ + 202, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */ + 202, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */ + 202, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */ + 202, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 202, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */ + 203, /* (51) ids ::= ID */ + 203, /* (52) ids ::= STRING */ + 206, /* (53) ifexists ::= IF EXISTS */ + 206, /* (54) ifexists ::= */ + 211, /* (55) ifnotexists ::= IF NOT EXISTS */ + 211, /* (56) ifnotexists ::= */ + 202, /* (57) cmd ::= CREATE DNODE ids */ + 202, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 202, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + 202, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + 202, /* (61) cmd ::= CREATE 
FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 202, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 202, /* (63) cmd ::= CREATE USER ids PASS ids */ + 215, /* (64) bufsize ::= */ + 215, /* (65) bufsize ::= BUFSIZE INTEGER */ + 216, /* (66) pps ::= */ + 216, /* (67) pps ::= PPS INTEGER */ + 217, /* (68) tseries ::= */ + 217, /* (69) tseries ::= TSERIES INTEGER */ + 218, /* (70) dbs ::= */ + 218, /* (71) dbs ::= DBS INTEGER */ + 219, /* (72) streams ::= */ + 219, /* (73) streams ::= STREAMS INTEGER */ + 220, /* (74) storage ::= */ + 220, /* (75) storage ::= STORAGE INTEGER */ + 221, /* (76) qtime ::= */ + 221, /* (77) qtime ::= QTIME INTEGER */ + 222, /* (78) users ::= */ + 222, /* (79) users ::= USERS INTEGER */ + 223, /* (80) conns ::= */ + 223, /* (81) conns ::= CONNS INTEGER */ + 224, /* (82) state ::= */ + 224, /* (83) state ::= STATE ids */ + 209, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 225, /* (85) intitemlist ::= intitemlist COMMA intitem */ + 225, /* (86) intitemlist ::= intitem */ + 226, /* (87) intitem ::= INTEGER */ + 227, /* (88) keep ::= KEEP intitemlist */ + 228, /* (89) cache ::= CACHE INTEGER */ + 229, /* (90) replica ::= REPLICA INTEGER */ + 230, /* (91) quorum ::= QUORUM INTEGER */ + 231, /* (92) days ::= DAYS INTEGER */ + 232, /* (93) minrows ::= MINROWS INTEGER */ + 233, /* (94) maxrows ::= MAXROWS INTEGER */ + 234, /* (95) blocks ::= BLOCKS INTEGER */ + 235, /* (96) ctime ::= CTIME INTEGER */ + 236, /* (97) wal ::= WAL INTEGER */ + 237, /* (98) fsync ::= FSYNC INTEGER */ + 238, /* (99) comp ::= COMP INTEGER */ + 239, /* (100) prec ::= PRECISION STRING */ + 240, /* (101) update ::= UPDATE INTEGER */ + 241, /* (102) cachelast ::= CACHELAST INTEGER */ + 242, /* (103) partitions ::= PARTITIONS INTEGER */ + 212, /* (104) db_optr ::= */ + 212, /* (105) db_optr ::= db_optr cache */ + 212, /* (106) db_optr ::= db_optr replica */ + 212, /* (107) db_optr ::= db_optr 
quorum */ + 212, /* (108) db_optr ::= db_optr days */ + 212, /* (109) db_optr ::= db_optr minrows */ + 212, /* (110) db_optr ::= db_optr maxrows */ + 212, /* (111) db_optr ::= db_optr blocks */ + 212, /* (112) db_optr ::= db_optr ctime */ + 212, /* (113) db_optr ::= db_optr wal */ + 212, /* (114) db_optr ::= db_optr fsync */ + 212, /* (115) db_optr ::= db_optr comp */ + 212, /* (116) db_optr ::= db_optr prec */ + 212, /* (117) db_optr ::= db_optr keep */ + 212, /* (118) db_optr ::= db_optr update */ + 212, /* (119) db_optr ::= db_optr cachelast */ + 213, /* (120) topic_optr ::= db_optr */ + 213, /* (121) topic_optr ::= topic_optr partitions */ + 207, /* (122) alter_db_optr ::= */ + 207, /* (123) alter_db_optr ::= alter_db_optr replica */ + 207, /* (124) alter_db_optr ::= alter_db_optr quorum */ + 207, /* (125) alter_db_optr ::= alter_db_optr keep */ + 207, /* (126) alter_db_optr ::= alter_db_optr blocks */ + 207, /* (127) alter_db_optr ::= alter_db_optr comp */ + 207, /* (128) alter_db_optr ::= alter_db_optr update */ + 207, /* (129) alter_db_optr ::= alter_db_optr cachelast */ + 208, /* (130) alter_topic_optr ::= alter_db_optr */ + 208, /* (131) alter_topic_optr ::= alter_topic_optr partitions */ + 214, /* (132) typename ::= ids */ + 214, /* (133) typename ::= ids LP signed RP */ + 214, /* (134) typename ::= ids UNSIGNED */ + 243, /* (135) signed ::= INTEGER */ + 243, /* (136) signed ::= PLUS INTEGER */ + 243, /* (137) signed ::= MINUS INTEGER */ + 202, /* (138) cmd ::= CREATE TABLE create_table_args */ + 202, /* (139) cmd ::= CREATE TABLE create_stable_args */ + 202, /* (140) cmd ::= CREATE STABLE create_stable_args */ + 202, /* (141) cmd ::= CREATE TABLE create_table_list */ + 246, /* (142) create_table_list ::= create_from_stable */ + 246, /* (143) create_table_list ::= create_table_list create_from_stable */ + 244, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 245, /* (145) create_stable_args ::= ifnotexists ids cpxName LP 
columnlist RP TAGS LP columnlist RP */ + 247, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 247, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 250, /* (148) tagNamelist ::= tagNamelist COMMA ids */ + 250, /* (149) tagNamelist ::= ids */ + 244, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */ + 248, /* (151) columnlist ::= columnlist COMMA column */ + 248, /* (152) columnlist ::= column */ + 252, /* (153) column ::= ids typename */ + 249, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */ + 249, /* (155) tagitemlist ::= tagitem */ + 253, /* (156) tagitem ::= INTEGER */ + 253, /* (157) tagitem ::= FLOAT */ + 253, /* (158) tagitem ::= STRING */ + 253, /* (159) tagitem ::= BOOL */ + 253, /* (160) tagitem ::= NULL */ + 253, /* (161) tagitem ::= NOW */ + 253, /* (162) tagitem ::= NOW PLUS VARIABLE */ + 253, /* (163) tagitem ::= NOW MINUS VARIABLE */ + 253, /* (164) tagitem ::= MINUS INTEGER */ + 253, /* (165) tagitem ::= MINUS FLOAT */ + 253, /* (166) tagitem ::= PLUS INTEGER */ + 253, /* (167) tagitem ::= PLUS FLOAT */ + 251, /* (168) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + 251, /* (169) select ::= LP select RP */ + 268, /* (170) union ::= select */ + 268, /* (171) union ::= union UNION ALL select */ + 202, /* (172) cmd ::= union */ + 251, /* (173) select ::= SELECT selcollist */ + 269, /* (174) sclp ::= selcollist COMMA */ + 269, /* (175) sclp ::= */ + 254, /* (176) selcollist ::= sclp distinct expr as */ + 254, /* (177) selcollist ::= sclp STAR */ + 272, /* (178) as ::= AS ids */ + 272, /* (179) as ::= ids */ + 272, /* (180) as ::= */ + 270, /* (181) distinct ::= DISTINCT */ + 270, /* (182) distinct ::= */ + 255, /* (183) from ::= FROM tablelist */ + 255, /* (184) from ::= 
FROM sub */ + 274, /* (185) sub ::= LP union RP */ + 274, /* (186) sub ::= LP union RP ids */ + 274, /* (187) sub ::= sub COMMA LP union RP ids */ + 273, /* (188) tablelist ::= ids cpxName */ + 273, /* (189) tablelist ::= ids cpxName ids */ + 273, /* (190) tablelist ::= tablelist COMMA ids cpxName */ + 273, /* (191) tablelist ::= tablelist COMMA ids cpxName ids */ + 275, /* (192) tmvar ::= VARIABLE */ + 276, /* (193) timestamp ::= INTEGER */ + 276, /* (194) timestamp ::= MINUS INTEGER */ + 276, /* (195) timestamp ::= PLUS INTEGER */ + 276, /* (196) timestamp ::= STRING */ + 276, /* (197) timestamp ::= NOW */ + 276, /* (198) timestamp ::= NOW PLUS VARIABLE */ + 276, /* (199) timestamp ::= NOW MINUS VARIABLE */ + 257, /* (200) range_option ::= */ + 257, /* (201) range_option ::= RANGE LP timestamp COMMA timestamp RP */ + 258, /* (202) interval_option ::= intervalKey LP tmvar RP */ + 258, /* (203) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 258, /* (204) interval_option ::= */ + 277, /* (205) intervalKey ::= INTERVAL */ + 277, /* (206) intervalKey ::= EVERY */ + 260, /* (207) session_option ::= */ + 260, /* (208) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 261, /* (209) windowstate_option ::= */ + 261, /* (210) windowstate_option ::= STATE_WINDOW LP ids RP */ + 262, /* (211) fill_opt ::= */ + 262, /* (212) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 262, /* (213) fill_opt ::= FILL LP ID RP */ + 259, /* (214) sliding_opt ::= SLIDING LP tmvar RP */ + 259, /* (215) sliding_opt ::= */ + 265, /* (216) orderby_opt ::= */ + 265, /* (217) orderby_opt ::= ORDER BY sortlist */ + 278, /* (218) sortlist ::= sortlist COMMA item sortorder */ + 278, /* (219) sortlist ::= sortlist COMMA arrow sortorder */ + 278, /* (220) sortlist ::= item sortorder */ + 278, /* (221) sortlist ::= arrow sortorder */ + 279, /* (222) item ::= ID */ + 279, /* (223) item ::= ID DOT ID */ + 280, /* (224) sortorder ::= ASC */ + 280, /* (225) sortorder ::= DESC */ + 
280, /* (226) sortorder ::= */ + 263, /* (227) groupby_opt ::= */ + 263, /* (228) groupby_opt ::= GROUP BY grouplist */ + 282, /* (229) grouplist ::= grouplist COMMA item */ + 282, /* (230) grouplist ::= grouplist COMMA arrow */ + 282, /* (231) grouplist ::= item */ + 282, /* (232) grouplist ::= arrow */ + 264, /* (233) having_opt ::= */ + 264, /* (234) having_opt ::= HAVING expr */ + 267, /* (235) limit_opt ::= */ + 267, /* (236) limit_opt ::= LIMIT signed */ + 267, /* (237) limit_opt ::= LIMIT signed OFFSET signed */ + 267, /* (238) limit_opt ::= LIMIT signed COMMA signed */ + 266, /* (239) slimit_opt ::= */ + 266, /* (240) slimit_opt ::= SLIMIT signed */ + 266, /* (241) slimit_opt ::= SLIMIT signed SOFFSET signed */ + 266, /* (242) slimit_opt ::= SLIMIT signed COMMA signed */ + 256, /* (243) where_opt ::= */ + 256, /* (244) where_opt ::= WHERE expr */ + 271, /* (245) expr ::= LP expr RP */ + 271, /* (246) expr ::= ID */ + 271, /* (247) expr ::= ID DOT ID */ + 271, /* (248) expr ::= ID DOT STAR */ + 271, /* (249) expr ::= INTEGER */ + 271, /* (250) expr ::= MINUS INTEGER */ + 271, /* (251) expr ::= PLUS INTEGER */ + 271, /* (252) expr ::= FLOAT */ + 271, /* (253) expr ::= MINUS FLOAT */ + 271, /* (254) expr ::= PLUS FLOAT */ + 271, /* (255) expr ::= STRING */ + 271, /* (256) expr ::= NOW */ + 271, /* (257) expr ::= VARIABLE */ + 271, /* (258) expr ::= PLUS VARIABLE */ + 271, /* (259) expr ::= MINUS VARIABLE */ + 271, /* (260) expr ::= BOOL */ + 271, /* (261) expr ::= NULL */ + 271, /* (262) expr ::= ID LP exprlist RP */ + 271, /* (263) expr ::= ID LP STAR RP */ + 271, /* (264) expr ::= ID LP expr AS typename RP */ + 271, /* (265) expr ::= expr IS NULL */ + 271, /* (266) expr ::= expr IS NOT NULL */ + 271, /* (267) expr ::= expr LT expr */ + 271, /* (268) expr ::= expr GT expr */ + 271, /* (269) expr ::= expr LE expr */ + 271, /* (270) expr ::= expr GE expr */ + 271, /* (271) expr ::= expr NE expr */ + 271, /* (272) expr ::= expr EQ expr */ + 271, /* (273) expr 
::= expr BETWEEN expr AND expr */ + 271, /* (274) expr ::= expr AND expr */ + 271, /* (275) expr ::= expr OR expr */ + 271, /* (276) expr ::= expr PLUS expr */ + 271, /* (277) expr ::= expr MINUS expr */ + 271, /* (278) expr ::= expr STAR expr */ + 271, /* (279) expr ::= expr SLASH expr */ + 271, /* (280) expr ::= expr REM expr */ + 271, /* (281) expr ::= expr LIKE expr */ + 271, /* (282) expr ::= expr MATCH expr */ + 271, /* (283) expr ::= expr NMATCH expr */ + 271, /* (284) expr ::= ID CONTAINS STRING */ + 271, /* (285) expr ::= ID DOT ID CONTAINS STRING */ + 281, /* (286) arrow ::= ID ARROW STRING */ + 281, /* (287) arrow ::= ID DOT ID ARROW STRING */ + 271, /* (288) expr ::= arrow */ + 271, /* (289) expr ::= expr IN LP exprlist RP */ + 210, /* (290) exprlist ::= exprlist COMMA expritem */ + 210, /* (291) exprlist ::= expritem */ + 283, /* (292) expritem ::= expr */ + 283, /* (293) expritem ::= */ + 202, /* (294) cmd ::= RESET QUERY CACHE */ + 202, /* (295) cmd ::= SYNCDB ids REPLICA */ + 202, /* (296) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 202, /* (297) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 202, /* (298) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + 202, /* (299) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 202, /* (300) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 202, /* (301) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 202, /* (302) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 202, /* (303) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 202, /* (304) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 202, /* (305) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 202, /* (306) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 202, /* (307) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 202, /* (308) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 202, /* (309) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 
202, /* (310) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 202, /* (311) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 202, /* (312) cmd ::= KILL CONNECTION INTEGER */ + 202, /* (313) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 202, /* (314) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2341,146 +2586,159 @@ static const signed char yyRuleInfoNRhs[] = { -1, /* (159) tagitem ::= BOOL */ -1, /* (160) tagitem ::= NULL */ -1, /* (161) tagitem ::= NOW */ - -2, /* (162) tagitem ::= MINUS INTEGER */ - -2, /* (163) tagitem ::= MINUS FLOAT */ - -2, /* (164) tagitem ::= PLUS INTEGER */ - -2, /* (165) tagitem ::= PLUS FLOAT */ - -15, /* (166) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - -3, /* (167) select ::= LP select RP */ - -1, /* (168) union ::= select */ - -4, /* (169) union ::= union UNION ALL select */ - -1, /* (170) cmd ::= union */ - -2, /* (171) select ::= SELECT selcollist */ - -2, /* (172) sclp ::= selcollist COMMA */ - 0, /* (173) sclp ::= */ - -4, /* (174) selcollist ::= sclp distinct expr as */ - -2, /* (175) selcollist ::= sclp STAR */ - -2, /* (176) as ::= AS ids */ - -1, /* (177) as ::= ids */ - 0, /* (178) as ::= */ - -1, /* (179) distinct ::= DISTINCT */ - 0, /* (180) distinct ::= */ - -2, /* (181) from ::= FROM tablelist */ - -2, /* (182) from ::= FROM sub */ - -3, /* (183) sub ::= LP union RP */ - -4, /* (184) sub ::= LP union RP ids */ - -6, /* (185) sub ::= sub COMMA LP union RP ids */ - -2, /* (186) tablelist ::= ids cpxName */ - -3, /* (187) tablelist ::= ids cpxName ids */ - -4, /* (188) tablelist ::= tablelist COMMA ids cpxName */ - -5, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */ - -1, /* (190) tmvar ::= VARIABLE */ - -1, /* (191) timestamp ::= INTEGER */ - -2, /* (192) timestamp ::= 
MINUS INTEGER */ - -2, /* (193) timestamp ::= PLUS INTEGER */ - -1, /* (194) timestamp ::= STRING */ - -1, /* (195) timestamp ::= NOW */ - -3, /* (196) timestamp ::= NOW PLUS VARIABLE */ - -3, /* (197) timestamp ::= NOW MINUS VARIABLE */ - 0, /* (198) range_option ::= */ - -6, /* (199) range_option ::= RANGE LP timestamp COMMA timestamp RP */ - -4, /* (200) interval_option ::= intervalKey LP tmvar RP */ - -6, /* (201) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - 0, /* (202) interval_option ::= */ - -1, /* (203) intervalKey ::= INTERVAL */ - -1, /* (204) intervalKey ::= EVERY */ - 0, /* (205) session_option ::= */ - -7, /* (206) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 0, /* (207) windowstate_option ::= */ - -4, /* (208) windowstate_option ::= STATE_WINDOW LP ids RP */ - 0, /* (209) fill_opt ::= */ - -6, /* (210) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - -4, /* (211) fill_opt ::= FILL LP ID RP */ - -4, /* (212) sliding_opt ::= SLIDING LP tmvar RP */ - 0, /* (213) sliding_opt ::= */ - 0, /* (214) orderby_opt ::= */ - -3, /* (215) orderby_opt ::= ORDER BY sortlist */ - -4, /* (216) sortlist ::= sortlist COMMA item sortorder */ - -2, /* (217) sortlist ::= item sortorder */ - -2, /* (218) item ::= ids cpxName */ - -1, /* (219) sortorder ::= ASC */ - -1, /* (220) sortorder ::= DESC */ - 0, /* (221) sortorder ::= */ - 0, /* (222) groupby_opt ::= */ - -3, /* (223) groupby_opt ::= GROUP BY grouplist */ - -3, /* (224) grouplist ::= grouplist COMMA item */ - -1, /* (225) grouplist ::= item */ - 0, /* (226) having_opt ::= */ - -2, /* (227) having_opt ::= HAVING expr */ - 0, /* (228) limit_opt ::= */ - -2, /* (229) limit_opt ::= LIMIT signed */ - -4, /* (230) limit_opt ::= LIMIT signed OFFSET signed */ - -4, /* (231) limit_opt ::= LIMIT signed COMMA signed */ - 0, /* (232) slimit_opt ::= */ - -2, /* (233) slimit_opt ::= SLIMIT signed */ - -4, /* (234) slimit_opt ::= SLIMIT signed SOFFSET signed */ - -4, /* (235) slimit_opt ::= SLIMIT 
signed COMMA signed */ - 0, /* (236) where_opt ::= */ - -2, /* (237) where_opt ::= WHERE expr */ - -3, /* (238) expr ::= LP expr RP */ - -1, /* (239) expr ::= ID */ - -3, /* (240) expr ::= ID DOT ID */ - -3, /* (241) expr ::= ID DOT STAR */ - -1, /* (242) expr ::= INTEGER */ - -2, /* (243) expr ::= MINUS INTEGER */ - -2, /* (244) expr ::= PLUS INTEGER */ - -1, /* (245) expr ::= FLOAT */ - -2, /* (246) expr ::= MINUS FLOAT */ - -2, /* (247) expr ::= PLUS FLOAT */ - -1, /* (248) expr ::= STRING */ - -1, /* (249) expr ::= NOW */ - -1, /* (250) expr ::= VARIABLE */ - -2, /* (251) expr ::= PLUS VARIABLE */ - -2, /* (252) expr ::= MINUS VARIABLE */ - -1, /* (253) expr ::= BOOL */ - -1, /* (254) expr ::= NULL */ - -4, /* (255) expr ::= ID LP exprlist RP */ - -4, /* (256) expr ::= ID LP STAR RP */ - -3, /* (257) expr ::= expr IS NULL */ - -4, /* (258) expr ::= expr IS NOT NULL */ - -3, /* (259) expr ::= expr LT expr */ - -3, /* (260) expr ::= expr GT expr */ - -3, /* (261) expr ::= expr LE expr */ - -3, /* (262) expr ::= expr GE expr */ - -3, /* (263) expr ::= expr NE expr */ - -3, /* (264) expr ::= expr EQ expr */ - -5, /* (265) expr ::= expr BETWEEN expr AND expr */ - -3, /* (266) expr ::= expr AND expr */ - -3, /* (267) expr ::= expr OR expr */ - -3, /* (268) expr ::= expr PLUS expr */ - -3, /* (269) expr ::= expr MINUS expr */ - -3, /* (270) expr ::= expr STAR expr */ - -3, /* (271) expr ::= expr SLASH expr */ - -3, /* (272) expr ::= expr REM expr */ - -3, /* (273) expr ::= expr LIKE expr */ - -3, /* (274) expr ::= expr MATCH expr */ - -3, /* (275) expr ::= expr NMATCH expr */ - -5, /* (276) expr ::= expr IN LP exprlist RP */ - -3, /* (277) exprlist ::= exprlist COMMA expritem */ - -1, /* (278) exprlist ::= expritem */ - -1, /* (279) expritem ::= expr */ - 0, /* (280) expritem ::= */ - -3, /* (281) cmd ::= RESET QUERY CACHE */ - -3, /* (282) cmd ::= SYNCDB ids REPLICA */ - -7, /* (283) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (284) cmd ::= 
ALTER TABLE ids cpxName DROP COLUMN ids */ - -7, /* (285) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (286) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - -7, /* (287) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - -8, /* (288) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (289) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (290) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - -7, /* (291) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (292) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (293) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (294) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (295) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (296) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (297) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (298) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - -3, /* (299) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (300) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (301) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + -3, /* (162) tagitem ::= NOW PLUS VARIABLE */ + -3, /* (163) tagitem ::= NOW MINUS VARIABLE */ + -2, /* (164) tagitem ::= MINUS INTEGER */ + -2, /* (165) tagitem ::= MINUS FLOAT */ + -2, /* (166) tagitem ::= PLUS INTEGER */ + -2, /* (167) tagitem ::= PLUS FLOAT */ + -15, /* (168) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + -3, /* (169) select ::= LP select RP */ + -1, /* (170) union ::= select */ + -4, /* (171) union ::= union UNION ALL select */ + -1, /* (172) cmd ::= union */ + -2, /* (173) select ::= SELECT selcollist */ + -2, /* (174) sclp ::= selcollist COMMA */ + 0, /* (175) sclp ::= */ + -4, /* (176) selcollist ::= sclp distinct expr as 
*/ + -2, /* (177) selcollist ::= sclp STAR */ + -2, /* (178) as ::= AS ids */ + -1, /* (179) as ::= ids */ + 0, /* (180) as ::= */ + -1, /* (181) distinct ::= DISTINCT */ + 0, /* (182) distinct ::= */ + -2, /* (183) from ::= FROM tablelist */ + -2, /* (184) from ::= FROM sub */ + -3, /* (185) sub ::= LP union RP */ + -4, /* (186) sub ::= LP union RP ids */ + -6, /* (187) sub ::= sub COMMA LP union RP ids */ + -2, /* (188) tablelist ::= ids cpxName */ + -3, /* (189) tablelist ::= ids cpxName ids */ + -4, /* (190) tablelist ::= tablelist COMMA ids cpxName */ + -5, /* (191) tablelist ::= tablelist COMMA ids cpxName ids */ + -1, /* (192) tmvar ::= VARIABLE */ + -1, /* (193) timestamp ::= INTEGER */ + -2, /* (194) timestamp ::= MINUS INTEGER */ + -2, /* (195) timestamp ::= PLUS INTEGER */ + -1, /* (196) timestamp ::= STRING */ + -1, /* (197) timestamp ::= NOW */ + -3, /* (198) timestamp ::= NOW PLUS VARIABLE */ + -3, /* (199) timestamp ::= NOW MINUS VARIABLE */ + 0, /* (200) range_option ::= */ + -6, /* (201) range_option ::= RANGE LP timestamp COMMA timestamp RP */ + -4, /* (202) interval_option ::= intervalKey LP tmvar RP */ + -6, /* (203) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 0, /* (204) interval_option ::= */ + -1, /* (205) intervalKey ::= INTERVAL */ + -1, /* (206) intervalKey ::= EVERY */ + 0, /* (207) session_option ::= */ + -7, /* (208) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 0, /* (209) windowstate_option ::= */ + -4, /* (210) windowstate_option ::= STATE_WINDOW LP ids RP */ + 0, /* (211) fill_opt ::= */ + -6, /* (212) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + -4, /* (213) fill_opt ::= FILL LP ID RP */ + -4, /* (214) sliding_opt ::= SLIDING LP tmvar RP */ + 0, /* (215) sliding_opt ::= */ + 0, /* (216) orderby_opt ::= */ + -3, /* (217) orderby_opt ::= ORDER BY sortlist */ + -4, /* (218) sortlist ::= sortlist COMMA item sortorder */ + -4, /* (219) sortlist ::= sortlist COMMA arrow sortorder */ + -2, /* (220) 
sortlist ::= item sortorder */ + -2, /* (221) sortlist ::= arrow sortorder */ + -1, /* (222) item ::= ID */ + -3, /* (223) item ::= ID DOT ID */ + -1, /* (224) sortorder ::= ASC */ + -1, /* (225) sortorder ::= DESC */ + 0, /* (226) sortorder ::= */ + 0, /* (227) groupby_opt ::= */ + -3, /* (228) groupby_opt ::= GROUP BY grouplist */ + -3, /* (229) grouplist ::= grouplist COMMA item */ + -3, /* (230) grouplist ::= grouplist COMMA arrow */ + -1, /* (231) grouplist ::= item */ + -1, /* (232) grouplist ::= arrow */ + 0, /* (233) having_opt ::= */ + -2, /* (234) having_opt ::= HAVING expr */ + 0, /* (235) limit_opt ::= */ + -2, /* (236) limit_opt ::= LIMIT signed */ + -4, /* (237) limit_opt ::= LIMIT signed OFFSET signed */ + -4, /* (238) limit_opt ::= LIMIT signed COMMA signed */ + 0, /* (239) slimit_opt ::= */ + -2, /* (240) slimit_opt ::= SLIMIT signed */ + -4, /* (241) slimit_opt ::= SLIMIT signed SOFFSET signed */ + -4, /* (242) slimit_opt ::= SLIMIT signed COMMA signed */ + 0, /* (243) where_opt ::= */ + -2, /* (244) where_opt ::= WHERE expr */ + -3, /* (245) expr ::= LP expr RP */ + -1, /* (246) expr ::= ID */ + -3, /* (247) expr ::= ID DOT ID */ + -3, /* (248) expr ::= ID DOT STAR */ + -1, /* (249) expr ::= INTEGER */ + -2, /* (250) expr ::= MINUS INTEGER */ + -2, /* (251) expr ::= PLUS INTEGER */ + -1, /* (252) expr ::= FLOAT */ + -2, /* (253) expr ::= MINUS FLOAT */ + -2, /* (254) expr ::= PLUS FLOAT */ + -1, /* (255) expr ::= STRING */ + -1, /* (256) expr ::= NOW */ + -1, /* (257) expr ::= VARIABLE */ + -2, /* (258) expr ::= PLUS VARIABLE */ + -2, /* (259) expr ::= MINUS VARIABLE */ + -1, /* (260) expr ::= BOOL */ + -1, /* (261) expr ::= NULL */ + -4, /* (262) expr ::= ID LP exprlist RP */ + -4, /* (263) expr ::= ID LP STAR RP */ + -6, /* (264) expr ::= ID LP expr AS typename RP */ + -3, /* (265) expr ::= expr IS NULL */ + -4, /* (266) expr ::= expr IS NOT NULL */ + -3, /* (267) expr ::= expr LT expr */ + -3, /* (268) expr ::= expr GT expr */ + -3, /* (269) 
expr ::= expr LE expr */ + -3, /* (270) expr ::= expr GE expr */ + -3, /* (271) expr ::= expr NE expr */ + -3, /* (272) expr ::= expr EQ expr */ + -5, /* (273) expr ::= expr BETWEEN expr AND expr */ + -3, /* (274) expr ::= expr AND expr */ + -3, /* (275) expr ::= expr OR expr */ + -3, /* (276) expr ::= expr PLUS expr */ + -3, /* (277) expr ::= expr MINUS expr */ + -3, /* (278) expr ::= expr STAR expr */ + -3, /* (279) expr ::= expr SLASH expr */ + -3, /* (280) expr ::= expr REM expr */ + -3, /* (281) expr ::= expr LIKE expr */ + -3, /* (282) expr ::= expr MATCH expr */ + -3, /* (283) expr ::= expr NMATCH expr */ + -3, /* (284) expr ::= ID CONTAINS STRING */ + -5, /* (285) expr ::= ID DOT ID CONTAINS STRING */ + -3, /* (286) arrow ::= ID ARROW STRING */ + -5, /* (287) arrow ::= ID DOT ID ARROW STRING */ + -1, /* (288) expr ::= arrow */ + -5, /* (289) expr ::= expr IN LP exprlist RP */ + -3, /* (290) exprlist ::= exprlist COMMA expritem */ + -1, /* (291) exprlist ::= expritem */ + -1, /* (292) expritem ::= expr */ + 0, /* (293) expritem ::= */ + -3, /* (294) cmd ::= RESET QUERY CACHE */ + -3, /* (295) cmd ::= SYNCDB ids REPLICA */ + -7, /* (296) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (297) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -7, /* (298) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (299) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (300) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (301) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (302) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (303) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (304) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (305) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (306) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (307) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, 
/* (308) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (309) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (310) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (311) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (312) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (313) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (314) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2510,54 +2768,6 @@ static YYACTIONTYPE yy_reduce( (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; -#ifndef NDEBUG - if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfoNRhs[yyruleno]; - if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - yyTracePrompt, - yyruleno, yyRuleName[yyruleno], - yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ - yypParser->yyhwm++; - assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ - if( yyGrowStack(yypParser) ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - yymsp = yypParser->yytos; - } -#endif - } switch( yyruleno ){ /* Beginning here are the reduction cases. 
A typical example @@ -2743,16 +2953,16 @@ static YYACTIONTYPE yy_reduce( break; case 46: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 47: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==47); -{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy10, &t);} +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy470, &t);} break; case 48: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy427);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy51);} break; case 49: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy427);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy51);} break; case 50: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ -{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy345);} +{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy189);} break; case 51: /* ids ::= ID */ case 52: /* ids ::= STRING */ yytestcase(yyruleno==52); @@ -2764,7 +2974,7 @@ static YYACTIONTYPE yy_reduce( break; case 54: /* ifexists ::= */ case 56: /* ifnotexists ::= */ yytestcase(yyruleno==56); - case 180: /* distinct ::= */ yytestcase(yyruleno==180); + case 182: /* distinct ::= */ yytestcase(yyruleno==182); { yymsp[1].minor.yy0.n = 0;} break; case 55: /* ifnotexists ::= IF NOT EXISTS */ @@ -2774,17 +2984,17 @@ static YYACTIONTYPE yy_reduce( { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 58: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy427);} +{ setCreateAcctSql(pInfo, 
TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy51);} break; case 59: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 60: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==60); -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy10, &yymsp[-2].minor.yy0);} +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy470, &yymsp[-2].minor.yy0);} break; case 61: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ -{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy487, &yymsp[0].minor.yy0, 1);} +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy279, &yymsp[0].minor.yy0, 1);} break; case 62: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ -{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy487, &yymsp[0].minor.yy0, 2);} +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy279, &yymsp[0].minor.yy0, 2);} break; case 63: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} @@ -2815,38 +3025,38 @@ static YYACTIONTYPE yy_reduce( break; case 84: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy427.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy427.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy427.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy427.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy427.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - 
yylhsminor.yy427.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy427.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy427.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy427.stat = yymsp[0].minor.yy0; + yylhsminor.yy51.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy51.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy51.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy51.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy51.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy51.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy51.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy51.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy51.stat = yymsp[0].minor.yy0; } - yymsp[-8].minor.yy427 = yylhsminor.yy427; + yymsp[-8].minor.yy51 = yylhsminor.yy51; break; case 85: /* intitemlist ::= intitemlist COMMA intitem */ case 154: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==154); -{ yylhsminor.yy345 = tVariantListAppend(yymsp[-2].minor.yy345, &yymsp[0].minor.yy2, -1); } - yymsp[-2].minor.yy345 = yylhsminor.yy345; +{ yylhsminor.yy189 = tVariantListAppend(yymsp[-2].minor.yy189, &yymsp[0].minor.yy162, -1); } + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; case 86: /* intitemlist ::= intitem */ case 155: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==155); -{ yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[0].minor.yy2, -1); } - yymsp[0].minor.yy345 = yylhsminor.yy345; +{ yylhsminor.yy189 = tVariantListAppend(NULL, &yymsp[0].minor.yy162, -1); } + yymsp[0].minor.yy189 = 
yylhsminor.yy189; break; case 87: /* intitem ::= INTEGER */ case 156: /* tagitem ::= INTEGER */ yytestcase(yyruleno==156); case 157: /* tagitem ::= FLOAT */ yytestcase(yyruleno==157); case 158: /* tagitem ::= STRING */ yytestcase(yyruleno==158); case 159: /* tagitem ::= BOOL */ yytestcase(yyruleno==159); -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true); } - yymsp[0].minor.yy2 = yylhsminor.yy2; +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy162, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy162 = yylhsminor.yy162; break; case 88: /* keep ::= KEEP intitemlist */ -{ yymsp[-1].minor.yy345 = yymsp[0].minor.yy345; } +{ yymsp[-1].minor.yy189 = yymsp[0].minor.yy189; } break; case 89: /* cache ::= CACHE INTEGER */ case 90: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==90); @@ -2866,681 +3076,744 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 104: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy10); yymsp[1].minor.yy10.dbType = TSDB_DB_TYPE_DEFAULT;} +{setDefaultCreateDbOption(&yymsp[1].minor.yy470); yymsp[1].minor.yy470.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 105: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 106: /* db_optr ::= db_optr replica */ case 123: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==123); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = 
yylhsminor.yy470; break; case 107: /* db_optr ::= db_optr quorum */ case 124: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==124); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 108: /* db_optr ::= db_optr days */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 109: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 110: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 111: /* db_optr ::= db_optr blocks */ case 126: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==126); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = 
yylhsminor.yy470; break; case 112: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 113: /* db_optr ::= db_optr wal */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 114: /* db_optr ::= db_optr fsync */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 115: /* db_optr ::= db_optr comp */ case 127: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==127); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 116: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 117: /* db_optr ::= db_optr keep */ case 125: /* alter_db_optr ::= alter_db_optr keep */ 
yytestcase(yyruleno==125); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.keep = yymsp[0].minor.yy345; } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.keep = yymsp[0].minor.yy189; } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 118: /* db_optr ::= db_optr update */ case 128: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==128); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 119: /* db_optr ::= db_optr cachelast */ case 129: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==129); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 120: /* topic_optr ::= db_optr */ case 130: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==130); -{ yylhsminor.yy10 = yymsp[0].minor.yy10; yylhsminor.yy10.dbType = TSDB_DB_TYPE_TOPIC; } - yymsp[0].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[0].minor.yy470; yylhsminor.yy470.dbType = TSDB_DB_TYPE_TOPIC; } + yymsp[0].minor.yy470 = yylhsminor.yy470; break; case 121: /* topic_optr ::= topic_optr partitions */ case 131: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==131); -{ yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy10 = yylhsminor.yy10; +{ yylhsminor.yy470 = yymsp[-1].minor.yy470; yylhsminor.yy470.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 
10); } + yymsp[-1].minor.yy470 = yylhsminor.yy470; break; case 122: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy10); yymsp[1].minor.yy10.dbType = TSDB_DB_TYPE_DEFAULT;} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy470); yymsp[1].minor.yy470.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 132: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSetColumnType (&yylhsminor.yy487, &yymsp[0].minor.yy0); + tSetColumnType (&yylhsminor.yy279, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy487 = yylhsminor.yy487; + yymsp[0].minor.yy279 = yylhsminor.yy279; break; case 133: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy525 <= 0) { + if (yymsp[-1].minor.yy69 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSetColumnType(&yylhsminor.yy487, &yymsp[-3].minor.yy0); + tSetColumnType(&yylhsminor.yy279, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy525; // negative value of name length - tSetColumnType(&yylhsminor.yy487, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy69; // negative value of name length + tSetColumnType(&yylhsminor.yy279, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy487 = yylhsminor.yy487; + yymsp[-3].minor.yy279 = yylhsminor.yy279; break; case 134: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSetColumnType (&yylhsminor.yy487, &yymsp[-1].minor.yy0); + tSetColumnType (&yylhsminor.yy279, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy487 = yylhsminor.yy487; + yymsp[-1].minor.yy279 = yylhsminor.yy279; break; case 135: /* signed ::= INTEGER */ -{ yylhsminor.yy525 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy525 = yylhsminor.yy525; +{ yylhsminor.yy69 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy69 = yylhsminor.yy69; break; case 136: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy525 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } 
+{ yymsp[-1].minor.yy69 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 137: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy525 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy69 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; case 141: /* cmd ::= CREATE TABLE create_table_list */ -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy170;} +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy6;} break; case 142: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy72); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy208); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy170 = pCreateTable; + yylhsminor.yy6 = pCreateTable; } - yymsp[0].minor.yy170 = yylhsminor.yy170; + yymsp[0].minor.yy6 = yylhsminor.yy6; break; case 143: /* create_table_list ::= create_table_list create_from_stable */ { - taosArrayPush(yymsp[-1].minor.yy170->childTableInfo, &yymsp[0].minor.yy72); - yylhsminor.yy170 = yymsp[-1].minor.yy170; + taosArrayPush(yymsp[-1].minor.yy6->childTableInfo, &yymsp[0].minor.yy208); + yylhsminor.yy6 = yymsp[-1].minor.yy6; } - yymsp[-1].minor.yy170 = yylhsminor.yy170; + yymsp[-1].minor.yy6 = yylhsminor.yy6; break; case 144: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yylhsminor.yy170 = tSetCreateTableInfo(yymsp[-1].minor.yy345, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy6 = tSetCreateTableInfo(yymsp[-1].minor.yy189, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy6, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } - 
yymsp[-5].minor.yy170 = yylhsminor.yy170; + yymsp[-5].minor.yy6 = yylhsminor.yy6; break; case 145: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yylhsminor.yy170 = tSetCreateTableInfo(yymsp[-5].minor.yy345, yymsp[-1].minor.yy345, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy6 = tSetCreateTableInfo(yymsp[-5].minor.yy189, yymsp[-1].minor.yy189, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy6, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy170 = yylhsminor.yy170; + yymsp[-9].minor.yy6 = yylhsminor.yy6; break; case 146: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy72 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy345, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy208 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy189, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy72 = yylhsminor.yy72; + yymsp[-9].minor.yy208 = yylhsminor.yy208; break; case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; - yylhsminor.yy72 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy345, yymsp[-1].minor.yy345, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); + yylhsminor.yy208 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy189, yymsp[-1].minor.yy189, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } - yymsp[-12].minor.yy72 = yylhsminor.yy72; + yymsp[-12].minor.yy208 = yylhsminor.yy208; break; case 148: /* 
tagNamelist ::= tagNamelist COMMA ids */ -{taosArrayPush(yymsp[-2].minor.yy345, &yymsp[0].minor.yy0); yylhsminor.yy345 = yymsp[-2].minor.yy345; } - yymsp[-2].minor.yy345 = yylhsminor.yy345; +{taosArrayPush(yymsp[-2].minor.yy189, &yymsp[0].minor.yy0); yylhsminor.yy189 = yymsp[-2].minor.yy189; } + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; case 149: /* tagNamelist ::= ids */ -{yylhsminor.yy345 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy345, &yymsp[0].minor.yy0);} - yymsp[0].minor.yy345 = yylhsminor.yy345; +{yylhsminor.yy189 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy189, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy189 = yylhsminor.yy189; break; case 150: /* create_table_args ::= ifnotexists ids cpxName AS select */ { - yylhsminor.yy170 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy68, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy6 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy16, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy6, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy170 = yylhsminor.yy170; + yymsp[-4].minor.yy6 = yylhsminor.yy6; break; case 151: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy345, &yymsp[0].minor.yy487); yylhsminor.yy345 = yymsp[-2].minor.yy345; } - yymsp[-2].minor.yy345 = yylhsminor.yy345; +{taosArrayPush(yymsp[-2].minor.yy189, &yymsp[0].minor.yy279); yylhsminor.yy189 = yymsp[-2].minor.yy189; } + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; case 152: /* columnlist ::= column */ -{yylhsminor.yy345 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy345, &yymsp[0].minor.yy487);} - yymsp[0].minor.yy345 = yylhsminor.yy345; +{yylhsminor.yy189 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy189, &yymsp[0].minor.yy279);} + 
yymsp[0].minor.yy189 = yylhsminor.yy189; break; case 153: /* column ::= ids typename */ { - tSetColumnInfo(&yylhsminor.yy487, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy487); + tSetColumnInfo(&yylhsminor.yy279, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy279); } - yymsp[-1].minor.yy487 = yylhsminor.yy487; + yymsp[-1].minor.yy279 = yylhsminor.yy279; break; case 160: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true); } - yymsp[0].minor.yy2 = yylhsminor.yy2; +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy162, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy162 = yylhsminor.yy162; break; case 161: /* tagitem ::= NOW */ -{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true);} - yymsp[0].minor.yy2 = yylhsminor.yy2; +{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreateExt(&yylhsminor.yy162, &yymsp[0].minor.yy0, TK_NOW, true);} + yymsp[0].minor.yy162 = yylhsminor.yy162; + break; + case 162: /* tagitem ::= NOW PLUS VARIABLE */ +{ + yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; + tVariantCreateExt(&yymsp[-2].minor.yy162, &yymsp[0].minor.yy0, TK_PLUS, true); +} + break; + case 163: /* tagitem ::= NOW MINUS VARIABLE */ +{ + yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; + tVariantCreateExt(&yymsp[-2].minor.yy162, &yymsp[0].minor.yy0, TK_MINUS, true); +} break; - case 162: /* tagitem ::= MINUS INTEGER */ - case 163: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==163); - case 164: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==164); - case 165: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==165); + case 164: /* tagitem ::= MINUS INTEGER */ + case 165: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==165); + case 166: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==166); + case 167: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==167); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = 
yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0, true); + tVariantCreate(&yylhsminor.yy162, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy2 = yylhsminor.yy2; + yymsp[-1].minor.yy162 = yylhsminor.yy162; break; - case 166: /* select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + case 168: /* select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ { - yylhsminor.yy68 = tSetQuerySqlNode(&yymsp[-14].minor.yy0, yymsp[-13].minor.yy345, yymsp[-12].minor.yy484, yymsp[-11].minor.yy418, yymsp[-4].minor.yy345, yymsp[-2].minor.yy345, &yymsp[-9].minor.yy280, &yymsp[-7].minor.yy295, &yymsp[-6].minor.yy432, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy345, &yymsp[0].minor.yy114, &yymsp[-1].minor.yy114, yymsp[-3].minor.yy418, &yymsp[-10].minor.yy144); + yylhsminor.yy16 = tSetQuerySqlNode(&yymsp[-14].minor.yy0, yymsp[-13].minor.yy189, yymsp[-12].minor.yy36, yymsp[-11].minor.yy18, yymsp[-4].minor.yy189, yymsp[-2].minor.yy189, &yymsp[-9].minor.yy32, &yymsp[-7].minor.yy155, &yymsp[-6].minor.yy336, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy189, &yymsp[0].minor.yy38, &yymsp[-1].minor.yy38, yymsp[-3].minor.yy18, &yymsp[-10].minor.yy124); } - yymsp[-14].minor.yy68 = yylhsminor.yy68; + yymsp[-14].minor.yy16 = yylhsminor.yy16; break; - case 167: /* select ::= LP select RP */ -{yymsp[-2].minor.yy68 = yymsp[-1].minor.yy68;} + case 169: /* select ::= LP select RP */ +{yymsp[-2].minor.yy16 = yymsp[-1].minor.yy16;} break; - case 168: /* union ::= select */ -{ yylhsminor.yy345 = setSubclause(NULL, yymsp[0].minor.yy68); } - yymsp[0].minor.yy345 = yylhsminor.yy345; + case 170: /* union ::= select */ +{ yylhsminor.yy189 = setSubclause(NULL, yymsp[0].minor.yy16); } + 
yymsp[0].minor.yy189 = yylhsminor.yy189; break; - case 169: /* union ::= union UNION ALL select */ -{ yylhsminor.yy345 = appendSelectClause(yymsp[-3].minor.yy345, yymsp[0].minor.yy68); } - yymsp[-3].minor.yy345 = yylhsminor.yy345; + case 171: /* union ::= union UNION ALL select */ +{ yylhsminor.yy189 = appendSelectClause(yymsp[-3].minor.yy189, yymsp[0].minor.yy16); } + yymsp[-3].minor.yy189 = yylhsminor.yy189; break; - case 170: /* cmd ::= union */ -{ setSqlInfo(pInfo, yymsp[0].minor.yy345, NULL, TSDB_SQL_SELECT); } + case 172: /* cmd ::= union */ +{ setSqlInfo(pInfo, yymsp[0].minor.yy189, NULL, TSDB_SQL_SELECT); } break; - case 171: /* select ::= SELECT selcollist */ + case 173: /* select ::= SELECT selcollist */ { - yylhsminor.yy68 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy345, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy16 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy189, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy68 = yylhsminor.yy68; + yymsp[-1].minor.yy16 = yylhsminor.yy16; break; - case 172: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy345 = yymsp[-1].minor.yy345;} - yymsp[-1].minor.yy345 = yylhsminor.yy345; + case 174: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy189 = yymsp[-1].minor.yy189;} + yymsp[-1].minor.yy189 = yylhsminor.yy189; break; - case 173: /* sclp ::= */ - case 214: /* orderby_opt ::= */ yytestcase(yyruleno==214); -{yymsp[1].minor.yy345 = 0;} + case 175: /* sclp ::= */ + case 216: /* orderby_opt ::= */ yytestcase(yyruleno==216); +{yymsp[1].minor.yy189 = 0;} break; - case 174: /* selcollist ::= sclp distinct expr as */ + case 176: /* selcollist ::= sclp distinct expr as */ { - yylhsminor.yy345 = tSqlExprListAppend(yymsp[-3].minor.yy345, yymsp[-1].minor.yy418, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy189 = tSqlExprListAppend(yymsp[-3].minor.yy189, yymsp[-1].minor.yy18, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-3].minor.yy345 = yylhsminor.yy345; + yymsp[-3].minor.yy189 = yylhsminor.yy189; break; - case 175: /* selcollist ::= sclp STAR */ + case 177: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(pInfo, NULL, TK_ALL); - yylhsminor.yy345 = tSqlExprListAppend(yymsp[-1].minor.yy345, pNode, 0, 0); + yylhsminor.yy189 = tSqlExprListAppend(yymsp[-1].minor.yy189, pNode, 0, 0); } - yymsp[-1].minor.yy345 = yylhsminor.yy345; + yymsp[-1].minor.yy189 = yylhsminor.yy189; break; - case 176: /* as ::= AS ids */ + case 178: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 177: /* as ::= ids */ + case 179: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 178: /* as ::= */ + case 180: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 179: /* distinct ::= DISTINCT */ + case 181: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 181: /* from ::= FROM tablelist */ - case 182: /* from ::= FROM sub */ yytestcase(yyruleno==182); -{yymsp[-1].minor.yy484 = yymsp[0].minor.yy484;} + case 183: /* from ::= FROM tablelist */ + case 184: /* from ::= FROM sub */ yytestcase(yyruleno==184); +{yymsp[-1].minor.yy36 = yymsp[0].minor.yy36;} break; - case 183: /* sub ::= LP union RP */ -{yymsp[-2].minor.yy484 = addSubqueryElem(NULL, yymsp[-1].minor.yy345, NULL);} + case 185: /* sub ::= LP union RP */ +{yymsp[-2].minor.yy36 = addSubqueryElem(NULL, yymsp[-1].minor.yy189, NULL);} break; - case 184: /* sub ::= LP union RP ids */ -{yymsp[-3].minor.yy484 = addSubqueryElem(NULL, yymsp[-2].minor.yy345, &yymsp[0].minor.yy0);} + case 186: /* sub ::= LP union RP ids */ 
+{yymsp[-3].minor.yy36 = addSubqueryElem(NULL, yymsp[-2].minor.yy189, &yymsp[0].minor.yy0);} break; - case 185: /* sub ::= sub COMMA LP union RP ids */ -{yylhsminor.yy484 = addSubqueryElem(yymsp[-5].minor.yy484, yymsp[-2].minor.yy345, &yymsp[0].minor.yy0);} - yymsp[-5].minor.yy484 = yylhsminor.yy484; + case 187: /* sub ::= sub COMMA LP union RP ids */ +{yylhsminor.yy36 = addSubqueryElem(yymsp[-5].minor.yy36, yymsp[-2].minor.yy189, &yymsp[0].minor.yy0);} + yymsp[-5].minor.yy36 = yylhsminor.yy36; break; - case 186: /* tablelist ::= ids cpxName */ + case 188: /* tablelist ::= ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy484 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy36 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } - yymsp[-1].minor.yy484 = yylhsminor.yy484; + yymsp[-1].minor.yy36 = yylhsminor.yy36; break; - case 187: /* tablelist ::= ids cpxName ids */ + case 189: /* tablelist ::= ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy484 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy36 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy484 = yylhsminor.yy484; + yymsp[-2].minor.yy36 = yylhsminor.yy36; break; - case 188: /* tablelist ::= tablelist COMMA ids cpxName */ + case 190: /* tablelist ::= tablelist COMMA ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy484 = setTableNameList(yymsp[-3].minor.yy484, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy36 = setTableNameList(yymsp[-3].minor.yy36, &yymsp[-1].minor.yy0, NULL); } - yymsp[-3].minor.yy484 = yylhsminor.yy484; + yymsp[-3].minor.yy36 = yylhsminor.yy36; break; - case 189: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 191: /* tablelist ::= tablelist COMMA ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy484 = setTableNameList(yymsp[-4].minor.yy484, 
&yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy36 = setTableNameList(yymsp[-4].minor.yy36, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-4].minor.yy484 = yylhsminor.yy484; + yymsp[-4].minor.yy36 = yylhsminor.yy36; break; - case 190: /* tmvar ::= VARIABLE */ + case 192: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 191: /* timestamp ::= INTEGER */ -{ yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 193: /* timestamp ::= INTEGER */ +{ yylhsminor.yy18 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 192: /* timestamp ::= MINUS INTEGER */ - case 193: /* timestamp ::= PLUS INTEGER */ yytestcase(yyruleno==193); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy418 = yylhsminor.yy418; + case 194: /* timestamp ::= MINUS INTEGER */ + case 195: /* timestamp ::= PLUS INTEGER */ yytestcase(yyruleno==195); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy18 = tSqlExprCreateTimestamp(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy18 = yylhsminor.yy18; break; - case 194: /* timestamp ::= STRING */ -{ yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 196: /* timestamp ::= STRING */ +{ yylhsminor.yy18 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 195: /* timestamp ::= NOW */ -{ yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 197: /* timestamp ::= NOW */ +{ yylhsminor.yy18 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_NOW); } + 
yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 196: /* timestamp ::= NOW PLUS VARIABLE */ -{yymsp[-2].minor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_PLUS); } + case 198: /* timestamp ::= NOW PLUS VARIABLE */ +{yymsp[-2].minor.yy18 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_PLUS); } break; - case 197: /* timestamp ::= NOW MINUS VARIABLE */ -{yymsp[-2].minor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_MINUS); } + case 199: /* timestamp ::= NOW MINUS VARIABLE */ +{yymsp[-2].minor.yy18 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_MINUS); } break; - case 198: /* range_option ::= */ -{yymsp[1].minor.yy144.start = 0; yymsp[1].minor.yy144.end = 0;} + case 200: /* range_option ::= */ +{yymsp[1].minor.yy124.start = 0; yymsp[1].minor.yy124.end = 0;} break; - case 199: /* range_option ::= RANGE LP timestamp COMMA timestamp RP */ -{yymsp[-5].minor.yy144.start = yymsp[-3].minor.yy418; yymsp[-5].minor.yy144.end = yymsp[-1].minor.yy418;} + case 201: /* range_option ::= RANGE LP timestamp COMMA timestamp RP */ +{yymsp[-5].minor.yy124.start = yymsp[-3].minor.yy18; yymsp[-5].minor.yy124.end = yymsp[-1].minor.yy18;} break; - case 200: /* interval_option ::= intervalKey LP tmvar RP */ -{yylhsminor.yy280.interval = yymsp[-1].minor.yy0; yylhsminor.yy280.offset.n = 0; yylhsminor.yy280.token = yymsp[-3].minor.yy40;} - yymsp[-3].minor.yy280 = yylhsminor.yy280; + case 202: /* interval_option ::= intervalKey LP tmvar RP */ +{yylhsminor.yy32.interval = yymsp[-1].minor.yy0; yylhsminor.yy32.offset.n = 0; yylhsminor.yy32.token = yymsp[-3].minor.yy516;} + yymsp[-3].minor.yy32 = yylhsminor.yy32; break; - case 201: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ -{yylhsminor.yy280.interval = yymsp[-3].minor.yy0; yylhsminor.yy280.offset = yymsp[-1].minor.yy0; yylhsminor.yy280.token = yymsp[-5].minor.yy40;} - yymsp[-5].minor.yy280 = yylhsminor.yy280; + case 203: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ 
+{yylhsminor.yy32.interval = yymsp[-3].minor.yy0; yylhsminor.yy32.offset = yymsp[-1].minor.yy0; yylhsminor.yy32.token = yymsp[-5].minor.yy516;} + yymsp[-5].minor.yy32 = yylhsminor.yy32; break; - case 202: /* interval_option ::= */ -{memset(&yymsp[1].minor.yy280, 0, sizeof(yymsp[1].minor.yy280));} + case 204: /* interval_option ::= */ +{memset(&yymsp[1].minor.yy32, 0, sizeof(yymsp[1].minor.yy32));} break; - case 203: /* intervalKey ::= INTERVAL */ -{yymsp[0].minor.yy40 = TK_INTERVAL;} + case 205: /* intervalKey ::= INTERVAL */ +{yymsp[0].minor.yy516 = TK_INTERVAL;} break; - case 204: /* intervalKey ::= EVERY */ -{yymsp[0].minor.yy40 = TK_EVERY; } + case 206: /* intervalKey ::= EVERY */ +{yymsp[0].minor.yy516 = TK_EVERY; } break; - case 205: /* session_option ::= */ -{yymsp[1].minor.yy295.col.n = 0; yymsp[1].minor.yy295.gap.n = 0;} + case 207: /* session_option ::= */ +{yymsp[1].minor.yy155.col.n = 0; yymsp[1].minor.yy155.gap.n = 0;} break; - case 206: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + case 208: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - yymsp[-6].minor.yy295.col = yymsp[-4].minor.yy0; - yymsp[-6].minor.yy295.gap = yymsp[-1].minor.yy0; + yymsp[-6].minor.yy155.col = yymsp[-4].minor.yy0; + yymsp[-6].minor.yy155.gap = yymsp[-1].minor.yy0; } break; - case 207: /* windowstate_option ::= */ -{ yymsp[1].minor.yy432.col.n = 0; yymsp[1].minor.yy432.col.z = NULL;} + case 209: /* windowstate_option ::= */ +{ yymsp[1].minor.yy336.col.n = 0; yymsp[1].minor.yy336.col.z = NULL;} break; - case 208: /* windowstate_option ::= STATE_WINDOW LP ids RP */ -{ yymsp[-3].minor.yy432.col = yymsp[-1].minor.yy0; } + case 210: /* windowstate_option ::= STATE_WINDOW LP ids RP */ +{ yymsp[-3].minor.yy336.col = yymsp[-1].minor.yy0; } break; - case 209: /* fill_opt ::= */ -{ yymsp[1].minor.yy345 = 0; } + case 211: /* fill_opt ::= */ +{ yymsp[1].minor.yy189 = 0; } break; - case 210: /* fill_opt ::= 
FILL LP ID COMMA tagitemlist RP */ + case 212: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); - tVariantCreate(&A, &yymsp[-3].minor.yy0, true); + tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy345, &A, -1, 0); - yymsp[-5].minor.yy345 = yymsp[-1].minor.yy345; + tVariantListInsert(yymsp[-1].minor.yy189, &A, -1, 0); + yymsp[-5].minor.yy189 = yymsp[-1].minor.yy189; } break; - case 211: /* fill_opt ::= FILL LP ID RP */ + case 213: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy345 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); + yymsp[-3].minor.yy189 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); } break; - case 212: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 214: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 213: /* sliding_opt ::= */ + case 215: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 215: /* orderby_opt ::= ORDER BY sortlist */ -{yymsp[-2].minor.yy345 = yymsp[0].minor.yy345;} + case 217: /* orderby_opt ::= ORDER BY sortlist */ +{yymsp[-2].minor.yy189 = yymsp[0].minor.yy189;} break; - case 216: /* sortlist ::= sortlist COMMA item sortorder */ + case 218: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy345 = tVariantListAppend(yymsp[-3].minor.yy345, &yymsp[-1].minor.yy2, yymsp[0].minor.yy281); + yylhsminor.yy189 = commonItemAppend(yymsp[-3].minor.yy189, &yymsp[-1].minor.yy162, NULL, false, yymsp[0].minor.yy420); } - yymsp[-3].minor.yy345 = yylhsminor.yy345; + yymsp[-3].minor.yy189 = yylhsminor.yy189; break; - case 217: /* sortlist ::= item sortorder */ + case 219: /* sortlist ::= sortlist COMMA arrow sortorder */ { - yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[-1].minor.yy2, yymsp[0].minor.yy281); + yylhsminor.yy189 = 
commonItemAppend(yymsp[-3].minor.yy189, NULL, yymsp[-1].minor.yy18, true, yymsp[0].minor.yy420); } - yymsp[-1].minor.yy345 = yylhsminor.yy345; + yymsp[-3].minor.yy189 = yylhsminor.yy189; break; - case 218: /* item ::= ids cpxName */ + case 220: /* sortlist ::= item sortorder */ { - toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - - tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0, true); + yylhsminor.yy189 = commonItemAppend(NULL, &yymsp[-1].minor.yy162, NULL, false, yymsp[0].minor.yy420); +} + yymsp[-1].minor.yy189 = yylhsminor.yy189; + break; + case 221: /* sortlist ::= arrow sortorder */ +{ + yylhsminor.yy189 = commonItemAppend(NULL, NULL, yymsp[-1].minor.yy18, true, yymsp[0].minor.yy420); } - yymsp[-1].minor.yy2 = yylhsminor.yy2; + yymsp[-1].minor.yy189 = yylhsminor.yy189; + break; + case 222: /* item ::= ID */ +{ + toTSDBType(yymsp[0].minor.yy0.type); + tVariantCreate(&yylhsminor.yy162, &yymsp[0].minor.yy0); +} + yymsp[0].minor.yy162 = yylhsminor.yy162; + break; + case 223: /* item ::= ID DOT ID */ +{ + toTSDBType(yymsp[-2].minor.yy0.type); + yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); + tVariantCreate(&yylhsminor.yy162, &yymsp[-2].minor.yy0); +} + yymsp[-2].minor.yy162 = yylhsminor.yy162; + break; + case 224: /* sortorder ::= ASC */ +{ yymsp[0].minor.yy420 = TSDB_ORDER_ASC; } + break; + case 225: /* sortorder ::= DESC */ +{ yymsp[0].minor.yy420 = TSDB_ORDER_DESC;} break; - case 219: /* sortorder ::= ASC */ -{ yymsp[0].minor.yy281 = TSDB_ORDER_ASC; } + case 226: /* sortorder ::= */ +{ yymsp[1].minor.yy420 = TSDB_ORDER_ASC; } break; - case 220: /* sortorder ::= DESC */ -{ yymsp[0].minor.yy281 = TSDB_ORDER_DESC;} + case 227: /* groupby_opt ::= */ +{ yymsp[1].minor.yy189 = 0;} break; - case 221: /* sortorder ::= */ -{ yymsp[1].minor.yy281 = TSDB_ORDER_ASC; } + case 228: /* groupby_opt ::= GROUP BY grouplist */ +{ yymsp[-2].minor.yy189 = yymsp[0].minor.yy189;} break; - case 222: /* groupby_opt ::= */ -{ 
yymsp[1].minor.yy345 = 0;} + case 229: /* grouplist ::= grouplist COMMA item */ +{ + yylhsminor.yy189 = commonItemAppend(yymsp[-2].minor.yy189, &yymsp[0].minor.yy162, NULL, false, -1); +} + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; - case 223: /* groupby_opt ::= GROUP BY grouplist */ -{ yymsp[-2].minor.yy345 = yymsp[0].minor.yy345;} + case 230: /* grouplist ::= grouplist COMMA arrow */ +{ + yylhsminor.yy189 = commonItemAppend(yymsp[-2].minor.yy189, NULL, yymsp[0].minor.yy18, true, -1); +} + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; - case 224: /* grouplist ::= grouplist COMMA item */ + case 231: /* grouplist ::= item */ { - yylhsminor.yy345 = tVariantListAppend(yymsp[-2].minor.yy345, &yymsp[0].minor.yy2, -1); + yylhsminor.yy189 = commonItemAppend(NULL, &yymsp[0].minor.yy162, NULL, false, -1); } - yymsp[-2].minor.yy345 = yylhsminor.yy345; + yymsp[0].minor.yy189 = yylhsminor.yy189; break; - case 225: /* grouplist ::= item */ + case 232: /* grouplist ::= arrow */ { - yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[0].minor.yy2, -1); + yylhsminor.yy189 = commonItemAppend(NULL, NULL, yymsp[0].minor.yy18, true, -1); } - yymsp[0].minor.yy345 = yylhsminor.yy345; + yymsp[0].minor.yy189 = yylhsminor.yy189; + break; + case 233: /* having_opt ::= */ + case 243: /* where_opt ::= */ yytestcase(yyruleno==243); + case 293: /* expritem ::= */ yytestcase(yyruleno==293); +{yymsp[1].minor.yy18 = 0;} + break; + case 234: /* having_opt ::= HAVING expr */ + case 244: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==244); +{yymsp[-1].minor.yy18 = yymsp[0].minor.yy18;} + break; + case 235: /* limit_opt ::= */ + case 239: /* slimit_opt ::= */ yytestcase(yyruleno==239); +{yymsp[1].minor.yy38.limit = -1; yymsp[1].minor.yy38.offset = 0;} + break; + case 236: /* limit_opt ::= LIMIT signed */ + case 240: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==240); +{yymsp[-1].minor.yy38.limit = yymsp[0].minor.yy69; yymsp[-1].minor.yy38.offset = 0;} + break; + case 237: /* 
limit_opt ::= LIMIT signed OFFSET signed */ +{ yymsp[-3].minor.yy38.limit = yymsp[-2].minor.yy69; yymsp[-3].minor.yy38.offset = yymsp[0].minor.yy69;} break; - case 226: /* having_opt ::= */ - case 236: /* where_opt ::= */ yytestcase(yyruleno==236); - case 280: /* expritem ::= */ yytestcase(yyruleno==280); -{yymsp[1].minor.yy418 = 0;} + case 238: /* limit_opt ::= LIMIT signed COMMA signed */ +{ yymsp[-3].minor.yy38.limit = yymsp[0].minor.yy69; yymsp[-3].minor.yy38.offset = yymsp[-2].minor.yy69;} break; - case 227: /* having_opt ::= HAVING expr */ - case 237: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==237); -{yymsp[-1].minor.yy418 = yymsp[0].minor.yy418;} + case 241: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy38.limit = yymsp[-2].minor.yy69; yymsp[-3].minor.yy38.offset = yymsp[0].minor.yy69;} break; - case 228: /* limit_opt ::= */ - case 232: /* slimit_opt ::= */ yytestcase(yyruleno==232); -{yymsp[1].minor.yy114.limit = -1; yymsp[1].minor.yy114.offset = 0;} + case 242: /* slimit_opt ::= SLIMIT signed COMMA signed */ +{yymsp[-3].minor.yy38.limit = yymsp[0].minor.yy69; yymsp[-3].minor.yy38.offset = yymsp[-2].minor.yy69;} break; - case 229: /* limit_opt ::= LIMIT signed */ - case 233: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==233); -{yymsp[-1].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-1].minor.yy114.offset = 0;} + case 245: /* expr ::= LP expr RP */ +{yylhsminor.yy18 = yymsp[-1].minor.yy18; yylhsminor.yy18->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy18->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 230: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy114.limit = yymsp[-2].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[0].minor.yy525;} + case 246: /* expr ::= ID */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 231: /* 
limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[-2].minor.yy525;} + case 247: /* expr ::= ID DOT ID */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 234: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy114.limit = yymsp[-2].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[0].minor.yy525;} + case 248: /* expr ::= ID DOT STAR */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 235: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[-2].minor.yy525;} + case 249: /* expr ::= INTEGER */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 238: /* expr ::= LP expr RP */ -{yylhsminor.yy418 = yymsp[-1].minor.yy418; yylhsminor.yy418->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy418->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 250: /* expr ::= MINUS INTEGER */ + case 251: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==251); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy18 = yylhsminor.yy18; break; - case 239: /* expr ::= ID */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 252: /* expr ::= FLOAT */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy18 = 
yylhsminor.yy18; break; - case 240: /* expr ::= ID DOT ID */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 253: /* expr ::= MINUS FLOAT */ + case 254: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==254); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy18 = yylhsminor.yy18; break; - case 241: /* expr ::= ID DOT STAR */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 255: /* expr ::= STRING */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 242: /* expr ::= INTEGER */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 256: /* expr ::= NOW */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 243: /* expr ::= MINUS INTEGER */ - case 244: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==244); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy418 = yylhsminor.yy418; + case 257: /* expr ::= VARIABLE */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 245: /* expr ::= FLOAT */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 258: /* expr ::= PLUS VARIABLE */ + case 259: /* expr ::= MINUS 
VARIABLE */ yytestcase(yyruleno==259); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_VARIABLE);} + yymsp[-1].minor.yy18 = yylhsminor.yy18; break; - case 246: /* expr ::= MINUS FLOAT */ - case 247: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==247); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy418 = yylhsminor.yy418; + case 260: /* expr ::= BOOL */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 248: /* expr ::= STRING */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 261: /* expr ::= NULL */ +{ yylhsminor.yy18 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NULL);} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 249: /* expr ::= NOW */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 262: /* expr ::= ID LP exprlist RP */ +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy18 = tSqlExprCreateFunction(yymsp[-1].minor.yy189, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy18 = yylhsminor.yy18; break; - case 250: /* expr ::= VARIABLE */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 263: /* expr ::= ID LP STAR RP */ +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy18 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy18 = yylhsminor.yy18; break; - case 251: /* expr ::= PLUS VARIABLE */ - case 252: /* expr 
::= MINUS VARIABLE */ yytestcase(yyruleno==252); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_VARIABLE);} - yymsp[-1].minor.yy418 = yylhsminor.yy418; + case 264: /* expr ::= ID LP expr AS typename RP */ +{ tStrTokenAppend(pInfo->funcs, &yymsp[-5].minor.yy0); yylhsminor.yy18 = tSqlExprCreateFuncWithParams(pInfo, yymsp[-3].minor.yy18, &yymsp[-1].minor.yy279, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, yymsp[-5].minor.yy0.type); } + yymsp[-5].minor.yy18 = yylhsminor.yy18; break; - case 253: /* expr ::= BOOL */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 265: /* expr ::= expr IS NULL */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, NULL, TK_ISNULL);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 254: /* expr ::= NULL */ -{ yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NULL);} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 266: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-3].minor.yy18, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy18 = yylhsminor.yy18; break; - case 255: /* expr ::= ID LP exprlist RP */ -{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy418 = tSqlExprCreateFunction(yymsp[-1].minor.yy345, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy418 = yylhsminor.yy418; + case 267: /* expr ::= expr LT expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_LT);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 256: /* expr ::= ID LP STAR RP */ -{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy418 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy418 = yylhsminor.yy418; + case 268: /* expr ::= expr GT 
expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_GT);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 257: /* expr ::= expr IS NULL */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, NULL, TK_ISNULL);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 269: /* expr ::= expr LE expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_LE);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 258: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-3].minor.yy418, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy418 = yylhsminor.yy418; + case 270: /* expr ::= expr GE expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_GE);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 259: /* expr ::= expr LT expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LT);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 271: /* expr ::= expr NE expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_NE);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 260: /* expr ::= expr GT expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_GT);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 272: /* expr ::= expr EQ expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_EQ);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 261: /* expr ::= expr LE expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LE);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 273: /* expr ::= expr BETWEEN expr AND expr */ +{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy18); yylhsminor.yy18 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy18, yymsp[-2].minor.yy18, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy18, TK_LE), TK_AND);} + yymsp[-4].minor.yy18 = 
yylhsminor.yy18; break; - case 262: /* expr ::= expr GE expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_GE);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 274: /* expr ::= expr AND expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_AND);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 263: /* expr ::= expr NE expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_NE);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 275: /* expr ::= expr OR expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_OR); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 264: /* expr ::= expr EQ expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_EQ);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 276: /* expr ::= expr PLUS expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_PLUS); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 265: /* expr ::= expr BETWEEN expr AND expr */ -{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy418); yylhsminor.yy418 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy418, yymsp[-2].minor.yy418, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy418, TK_LE), TK_AND);} - yymsp[-4].minor.yy418 = yylhsminor.yy418; + case 277: /* expr ::= expr MINUS expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_MINUS); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 266: /* expr ::= expr AND expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_AND);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 278: /* expr ::= expr STAR expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_STAR); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 267: /* expr ::= expr OR expr */ -{yylhsminor.yy418 = 
tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_OR); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 279: /* expr ::= expr SLASH expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_DIVIDE);} + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 268: /* expr ::= expr PLUS expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_PLUS); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 280: /* expr ::= expr REM expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_REM); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 269: /* expr ::= expr MINUS expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_MINUS); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 281: /* expr ::= expr LIKE expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_LIKE); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 270: /* expr ::= expr STAR expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_STAR); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 282: /* expr ::= expr MATCH expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_MATCH); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 271: /* expr ::= expr SLASH expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_DIVIDE);} - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 283: /* expr ::= expr NMATCH expr */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-2].minor.yy18, yymsp[0].minor.yy18, TK_NMATCH); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 272: /* expr ::= expr REM expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_REM); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 284: /* expr ::= ID CONTAINS STRING */ +{ tSqlExpr* S = 
tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING); yylhsminor.yy18 = tSqlExprCreate(S, M, TK_CONTAINS); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 273: /* expr ::= expr LIKE expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LIKE); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 285: /* expr ::= ID DOT ID CONTAINS STRING */ +{ yymsp[-4].minor.yy0.n += (1+yymsp[-2].minor.yy0.n); tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &yymsp[-4].minor.yy0, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING); yylhsminor.yy18 = tSqlExprCreate(S, M, TK_CONTAINS); } + yymsp[-4].minor.yy18 = yylhsminor.yy18; break; - case 274: /* expr ::= expr MATCH expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_MATCH); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 286: /* arrow ::= ID ARROW STRING */ +{tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING); yylhsminor.yy18 = tSqlExprCreate(S, M, TK_ARROW); } + yymsp[-2].minor.yy18 = yylhsminor.yy18; break; - case 275: /* expr ::= expr NMATCH expr */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_NMATCH); } - yymsp[-2].minor.yy418 = yylhsminor.yy418; + case 287: /* arrow ::= ID DOT ID ARROW STRING */ +{yymsp[-4].minor.yy0.n += (1+yymsp[-2].minor.yy0.n); tSqlExpr* S = tSqlExprCreateIdValue(pInfo, &yymsp[-4].minor.yy0, TK_ID); tSqlExpr* M = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING); yylhsminor.yy18 = tSqlExprCreate(S, M, TK_ARROW); } + yymsp[-4].minor.yy18 = yylhsminor.yy18; break; - case 276: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy418 = tSqlExprCreate(yymsp[-4].minor.yy418, (tSqlExpr*)yymsp[-1].minor.yy345, TK_IN); } - yymsp[-4].minor.yy418 = yylhsminor.yy418; + case 
288: /* expr ::= arrow */ + case 292: /* expritem ::= expr */ yytestcase(yyruleno==292); +{yylhsminor.yy18 = yymsp[0].minor.yy18;} + yymsp[0].minor.yy18 = yylhsminor.yy18; break; - case 277: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy345 = tSqlExprListAppend(yymsp[-2].minor.yy345,yymsp[0].minor.yy418,0, 0);} - yymsp[-2].minor.yy345 = yylhsminor.yy345; + case 289: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy18 = tSqlExprCreate(yymsp[-4].minor.yy18, (tSqlExpr*)yymsp[-1].minor.yy189, TK_IN); } + yymsp[-4].minor.yy18 = yylhsminor.yy18; break; - case 278: /* exprlist ::= expritem */ -{yylhsminor.yy345 = tSqlExprListAppend(0,yymsp[0].minor.yy418,0, 0);} - yymsp[0].minor.yy345 = yylhsminor.yy345; + case 290: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy189 = tSqlExprListAppend(yymsp[-2].minor.yy189,yymsp[0].minor.yy18,0, 0);} + yymsp[-2].minor.yy189 = yylhsminor.yy189; break; - case 279: /* expritem ::= expr */ -{yylhsminor.yy418 = yymsp[0].minor.yy418;} - yymsp[0].minor.yy418 = yylhsminor.yy418; + case 291: /* exprlist ::= expritem */ +{yylhsminor.yy189 = tSqlExprListAppend(0,yymsp[0].minor.yy18,0, 0);} + yymsp[0].minor.yy189 = yylhsminor.yy189; break; - case 281: /* cmd ::= RESET QUERY CACHE */ + case 294: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 282: /* cmd ::= SYNCDB ids REPLICA */ + case 295: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} break; - case 283: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 296: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); 
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 284: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 297: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3551,21 +3824,21 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 285: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + case 298: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 286: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 299: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 287: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 300: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3576,7 +3849,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 288: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 301: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3590,33 +3863,33 @@ static YYACTIONTYPE yy_reduce( 
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 289: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 302: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); - A = tVariantListAppend(A, &yymsp[0].minor.yy2, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy162, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 290: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + case 303: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 291: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 304: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 292: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 305: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3627,21 +3900,21 @@ 
static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 293: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + case 306: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 294: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 307: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 295: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 308: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3652,7 +3925,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 296: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 309: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3666,32 +3939,32 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 297: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + case 310: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ 
tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); - A = tVariantListAppend(A, &yymsp[0].minor.yy2, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy162, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 298: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + case 311: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy189, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 299: /* cmd ::= KILL CONNECTION INTEGER */ + case 312: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 300: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 313: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 301: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 314: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: @@ -3862,12 +4135,56 @@ void Parse( } #endif - do{ + while(1){ /* Exit by "break" */ + assert( yypParser->yytos>=yypParser->yystack ); assert( yyact==yypParser->yytos->stateno ); yyact = 
yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ - yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, - yyminor ParseCTX_PARAM); + unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */ + assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); +#ifndef NDEBUG + if( yyTraceFILE ){ + int yysize = yyRuleInfoNRhs[yyruleno]; + if( yysize ){ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + yyTracePrompt, + yyruleno, yyRuleName[yyruleno], + yyrulenoyytos[yysize].stateno); + }else{ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n", + yyTracePrompt, yyruleno, yyRuleName[yyruleno], + yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == + (int)(yypParser->yytos - yypParser->yystack)); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=yypParser->yystackEnd ){ + yyStackOverflow(yypParser); + break; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + break; + } + } +#endif + } + yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); #ifndef YYNOERRORRECOVERY @@ -3980,7 +4297,7 @@ void Parse( break; #endif } - }while( yypParser->yytos>yypParser->yystack ); + } #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 25495182498f7c1a82f9f9459290e44f082f5eb2..740a1e2b7d2784347b19be328319fc19f417f25d 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -488,6 +488,12 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) { msgLen = (int32_t)htonl((uint32_t)rpcHead.msgLen); int32_t size = msgLen + tsRpcOverhead; + // TODO: reason not found yet, workaround to avoid first + if (msgLen <= 0 || size < 0) { + tError("%s %p invalid size for malloc, msgLen:%d, size:%d", 
pThreadObj->label, pFdObj->thandle, msgLen, size); + return -1; + } + buffer = malloc(size); if (NULL == buffer) { tError("%s %p TCP malloc(size:%d) fail", pThreadObj->label, pFdObj->thandle, msgLen); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 68bafb09ca82a7a55b8eb3cd94c24138f5ef3a6c..a8b5a8f403cb9ca0c897d77db280e5a331c1f32e 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1502,7 +1502,10 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle } } - int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen); + SOCKET peerFd = pPeer->peerFd; + pthread_mutex_unlock(&pNode->mutex); + int32_t retLen = taosWriteMsg(peerFd, pSyncHead, fwdLen); + pthread_mutex_lock(&pNode->mutex); if (retLen == fwdLen) { sTrace("%s, forward is sent, role:%s sstatus:%s hver:%" PRIu64 " contLen:%d", pPeer->id, syncRole[pPeer->role], syncStatus[pPeer->sstatus], pWalHead->version, pWalHead->len); diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 0f472cfbfc443e57e538068d28cb3c2c8d228dec..875bb6258125b88399558c75a6169dea67bfdde8 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -3,9 +3,10 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tsdb ${SRC}) -TARGET_LINK_LIBRARIES(tsdb tfs common tutil) +TARGET_LINK_LIBRARIES(tsdb tfs common tutil cJson) IF (TD_TSDB_PLUGINS) TARGET_LINK_LIBRARIES(tsdb tsdbPlugins) diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 0b7af561cda8d9c37201f99c7ab467b4e1598d37..9cdb8a83aa266d04d91e07d515f0acb56703f880 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -18,6 +18,14 @@ #define TSDB_MAX_TABLE_SCHEMAS 16 +#pragma pack (push,1) +typedef struct jsonMapValue{ + void* table; // STable * + int16_t colId; // the json col ID. 
+}JsonMapValue; + +#pragma pack (pop) + typedef struct STable { STableId tableId; ETableType type; @@ -28,6 +36,7 @@ typedef struct STable { STSchema* tagSchema; SKVRow tagVal; SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index + SHashObj* jsonKeyMap; // For json tag key {"key":[t1, t2, t3]} void* eventHandler; // TODO void* streamHandler; // TODO TSKEY lastKey; @@ -89,6 +98,8 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId); int tsdbUpdateLastColSchema(STable *pTable, STSchema *pNewSchema); STSchema* tsdbGetTableLatestSchema(STable *pTable); void tsdbFreeLastColumns(STable* pTable); +int tsdbCompareJsonMapValue(const void* a, const void* b); +void* tsdbGetJsonTagValue(STable* pTable, char* key, int32_t keyLen, int16_t* colId); static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) { if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) { diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index dfa4b74b7a5720398f9fc748078a0be6d870dda7..45872b1dcef5904e269b2fda6fb4aa5cb3b26a9b 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -512,6 +512,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } // Check keep +#if 0 // already checked and set in mnodeSetDefaultDbCfg if (pCfg->keep == -1) { pCfg->keep = TSDB_DEFAULT_KEEP; } else { @@ -532,7 +533,25 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { if (pCfg->keep2 == 0) { pCfg->keep2 = pCfg->keep; } +#endif + + int32_t keepMin = pCfg->keep1; + int32_t keepMid = pCfg->keep2; + int32_t keepMax = pCfg->keep; + if (keepMin > keepMid) { + SWAP(keepMin, keepMid, int32_t); + } + if (keepMin > keepMax) { + SWAP(keepMin, keepMax, int32_t); + } + if (keepMid > keepMax) { + SWAP(keepMid, keepMax, int32_t); + } + + pCfg->keep = keepMax; + pCfg->keep1 = keepMin; + pCfg->keep2 = keepMid; // update check if (pCfg->update < TD_ROW_DISCARD_UPDATE || pCfg->update > TD_ROW_PARTIAL_UPDATE) pCfg->update = 
TD_ROW_DISCARD_UPDATE; @@ -660,9 +679,9 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea int numColumns; int32_t blockIdx; SDataStatis* pBlockStatis = NULL; - SMemRow row = NULL; + // SMemRow row = NULL; // restore last column data with last schema - + int err = 0; numColumns = schemaNCols(pSchema); @@ -676,15 +695,15 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } } - row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); - if (row == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - err = -1; - goto out; - } + // row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); + // if (row == NULL) { + // terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + // err = -1; + // goto out; + // } - memRowSetType(row, SMEM_ROW_DATA); - tdInitDataRow(memRowDataBody(row), pSchema); + // memRowSetType(row, SMEM_ROW_DATA); + // tdInitDataRow(memRowDataBody(row), pSchema); // first load block index info if (tsdbLoadBlockInfo(pReadh, NULL, NULL) < 0) { @@ -743,10 +762,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; const void* pColData = tdGetColDataOfRow(pDataCol, rowId); - tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); - //SDataCol *pDataCol = readh.pDCols[0]->cols + j; - void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); - if (isNull(value, pCol->type)) { + // tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); + // SDataCol *pDataCol = readh.pDCols[0]->cols + j; + // void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + + // + // pCol->offset); + if (isNull(pColData, pCol->type)) { continue; } @@ -761,13 +782,14 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea pLastCol->pData = 
malloc(bytes); pLastCol->bytes = bytes; pLastCol->colId = pCol->colId; - memcpy(pLastCol->pData, value, bytes); + memcpy(pLastCol->pData, pColData, bytes); // save row ts(in column 0) pDataCol = pReadh->pDCols[0]->cols + 0; - pCol = schemaColAt(pSchema, 0); - tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); - pLastCol->ts = memRowKey(row); + // pCol = schemaColAt(pSchema, 0); + // tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); + // pLastCol->ts = memRowKey(row); + pLastCol->ts = tdGetKey(*(TKEY *)(tdGetColDataOfRow(pDataCol, rowId))); pTable->restoreColumnNum += 1; @@ -779,7 +801,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } out: - taosTZfree(row); + // taosTZfree(row); tfree(pBlockStatis); if (err == 0 && numColumns <= pTable->restoreColumnNum) { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 6bcffd85f411085f6fc973151fca5d97cf4bd87c..8958df3ced9a1577735660293eda0717b46db49a 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -564,7 +564,7 @@ static void tsdbFreeTableData(STableData *pTableData) { } } -static char *tsdbGetTsTupleKey(const void *data) { return memRowTuple((SMemRow)data); } +static char *tsdbGetTsTupleKey(const void *data) { return memRowKeys((SMemRow)data); } static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ASSERT(pMemTable->maxTables < maxTables); @@ -714,13 +714,17 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pPoints, SMemRow* pLastRow) { - + //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! 
if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) { (*pPoints)++; return NULL; } + tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), + "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), + memRowKey(row1)); + if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) { void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1)); if(pMem == NULL) return NULL; @@ -841,7 +845,6 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t * int64_t dsize = SL_SIZE(pTableData->pData) - osize; (*pAffectedRows) += points; - if(lastRow != NULL) { TSKEY lastRowKey = memRowKey(lastRow); if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey; diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 1bb9733970cf6730366adda7f89ec5f09577df92..18225ab70cde20ab2d18c80b5327bd24be3827df 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -13,6 +13,8 @@ * along with this program. If not, see . 
*/ #include "tsdbint.h" +#include "tcompare.h" +#include "tutil.h" #define TSDB_SUPER_TABLE_SL_LEVEL 5 #define DEFAULT_TAG_INDEX_COLUMN 0 @@ -118,11 +120,13 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { tsdbWLockRepoMeta(pRepo); if (newSuper) { if (tsdbAddTableToMeta(pRepo, super, true, false) < 0) { + super = NULL; tsdbUnlockRepoMeta(pRepo); goto _err; } } if (tsdbAddTableToMeta(pRepo, table, true, false) < 0) { + table = NULL; tsdbUnlockRepoMeta(pRepo); goto _err; } @@ -200,7 +204,7 @@ _err: return -1; } -void *tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes) { +void *tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type) { // TODO: this function should be changed also STSchema *pSchema = tsdbGetTableTagSchema((STable*) pTable); @@ -209,12 +213,13 @@ void *tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_ return NULL; // No matched tag volumn } - char *val = tdGetKVRowValOfCol(((STable*)pTable)->tagVal, colId); - assert(type == pCol->type); - - // if (val != NULL && IS_VAR_DATA_TYPE(type)) { - // assert(varDataLen(val) < pCol->bytes); - // } + char *val = NULL; + if (pCol->type == TSDB_DATA_TYPE_JSON){ + val = ((STable*)pTable)->tagVal; + }else{ + val = tdGetKVRowValOfCol(((STable*)pTable)->tagVal, colId); + assert(type == pCol->type); + } return val; } @@ -388,7 +393,8 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) { TSDB_WUNLOCK_TABLE(pTable->pSuper); } - bool isChangeIndexCol = (pMsg->colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0))); + bool isChangeIndexCol = (pMsg->colId == colColId(schemaColAt(pTable->pSuper->tagSchema, 0))) + || pMsg->type == TSDB_DATA_TYPE_JSON; // STColumn *pCol = bsearch(&(pMsg->colId), pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar); // ASSERT(pCol != NULL); @@ -397,7 +403,12 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) { tsdbRemoveTableFromIndex(pMeta, pTable); } 
TSDB_WLOCK_TABLE(pTable); - tdSetKVRowDataOfCol(&(pTable->tagVal), pMsg->colId, pMsg->type, POINTER_SHIFT(pMsg->data, pMsg->schemaLen)); + if (pMsg->type == TSDB_DATA_TYPE_JSON){ + kvRowFree(pTable->tagVal); + pTable->tagVal = tdKVRowDup(POINTER_SHIFT(pMsg->data, pMsg->schemaLen)); + }else{ + tdSetKVRowDataOfCol(&(pTable->tagVal), pMsg->colId, pMsg->type, POINTER_SHIFT(pMsg->data, pMsg->schemaLen)); + } TSDB_WUNLOCK_TABLE(pTable); if (isChangeIndexCol) { tsdbAddTableIntoIndex(pMeta, pTable, false); @@ -850,11 +861,22 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST } pTable->tagVal = NULL; STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, - SL_ALLOW_DUP_KEY, getTagIndexKey); - if (pTable->pIndex == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + if(pCol->type == TSDB_DATA_TYPE_JSON){ + assert(pTable->tagSchema->numOfCols == 1); + pTable->jsonKeyMap = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + if (pTable->jsonKeyMap == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbFreeTable(pTable); + return NULL; + } + taosHashSetFreeFp(pTable->jsonKeyMap, taosArrayDestroyForHash); + }else{ + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, + SL_ALLOW_DUP_KEY, getTagIndexKey); + if (pTable->pIndex == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } } } else { pTable->type = pCfg->type; @@ -924,6 +946,7 @@ static void tsdbFreeTable(STable *pTable) { kvRowFree(pTable->tagVal); tSkipListDestroy(pTable->pIndex); + taosHashCleanup(pTable->jsonKeyMap); taosTZfree(pTable->lastRow); tfree(pTable->sql); @@ -1048,16 +1071,92 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro tsdbUnRefTable(pTable); } +void* tsdbGetJsonTagValue(STable* pTable, 
char* key, int32_t keyLen, int16_t* retColId){ + assert(TABLE_TYPE(pTable) == TSDB_CHILD_TABLE); + STable* superTable= pTable->pSuper; + SArray** data = (SArray**)taosHashGet(superTable->jsonKeyMap, key, keyLen); + if(data == NULL) return NULL; + JsonMapValue jmvalue = {pTable, 0}; + JsonMapValue* p = taosArraySearch(*data, &jmvalue, tsdbCompareJsonMapValue, TD_EQ); + if (p == NULL) return NULL; + int16_t colId = p->colId + 1; + if(retColId) *retColId = p->colId; + return tdGetKVRowValOfCol(pTable->tagVal, colId); +} + +int tsdbCompareJsonMapValue(const void* a, const void* b) { + const JsonMapValue* x = (const JsonMapValue*)a; + const JsonMapValue* y = (const JsonMapValue*)b; + if (x->table > y->table) return 1; + if (x->table < y->table) return -1; + return 0; +} + static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper) { ASSERT(pTable->type == TSDB_CHILD_TABLE && pTable != NULL); STable *pSTable = tsdbGetTableByUid(pMeta, TABLE_SUID(pTable)); ASSERT(pSTable != NULL); pTable->pSuper = pSTable; + if (refSuper) T_REF_INC(pSTable); - tSkipListPut(pSTable->pIndex, (void *)pTable); + if(pSTable->tagSchema->columns[0].type == TSDB_DATA_TYPE_JSON){ + ASSERT(pSTable->tagSchema->numOfCols == 1); + int16_t nCols = kvRowNCols(pTable->tagVal); + ASSERT(nCols%2 == 1); + // check first + for (int j = 0; j < nCols; ++j) { + if (j != 0 && j % 2 == 0) continue; // jump value + SColIdx *pColIdx = kvRowColIdxAt(pTable->tagVal, j); + void *val = (kvRowColVal(pTable->tagVal, pColIdx)); + if (j == 0) { // json value is the first + int8_t jsonPlaceHolder = *(int8_t *)val; + ASSERT(jsonPlaceHolder == TSDB_DATA_JSON_PLACEHOLDER); + continue; + } + if (j == 1) { + uint32_t jsonNULL = *(uint32_t *)(varDataVal(val)); + ASSERT(jsonNULL == TSDB_DATA_JSON_NULL); + } + + // then insert + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(varDataVal(val), varDataLen(val), keyMd5); + SArray *tablistNew = NULL; + SArray **tablist = (SArray 
**)taosHashGet(pSTable->jsonKeyMap, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + if (tablist == NULL) { + tablistNew = taosArrayInit(8, sizeof(JsonMapValue)); + if (tablistNew == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("out of memory when alloc json tag array"); + return -1; + } + if (taosHashPut(pSTable->jsonKeyMap, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN, &tablistNew, sizeof(void *)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("out of memory when put json tag array"); + return -1; + } + } else { + tablistNew = *tablist; + } + + JsonMapValue jmvalue = {pTable, pColIdx->colId}; + void* p = taosArraySearch(tablistNew, &jmvalue, tsdbCompareJsonMapValue, TD_EQ); + if (p == NULL) { + p = taosArraySearch(tablistNew, &jmvalue, tsdbCompareJsonMapValue, TD_GE); + if(p == NULL){ + taosArrayPush(tablistNew, &jmvalue); + }else{ + taosArrayInsert(tablistNew, TARRAY_ELEM_IDX(tablistNew, p), &jmvalue); + } + }else{ + tsdbError("insert dumplicate"); + } + } + }else{ + tSkipListPut(pSTable->pIndex, (void *)pTable); + } - if (refSuper) T_REF_INC(pSTable); return 0; } @@ -1067,22 +1166,58 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { STable *pSTable = pTable->pSuper; ASSERT(pSTable != NULL); - char* key = getTagIndexKey(pTable); - SArray *res = tSkipListGet(pSTable->pIndex, key); + if(pSTable->tagSchema->columns[0].type == TSDB_DATA_TYPE_JSON){ + ASSERT(pSTable->tagSchema->numOfCols == 1); + int16_t nCols = kvRowNCols(pTable->tagVal); + ASSERT(nCols%2 == 1); + for (int j = 0; j < nCols; ++j) { + if (j != 0 && j%2 == 0) continue; // jump value + SColIdx * pColIdx = kvRowColIdxAt(pTable->tagVal, j); + void* val = (kvRowColVal(pTable->tagVal, pColIdx)); + if (j == 0){ // json value is the first + int8_t jsonPlaceHolder = *(int8_t*)val; + ASSERT(jsonPlaceHolder == TSDB_DATA_JSON_PLACEHOLDER); + continue; + } + if (j == 1){ + uint32_t jsonNULL = *(uint32_t*)(varDataVal(val)); + ASSERT(jsonNULL == TSDB_DATA_JSON_NULL); + } + + char 
keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(varDataVal(val), varDataLen(val), keyMd5); + SArray** tablist = (SArray **)taosHashGet(pSTable->jsonKeyMap, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + if(tablist == NULL) { + tsdbError("json tag no key error,%d", j); + continue; + } - size_t size = taosArrayGetSize(res); - ASSERT(size > 0); + JsonMapValue jmvalue = {pTable, pColIdx->colId}; + void* p = taosArraySearch(*tablist, &jmvalue, tsdbCompareJsonMapValue, TD_EQ); + if (p == NULL) { + tsdbError("json tag no tableid error,%d", j); + continue; + } + taosArrayRemove(*tablist, TARRAY_ELEM_IDX(*tablist, p)); + } + }else { + char * key = getTagIndexKey(pTable); + SArray *res = tSkipListGet(pSTable->pIndex, key); - for (int32_t i = 0; i < size; ++i) { - SSkipListNode *pNode = taosArrayGetP(res, i); + size_t size = taosArrayGetSize(res); + ASSERT(size > 0); - // STableIndexElem* pElem = (STableIndexElem*) SL_GET_NODE_DATA(pNode); - if ((STable *)SL_GET_NODE_DATA(pNode) == pTable) { // this is the exact what we need - tSkipListRemoveNode(pSTable->pIndex, pNode); + for (int32_t i = 0; i < size; ++i) { + SSkipListNode *pNode = taosArrayGetP(res, i); + + // STableIndexElem* pElem = (STableIndexElem*) SL_GET_NODE_DATA(pNode); + if ((STable *)SL_GET_NODE_DATA(pNode) == pTable) { // this is the exact what we need + tSkipListRemoveNode(pSTable->pIndex, pNode); + } } - } - taosArrayDestroy(res); + taosArrayDestroy(res); + } return 0; } @@ -1320,12 +1455,22 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { buf = tdDecodeSchema(buf, &(pTable->tagSchema)); STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, + if(pCol->type == TSDB_DATA_TYPE_JSON){ + assert(pTable->tagSchema->numOfCols == 1); + pTable->jsonKeyMap = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, 
HASH_ENTRY_LOCK); + if (pTable->jsonKeyMap == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbFreeTable(pTable); + return NULL; + } + }else{ + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey); - if (pTable->pIndex == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbFreeTable(pTable); - return NULL; + if (pTable->pIndex == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbFreeTable(pTable); + return NULL; + } } } @@ -1341,13 +1486,29 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { return buf; } +static SArray* getJsonTagTableList(STable *pTable){ + uint32_t key = TSDB_DATA_JSON_NULL; + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(&key, INT_BYTES, keyMd5); + SArray** tablist = (SArray**)taosHashGet(pTable->jsonKeyMap, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + + return *tablist; +} + static int tsdbGetTableEncodeSize(int8_t act, STable *pTable) { int tlen = 0; if (act == TSDB_UPDATE_META) { tlen = sizeof(SListNode) + sizeof(SActObj) + sizeof(SActCont) + tsdbEncodeTable(NULL, pTable) + sizeof(TSCKSUM); } else { if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tlen = (int)((sizeof(SListNode) + sizeof(SActObj)) * (SL_SIZE(pTable->pIndex) + 1)); + size_t tableSize = 0; + if(pTable->tagSchema->columns[0].type == TSDB_DATA_TYPE_JSON){ + SArray* tablist = getJsonTagTableList(pTable); + tableSize = taosArrayGetSize(tablist); + }else{ + tableSize = SL_SIZE(pTable->pIndex); + } + tlen = (int)((sizeof(SListNode) + sizeof(SActObj)) * (tableSize + 1)); } else { tlen = sizeof(SListNode) + sizeof(SActObj); } @@ -1387,19 +1548,28 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) { void *pBuf = buf; if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - SSkipListIterator *pIter = tSkipListCreateIter(pTable->pIndex); - if (pIter == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } + if(pTable->tagSchema->columns[0].type == 
TSDB_DATA_TYPE_JSON){ + SArray* tablist = getJsonTagTableList(pTable); + for (int i = 0; i < taosArrayGetSize(tablist); ++i) { + JsonMapValue* p = taosArrayGet(tablist, i); + ASSERT(TABLE_TYPE((STable *)(p->table)) == TSDB_CHILD_TABLE); + pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, p->table); + } + }else { + SSkipListIterator *pIter = tSkipListCreateIter(pTable->pIndex); + if (pIter == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } - while (tSkipListIterNext(pIter)) { - STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); - ASSERT(TABLE_TYPE(tTable) == TSDB_CHILD_TABLE); - pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, tTable); - } + while (tSkipListIterNext(pIter)) { + STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); + ASSERT(TABLE_TYPE(tTable) == TSDB_CHILD_TABLE); + pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, tTable); + } - tSkipListDestroyIter(pIter); + tSkipListDestroyIter(pIter); + } } pBuf = tsdbInsertTableAct(pRepo, TSDB_DROP_META, pBuf, pTable); @@ -1410,25 +1580,27 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) { static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) { if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - SSkipListIterator *pIter = tSkipListCreateIter(pTable->pIndex); - if (pIter == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - tsdbWLockRepoMeta(pRepo); - - while (tSkipListIterNext(pIter)) { - STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); - tsdbRemoveTableFromMeta(pRepo, tTable, false, false); + if(pTable->tagSchema->columns[0].type == TSDB_DATA_TYPE_JSON){ + SArray* tablist = getJsonTagTableList(pTable); + for (int i = 0; i < taosArrayGetSize(tablist); ++i) { + JsonMapValue* p = taosArrayGet(tablist, i); + tsdbRemoveTableFromMeta(pRepo, p->table, false, false); + } + }else{ + SSkipListIterator *pIter = tSkipListCreateIter(pTable->pIndex); + if (pIter == NULL) { + terrno = 
TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + while (tSkipListIterNext(pIter)) { + STable *tTable = (STable *)SL_GET_NODE_DATA(tSkipListIterGet(pIter)); + tsdbRemoveTableFromMeta(pRepo, tTable, false, false); + } + tSkipListDestroyIter(pIter); } - tsdbRemoveTableFromMeta(pRepo, pTable, false, false); - tsdbUnlockRepoMeta(pRepo); - - tSkipListDestroyIter(pIter); - } else { if ((TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) && pTable->cqhandle) pRepo->appH.cqDropFunc(pTable->cqhandle); tsdbRemoveTableFromMeta(pRepo, pTable, true, true); @@ -1511,3 +1683,4 @@ static void tsdbFreeTableSchema(STable *pTable) { taosArrayDestroy(pTable->schema); } } + diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9b138bbd02b7a88b9bdaddcafbc2d2290d5c3d2e..ca38d10d59348f6f2b9b7d1150d7f21d7bc1a3de 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -26,6 +26,7 @@ #include "tsdbint.h" #include "texpr.h" #include "qFilter.h" +#include "cJSON.h" #define EXTRA_BYTES 2 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) @@ -2673,17 +2674,31 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } static int32_t getAllTableList(STable* pSuperTable, SArray* list) { - SSkipListIterator* iter = tSkipListCreateIter(pSuperTable->pIndex); - while (tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); + STSchema* pTagSchema = tsdbGetTableTagSchema(pSuperTable); + if(pTagSchema && pTagSchema->numOfCols == 1 && pTagSchema->columns[0].type == TSDB_DATA_TYPE_JSON){ + uint32_t key = TSDB_DATA_JSON_NULL; + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(&key, INT_BYTES, keyMd5); + SArray** tablist = (SArray**)taosHashGet(pSuperTable->jsonKeyMap, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN); + + for (int i = 0; i < taosArrayGetSize(*tablist); ++i) { + JsonMapValue* p = taosArrayGet(*tablist, i); + STableKeyInfo info = {.pTable = p->table, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(list, &info); + } + }else{ + 
SSkipListIterator* iter = tSkipListCreateIter(pSuperTable->pIndex); + while (tSkipListIterNext(iter)) { + SSkipListNode* pNode = tSkipListIterGet(iter); - STable* pTable = (STable*) SL_GET_NODE_DATA((SSkipListNode*) pNode); + STable* pTable = (STable*) SL_GET_NODE_DATA((SSkipListNode*) pNode); - STableKeyInfo info = {.pTable = pTable, .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(list, &info); - } + STableKeyInfo info = {.pTable = pTable, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(list, &info); + } - tSkipListDestroyIter(iter); + tSkipListDestroyIter(iter); + } return TSDB_CODE_SUCCESS; } @@ -3543,8 +3558,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); bytes = pCol->bytes; type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + if (type == TSDB_DATA_TYPE_JSON){ + f1 = getJsonTagValueElment(pTable1, pColIndex->name, (int32_t)strlen(pColIndex->name), NULL, TSDB_MAX_JSON_TAGS_LEN); + f2 = getJsonTagValueElment(pTable2, pColIndex->name, (int32_t)strlen(pColIndex->name), NULL, TSDB_MAX_JSON_TAGS_LEN); + }else{ + f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); + f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + } } } @@ -3660,6 +3680,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { + SArray* res = NULL; if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); @@ -3681,7 +3702,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons } //NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); + res = taosArrayInit(8, 
sizeof(STableKeyInfo)); STSchema* pTagSchema = tsdbGetTableTagSchema(pTable); // no tags and tbname condition, all child tables of this stable are involved @@ -3719,20 +3740,28 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons // TODO: more error handling } END_TRY - void *filterInfo = NULL; - + void *filterInfo = calloc(1, sizeof(SFilterInfo)); + ((SFilterInfo*)filterInfo)->pTable = pTable; ret = filterInitFromTree(expr, &filterInfo, 0); + tExprTreeDestroy(expr, NULL); + if (ret != TSDB_CODE_SUCCESS) { terrno = ret; + tsdbUnlockRepoMeta(tsdb); + filterFreeInfo(filterInfo); goto _error; } - tsdbQueryTableList(pTable, res, filterInfo); + ret = tsdbQueryTableList(pTable, res, filterInfo); + if (ret != TSDB_CODE_SUCCESS) { + terrno = ret; + tsdbUnlockRepoMeta(tsdb); + filterFreeInfo(filterInfo); + goto _error; + } filterFreeInfo(filterInfo); - tExprTreeDestroy(expr, NULL); - pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); @@ -3745,6 +3774,8 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons return ret; _error: + + taosArrayDestroy(res); return terrno; } @@ -4009,22 +4040,257 @@ static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray* tSkipListDestroyIter(iter); } +static FORCE_INLINE int32_t tsdbGetJsonTagDataFromId(void *param, int32_t id, char* name, void **data) { + JsonMapValue* jsonMapV = (JsonMapValue*)(param); + STable* pTable = (STable*)(jsonMapV->table); + + if (id == TSDB_TBNAME_COLUMN_INDEX) { + *data = TABLE_NAME(pTable); + } else { + void* jsonData = tsdbGetJsonTagValue(pTable, name, TSDB_MAX_JSON_KEY_MD5_LEN, NULL); + // jsonData == NULL for ? 
operation + // if(jsonData != NULL) jsonData += CHAR_BYTES; // jump type + *data = jsonData; + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t queryByJsonTag(STable* pTable, void* filterInfo, SArray* res){ + // get all table in fields, and dumplicate it + SArray* tabList = NULL; + bool needQueryAll = false; + SFilterInfo* info = (SFilterInfo*)filterInfo; + for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { + SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i]; + SSchema* sch = fi->desc; + if (sch->colId == TSDB_TBNAME_COLUMN_INDEX) { + tabList = taosArrayInit(32, sizeof(JsonMapValue)); + getAllTableList(pTable, tabList); // query all table + needQueryAll = true; + break; + } + } + for (uint16_t i = 0; i < info->unitNum; ++i) { // is null operation need query all table + SFilterUnit* unit = &info->units[i]; + if (unit->compare.optr == TSDB_RELATION_ISNULL) { + tabList = taosArrayInit(32, sizeof(JsonMapValue)); + getAllTableList(pTable, tabList); // query all table + needQueryAll = true; + break; + } + } + + for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { + if (needQueryAll) break; // query all table + SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i]; + SSchema* sch = fi->desc; + char* key = sch->name; + + SArray** data = (SArray**)taosHashGet(pTable->jsonKeyMap, key, TSDB_MAX_JSON_KEY_MD5_LEN); + if(data == NULL) continue; + if(tabList == NULL) { + tabList = taosArrayDup(*data); + }else{ + for(int j = 0; j < taosArrayGetSize(*data); j++){ + void* element = taosArrayGet(*data, j); + void* p = taosArraySearch(tabList, element, tsdbCompareJsonMapValue, TD_EQ); + if (p == NULL) { + p = taosArraySearch(tabList, element, tsdbCompareJsonMapValue, TD_GE); + if(p == NULL){ + taosArrayPush(tabList, element); + }else{ + taosArrayInsert(tabList, TARRAY_ELEM_IDX(tabList, p), element); + } + } + } + } + } + if(tabList == NULL){ + tsdbError("json key not exist, no candidate table"); + return TSDB_CODE_SUCCESS; + } + size_t 
size = taosArrayGetSize(tabList); + int8_t *addToResult = NULL; + for(int i = 0; i < size; i++){ + JsonMapValue* data = taosArrayGet(tabList, i); + filterSetJsonColFieldData(filterInfo, data, tsdbGetJsonTagDataFromId); + bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0); + + if (all || (addToResult && *addToResult)) { + STableKeyInfo kInfo = {.pTable = (void*)(data->table), .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(res, &kInfo); + } + } + tfree(addToResult); + taosArrayDestroy(tabList); + return TSDB_CODE_SUCCESS; +} static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo) { STSchema* pTSSchema = pTable->tagSchema; - bool indexQuery = false; - SSkipList *pSkipList = pTable->pIndex; - filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery); + if(pTSSchema->columns->type == TSDB_DATA_TYPE_JSON){ + return queryByJsonTag(pTable, filterInfo, pRes); + }else{ + bool indexQuery = false; + SSkipList *pSkipList = pTable->pIndex; - if (indexQuery) { - queryIndexedColumn(pSkipList, filterInfo, pRes); - } else { - queryIndexlessColumn(pSkipList, filterInfo, pRes); + filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery); + + if (indexQuery) { + queryIndexedColumn(pSkipList, filterInfo, pRes); + } else { + queryIndexlessColumn(pSkipList, filterInfo, pRes); + } } return TSDB_CODE_SUCCESS; } +void* getJsonTagValueElment(void* data, char* key, int32_t keyLen, char* dst, int16_t bytes){ + char keyMd5[TSDB_MAX_JSON_KEY_MD5_LEN] = {0}; + jsonKeyMd5(key, keyLen, keyMd5); + + void* result = tsdbGetJsonTagValue(data, keyMd5, TSDB_MAX_JSON_KEY_MD5_LEN, NULL); + if (result == NULL){ // json key no result + if(!dst) return NULL; + *dst = TSDB_DATA_TYPE_JSON; + setNull(dst + CHAR_BYTES, TSDB_DATA_TYPE_JSON, 0); + return dst; + } + + char* realData = POINTER_SHIFT(result, CHAR_BYTES); + if(*(char*)result == TSDB_DATA_TYPE_NCHAR || *(char*)result == TSDB_DATA_TYPE_BINARY) { + assert(varDataTLen(realData) 
< bytes); + if(!dst) return result; + memcpy(dst, result, CHAR_BYTES + varDataTLen(realData)); + return dst; + }else if (*(char*)result == TSDB_DATA_TYPE_DOUBLE || *(char*)result == TSDB_DATA_TYPE_BIGINT) { + if(!dst) return result; + memcpy(dst, result, CHAR_BYTES + LONG_BYTES); + return dst; + }else if (*(char*)result == TSDB_DATA_TYPE_BOOL) { + if(!dst) return result; + memcpy(dst, result, CHAR_BYTES + CHAR_BYTES); + return dst; + }else { + assert(0); + } + return result; +} + +void getJsonTagValueAll(void* data, void* dst, int16_t bytes) { + char* json = parseTagDatatoJson(data); + char* tagData = POINTER_SHIFT(dst, CHAR_BYTES); + *(char*)dst = TSDB_DATA_TYPE_JSON; + if(json == NULL){ + setNull(tagData, TSDB_DATA_TYPE_JSON, 0); + return; + } + + int32_t length = 0; + if(!taosMbsToUcs4(json, strlen(json), varDataVal(tagData), bytes - VARSTR_HEADER_SIZE - CHAR_BYTES, &length)){ + tsdbError("getJsonTagValueAll mbstoucs4 error! length:%d", length); + } + varDataSetLen(tagData, length); + assert(varDataTLen(tagData) <= bytes); + tfree(json); +} + +char* parseTagDatatoJson(void *p){ + char* string = NULL; + cJSON *json = cJSON_CreateObject(); + if (json == NULL) + { + goto end; + } + + int16_t nCols = kvRowNCols(p); + ASSERT(nCols%2 == 1); + char tagJsonKey[TSDB_MAX_JSON_KEY_LEN + 1] = {0}; + for (int j = 0; j < nCols; ++j) { + SColIdx * pColIdx = kvRowColIdxAt(p, j); + void* val = (kvRowColVal(p, pColIdx)); + if (j == 0){ + int8_t jsonPlaceHolder = *(int8_t*)val; + ASSERT(jsonPlaceHolder == TSDB_DATA_JSON_PLACEHOLDER); + continue; + } + if(j == 1){ + uint32_t jsonNULL = *(uint32_t*)(varDataVal(val)); + ASSERT(jsonNULL == TSDB_DATA_JSON_NULL); + continue; + } + if (j == 2){ + if(*(uint32_t*)(varDataVal(val + CHAR_BYTES)) == TSDB_DATA_JSON_NULL) goto end; + continue; + } + if (j%2 == 1) { // json key encode by binary + ASSERT(varDataLen(val) <= TSDB_MAX_JSON_KEY_LEN); + memset(tagJsonKey, 0, sizeof(tagJsonKey)); + memcpy(tagJsonKey, varDataVal(val), varDataLen(val)); 
+ }else{ // json value + char tagJsonValue[TSDB_MAX_JSON_TAGS_LEN] = {0}; + char* realData = POINTER_SHIFT(val, CHAR_BYTES); + char type = *(char*)val; + if(type == TSDB_DATA_TYPE_BINARY) { + assert(*(uint32_t*)varDataVal(realData) == TSDB_DATA_JSON_null); // json null value + assert(varDataLen(realData) == INT_BYTES); + cJSON* value = cJSON_CreateNull(); + if (value == NULL) + { + goto end; + } + cJSON_AddItemToObject(json, tagJsonKey, value); + }else if(type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue); + if (length < 0) { + tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, + (char*)val); + goto end; + } + cJSON* value = cJSON_CreateString(tagJsonValue); + + if (value == NULL) + { + goto end; + } + cJSON_AddItemToObject(json, tagJsonKey, value); + }else if(type == TSDB_DATA_TYPE_DOUBLE){ + double jsonVd = *(double*)(realData); + cJSON* value = cJSON_CreateNumber(jsonVd); + if (value == NULL) + { + goto end; + } + cJSON_AddItemToObject(json, tagJsonKey, value); + }else if(type == TSDB_DATA_TYPE_BIGINT){ + int64_t jsonVd = *(int64_t*)(realData); + cJSON* value = cJSON_CreateNumber((double)jsonVd); + if (value == NULL) + { + goto end; + } + cJSON_AddItemToObject(json, tagJsonKey, value); + }else if (type == TSDB_DATA_TYPE_BOOL) { + char jsonVd = *(char*)(realData); + cJSON* value = cJSON_CreateBool(jsonVd); + if (value == NULL) + { + goto end; + } + cJSON_AddItemToObject(json, tagJsonKey, value); + } + else{ + tsdbError("unsupportted json value"); + } + } + } + string = cJSON_PrintUnformatted(json); +end: + cJSON_Delete(json); + return string; +} diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index 2a4cbec4dd1fa5d227e181b07ce103c14120a12b..d41c579a58dd149172ee42c94e97b72ab5687548 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -88,6 +88,8 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp 
*/ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp); +void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp); + /** * return the size of hash table * @param pHashObj diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 2da74eac820e28206cb3e2b7cb6f2c4fb9f481b8..5587b283a47af6b3ee8b38f1f69e95f4d6f7623b 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -178,6 +178,12 @@ void taosArrayClear(SArray* pArray); */ void* taosArrayDestroy(SArray* pArray); +/** + * destroy array list for hash + * @param pArray + */ +void taosArrayDestroyForHash(void* para); + /** * * @param pArray diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 1125516d34c65da1b5d0c47dadd126aa0b1959fa..be62ce0a659d0e07d904cac4f994b4639cd18917 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -88,6 +88,9 @@ int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight); int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight); int32_t compareFindItemInSet(const void *pLeft, const void* pRight); int32_t compareWStrPatternComp(const void* pLeft, const void* pRight); +int32_t compareStrContainJson(const void* pLeft, const void* pRight); +int32_t compareJsonVal(const void* pLeft, const void* pRight); +int32_t jsonCompareUnit(const char* f1, const char* f2, bool* canReturn); #ifdef __cplusplus } diff --git a/src/util/inc/tutil.h b/src/util/inc/tutil.h index 4443716bca3ea280f50eb0402034dc60ee8b5dc8..dd943e8cc45837c814680c9e63b720ddc0c80010 100644 --- a/src/util/inc/tutil.h +++ b/src/util/inc/tutil.h @@ -47,6 +47,8 @@ int taosCheckVersion(char *input_client_version, char *input_server_version, in char * taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); +void jsonKeyMd5(void *pMsg, int msgLen, void *pKey); +bool isValidateTag(char *input); static FORCE_INLINE void taosEncryptPass(uint8_t *inBuf, size_t inLen, char *target) { MD5_CTX context; diff --git a/src/util/src/hash.c 
b/src/util/src/hash.c index 00de532a95363dc22104b4cc75256ccde0c96c2a..20aa146893298a5b9c3f5a85498494ed29afff66 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -203,7 +203,13 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp) { if (pHashObj != NULL && fp != NULL) { pHashObj->equalFp = fp; } -} +} + +void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp) { + if (pHashObj != NULL && fp != NULL) { + pHashObj->freeFp = fp; + } +} int32_t taosHashGetSize(const SHashObj *pHashObj) { if (!pHashObj) { @@ -611,7 +617,7 @@ void taosHashCleanup(SHashObj *pHashObj) { taosArrayDestroy(pHashObj->pMemBlock); memset(pHashObj, 0, sizeof(SHashObj)); - free(pHashObj); + tfree(pHashObj); } // for profile only diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 007ce0682974d06bf506a82d8bbbc809092eb9e4..812cf65a80e928acf918da7c2b3ab82fd55682d6 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -281,13 +281,26 @@ void taosArrayClear(SArray* pArray) { void* taosArrayDestroy(SArray* pArray) { if (pArray) { - free(pArray->pData); - free(pArray); + tfree(pArray->pData); + tfree(pArray); } return NULL; } +void taosArrayDestroyForHash(void* para) { + SArray** ppArray = (SArray**)para; + if(ppArray == NULL) return; + + SArray* pArray = *ppArray; + if (pArray) { + tfree(pArray->pData); + tfree(pArray); + } + + return; +} + void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { if (pArray == NULL) { return; diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 23bb73ff860a2b0c4bd5a81005089910faa7792a..1cfc0c3873a438699c342a7dd4a4b1a8efd32878 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -18,11 +18,12 @@ #define _DEFAULT_SOURCE #include "tcompare.h" -#include "tulog.h" +#include "tvariant.h" #include "hash.h" -#include "regex.h" #include "os.h" +#include "regex.h" #include "ttype.h" +#include "tulog.h" int32_t setCompareBytes1(const void *pLeft, const void *pRight) { return NULL != 
taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; @@ -218,6 +219,36 @@ int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) { return compareLenPrefixedWStr(pRight, pLeft); } +int32_t compareJsonVal(const void *pLeft, const void *pRight) { + const tVariant* right = pRight; + if(right->nType != *(char*)pLeft && !(IS_NUMERIC_TYPE(right->nType) && IS_NUMERIC_TYPE(*(char*)pLeft))) + return TSDB_DATA_JSON_CAN_NOT_COMPARE; + + uint8_t type = *(char*)pLeft; + char* realData = POINTER_SHIFT(pLeft, CHAR_BYTES); + if(type == TSDB_DATA_TYPE_BOOL) { + DEFAULT_COMP(GET_INT8_VAL(realData), right->i64); + }else if(type == TSDB_DATA_TYPE_BIGINT){ + DEFAULT_COMP(GET_INT64_VAL(realData), (right->nType == TSDB_DATA_TYPE_BIGINT) ? right->i64 : right->dKey); + }else if(type == TSDB_DATA_TYPE_DOUBLE){ + DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(realData), (right->nType == TSDB_DATA_TYPE_DOUBLE) ? right->dKey : right->i64); + }else if(type == TSDB_DATA_TYPE_NCHAR){ + if (varDataLen(realData) != right->nLen) { + return varDataLen(realData) > right->nLen ? 1 : -1; + } + int32_t ret = memcmp(varDataVal(realData), right->pz, right->nLen); + if (ret == 0) { + return ret; + } + return (ret < 0) ? 
-1 : 1; + }else if(type == TSDB_DATA_TYPE_BINARY) { //json null + return 0; + }else{ + assert(0); + } + return 0; +} + /* * Compare two strings * TSDB_MATCH: Match @@ -405,6 +436,12 @@ int32_t compareStrRegexComp(const void* pLeft, const void* pRight) { return result; } +int32_t compareStrContainJson(const void* pLeft, const void* pRight) { + if(pLeft) return 0; + return 1; +} + + int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; @@ -487,7 +524,7 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { break; } - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR:{ if (optr == TSDB_RELATION_MATCH) { comparFn = compareStrRegexCompMatch; } else if (optr == TSDB_RELATION_NMATCH) { @@ -501,6 +538,20 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } break; } + case TSDB_DATA_TYPE_JSON:{ + if (optr == TSDB_RELATION_MATCH) { + comparFn = compareStrRegexCompMatch; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = compareStrRegexCompNMatch; + } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ + comparFn = compareWStrPatternComp; + } else if (optr == TSDB_RELATION_CONTAINS) { + comparFn = compareStrContainJson; + } else { + comparFn = compareJsonVal; + } + break; + } case TSDB_DATA_TYPE_UTINYINT: comparFn = compareUint8Val; break; case TSDB_DATA_TYPE_USMALLINT: comparFn = compareUint16Val;break; @@ -565,7 +616,48 @@ __compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) { return comparFn; } +int32_t jsonCompareUnit(const char* f1, const char* f2, bool* canReturn){ + *canReturn = true; + bool f1IsNull = (*f1 == TSDB_DATA_TYPE_JSON && isNull(f1 + CHAR_BYTES, TSDB_DATA_TYPE_JSON)); + bool f2IsNull = (*f2 == TSDB_DATA_TYPE_JSON && isNull(f2 + CHAR_BYTES, TSDB_DATA_TYPE_JSON)); + if(f1IsNull && f2IsNull){ + return 0; + }else if(f1IsNull && !f2IsNull){ + return -1; + }else if(!f1IsNull && f2IsNull){ + return 1; + }else{ + bool 
f1IsJsonNull = (*f1 == TSDB_DATA_TYPE_BINARY && *(uint32_t*)varDataVal(f1 + CHAR_BYTES) == TSDB_DATA_JSON_null); + bool f2IsJsonNull = (*f2 == TSDB_DATA_TYPE_BINARY && *(uint32_t*)varDataVal(f2 + CHAR_BYTES) == TSDB_DATA_JSON_null); + if(f1IsJsonNull && f2IsJsonNull){ + return 0; + }else if(f1IsJsonNull && !f2IsJsonNull){ + return -1; + }else if(!f1IsJsonNull && f2IsJsonNull) { + return 1; + } + if(*f1 != *f2 && !(IS_NUMERIC_TYPE(*f1) && IS_NUMERIC_TYPE(*f2))) { + return *f1 > *f2 ? 1 : -1; + } + if(*f1 == TSDB_DATA_TYPE_BIGINT && *f2 == TSDB_DATA_TYPE_DOUBLE){ + DEFAULT_COMP(GET_INT64_VAL(f1 + CHAR_BYTES), GET_DOUBLE_VAL(f2 + CHAR_BYTES)); + }else if(*f1 == TSDB_DATA_TYPE_DOUBLE && *f2 == TSDB_DATA_TYPE_BIGINT){ + DEFAULT_COMP(GET_DOUBLE_VAL(f1 + CHAR_BYTES), GET_INT64_VAL(f2 + CHAR_BYTES)); + } + *canReturn = false; + return 0; // meaningless + } +} + int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { + if (type == TSDB_DATA_TYPE_JSON){ + bool canReturn = true; + int32_t result = jsonCompareUnit(f1, f2, &canReturn); + if(canReturn) return result; + type = *f1; + f1 += CHAR_BYTES; + f2 += CHAR_BYTES; + } switch (type) { case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2)); case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); @@ -578,7 +670,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2)); case TSDB_DATA_TYPE_UINT: DEFAULT_COMP(GET_UINT32_VAL(f1), GET_UINT32_VAL(f2)); case TSDB_DATA_TYPE_UBIGINT: DEFAULT_COMP(GET_UINT64_VAL(f1), GET_UINT64_VAL(f2)); - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR:{ tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 69b0d8d7bb9ad5ab37321a5460c3f083e3a71dba..2a49862bac14633f77db921b7d2e17b160019425 100644 --- a/src/util/src/tconfig.c +++ 
b/src/util/src/tconfig.c @@ -334,8 +334,9 @@ bool taosReadConfigOption(const char *option, char *value, char *value2, char *v if (taosReadDirectoryConfig(cfg, value)) { taosReadDataDirCfg(value, value2, value3); ret = true; + } else { + ret = false; } - ret = false; break; default: uError("config option:%s, input value:%s, can't be recognized", option, value); @@ -382,6 +383,12 @@ void taosReadGlobalLogCfg() { #elif (_TD_PRO_ == true) printf("configDir:%s not there, use default value: /etc/ProDB", configDir); strcpy(configDir, "/etc/ProDB"); + #elif (_TD_KH_ == true) + printf("configDir:%s not there, use default value: /etc/kinghistorian", configDir); + strcpy(configDir, "/etc/kinghistorian"); + #elif (_TD_JH_ == true) + printf("configDir:%s not there, use default value: /etc/jh_taos", configDir); + strcpy(configDir, "/etc/jh_taos"); #else printf("configDir:%s not there, use default value: /etc/taos", configDir); strcpy(configDir, "/etc/taos"); diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 379b7530fa5a898938b9bf0b552e09ab4fbc70b8..3853d2e9c7491db68abf4ca9f7d42edd62da5729 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -16,7 +16,7 @@ #include "os.h" #define TAOS_ERROR_C - + typedef struct { int32_t val; const char* str; @@ -122,6 +122,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON_CONFIG, "Invalid JSON configur TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE, "Invalid line protocol type") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE, "Invalid timestamp precision type") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY, "Result set too large to be output") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") @@ -232,6 +233,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message lengt TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress") 
TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories") TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING, "Dnode is exiting") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_OPEN_FAILED, "Vnode open failed") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in progress") @@ -280,6 +282,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INCOMPLETE_DFILESET, "Incomplete DFileSet") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_JSON_TAG_KEY, "TSDB no tag json key") // query TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle") @@ -298,7 +301,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, "File inconsistance in TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_TIME_CONDITION, "One valid time range condition expected") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_SYS_ERROR, "System error") - // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_DNODE_LIMITED, "DNode creation limited by licence") diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index a2eea5aa7d99a43f2cf7f0552e843ce9a52034c0..232d10a7d07594c9c62cd13767c320da27af2a73 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -87,6 +87,10 @@ char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; #elif (_TD_PRO_ == true) char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB"; +#elif (_TD_KH_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/kinghistorian"; +#elif (_TD_JH_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/jh_taos"; #else char tsLogDir[PATH_MAX] = "/var/log/taos"; #endif diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 2a147ee4f17a38e9a00a6110fcc6f2d21fb7b131..8dc2d4c993e001c95f63d72c60db8b8fe3ac3df8 100644 --- a/src/util/src/tnettest.c +++ 
b/src/util/src/tnettest.c @@ -568,6 +568,12 @@ static void taosNetCheckSpeed(char *host, int32_t port, int32_t pkgLen, int32_t compressTmp = tsCompressMsgSize; int32_t maxUdpSize = tsRpcMaxUdpSize; int32_t forceTcp = tsRpcForceTcp; + + //Precheck for FQDN lgenth + if (strlen(host) >= TSDB_FQDN_LEN) { + uError("FQDN length is too long"); + return; + } if (0 == strcmp("tcp", pkgType)){ tsRpcForceTcp = 1; diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 5eee3adbc3e83e3d26a2bd93e0b564d21cfc6668..5876d82bea9f0373b5086b2ce285f7ad86002536 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -44,6 +44,7 @@ static SKeyword keywordTable[] = { {"TIMESTAMP", TK_TIMESTAMP}, {"BINARY", TK_BINARY}, {"NCHAR", TK_NCHAR}, + {"JSON", TK_JSON}, {"OR", TK_OR}, {"AND", TK_AND}, {"NOT", TK_NOT}, @@ -72,7 +73,6 @@ static SKeyword keywordTable[] = { {"STAR", TK_STAR}, {"SLASH", TK_SLASH}, {"REM ", TK_REM}, - {"CONCAT", TK_CONCAT}, {"UMINUS", TK_UMINUS}, {"UPLUS", TK_UPLUS}, {"BITNOT", TK_BITNOT}, @@ -230,7 +230,8 @@ static SKeyword keywordTable[] = { {"OUTPUTTYPE", TK_OUTPUTTYPE}, {"AGGREGATE", TK_AGGREGATE}, {"BUFSIZE", TK_BUFSIZE}, - {"RANGE", TK_RANGE} + {"RANGE", TK_RANGE}, + {"CONTAINS", TK_CONTAINS} }; static const char isIdChar[] = { @@ -312,6 +313,10 @@ uint32_t tGetToken(char* z, uint32_t* tokenId) { *tokenId = TK_COMMENT; return i; } + if (z[1] == '>') { + *tokenId = TK_ARROW; + return 2; + } *tokenId = TK_MINUS; return 1; } @@ -394,9 +399,6 @@ uint32_t tGetToken(char* z, uint32_t* tokenId) { if (z[1] != '|') { *tokenId = TK_BITOR; return 1; - } else { - *tokenId = TK_CONCAT; - return 2; } } case ',': { diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index c8172fc0aff010332de7d13071a28303f37cf7f5..c15197b7537601c0f0ca72420a6711547d1ed0ed 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -21,6 +21,11 @@ #include "tulog.h" #include "taoserror.h" +bool isInteger(double x){ + int truncated = (int)x; + return (x == 
truncated); +} + int32_t strdequote(char *z) { if (z == NULL) { return 0; @@ -105,7 +110,7 @@ size_t strtrim(char *z) { int32_t j = 0; int32_t delta = 0; - while (z[j] == ' ') { + while (isspace(z[j])) { ++j; } @@ -118,9 +123,9 @@ size_t strtrim(char *z) { int32_t stop = 0; while (z[j] != 0) { - if (z[j] == ' ' && stop == 0) { + if (isspace(z[j]) && stop == 0) { stop = j; - } else if (z[j] != ' ' && stop != 0) { + } else if (!isspace(z[j]) && stop != 0) { stop = 0; } @@ -509,6 +514,24 @@ char *taosIpStr(uint32_t ipInt) { return ipStr; } +void jsonKeyMd5(void *pMsg, int msgLen, void *pKey) { + MD5_CTX context; + + MD5Init(&context); + MD5Update(&context, (uint8_t *)pMsg, msgLen); + MD5Final(&context); + + memcpy(pKey, context.digest, sizeof(context.digest)); +} + +bool isValidateTag(char *input) { + if (!input) return false; + for (size_t i = 0; i < strlen(input); ++i) { + if (isprint(input[i]) == 0) return false; + } + return true; +} + FORCE_INLINE float taos_align_get_float(const char* pBuf) { #if __STDC_VERSION__ >= 201112L static_assert(sizeof(float) == sizeof(uint32_t), "sizeof(float) must equal to sizeof(uint32_t)"); diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index 6845d091b5f300bac0656078492467dc3db67830..247a7f6d7d8af1b1397037bb76e905772898ed47 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -11,7 +11,7 @@ 4. pip install ../src/connector/python ; pip3 install ../src/connector/python -5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py) +5. pip install numpy; pip3 install numpy fabric2 psutil pandas(numpy is required only if you need to run querySort.py) > Note: Both Python2 and Python3 are currently supported by the Python test > framework. 
Since Python2 is no longer officially supported by Python Software diff --git a/tests/develop-test/0-management/0-database/.gitkeep b/tests/develop-test/0-management/0-database/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/0-management/1-stable/.gitkeep b/tests/develop-test/0-management/1-stable/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/0-management/2-table/.gitkeep b/tests/develop-test/0-management/2-table/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/0-management/3-tag/.gitkeep b/tests/develop-test/0-management/3-tag/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/0-management/3-tag/json_tag.py b/tests/develop-test/0-management/3-tag/json_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..dc7affe870f4462526aa1095311278d280abe77f --- /dev/null +++ b/tests/develop-test/0-management/3-tag/json_tag.py @@ -0,0 +1,521 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, db_test.stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import json + + +class TDTestCase: + def caseDescription(self): + ''' + Json tag test case, include create table with json tag, + select json tag and query with json tag in where condition, + besides, include json tag in group by/order by/join/subquery. + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("============== STEP 1 ===== prepare data & validate json string") + tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)") + tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)") + tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute("insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')") + tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')") + tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')") + tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')") + 
tdSql.execute("insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')") + tdSql.execute("insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')") + tdSql.execute("insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") + + # test duplicate key using the first one. elimate empty key + tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") + tdSql.query("select jtag from jsons1_8") + tdSql.checkData(0, 0, '{"tag1":null,"1tag$":2," ":90}') + + # test empty json string, save as jtag is NULL + tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')") + tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')") + tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')") + tdSql.execute("CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')") + tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')") + + # test invalidate json + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')") + + # test invalidate json key, key must can be printed assic char + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 
tags('{\"tag1\":[1,true]}')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')") + tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')") + + #test length limit + char1= ''.join(['abcd']*64) + char3= ''.join(['abcd']*1022) + print(len(char3)) # 4088 + tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257 + tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256 + tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TS\":\"%s\"}')" % char3) # len(object)=4097 + tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"T\":\"%s\"}')" % char3) # len(object)=4096 + tdSql.execute("drop table if exists jsons1_15") + tdSql.execute("drop table if exists jsons1_16") + + print("============== STEP 2 ===== alter table json tag") + tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)") + tdSql.error("ALTER STABLE jsons1 drop tag jtag") + tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)") + + tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'") + tdSql.query("select jtag from jsons1_1") + tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}') + tdSql.execute("ALTER TABLE jsons1 CHANGE TAG jtag jtag_new") + tdSql.execute("ALTER TABLE jsons1 CHANGE TAG jtag_new jtag") + + print("============== STEP 3 ===== query table") + # test error syntax + tdSql.error("select * from jsons1 where jtag->tag1='beijing'") + tdSql.error("select * from jsons1 where jtag->'location'") + tdSql.error("select * from jsons1 where jtag->''") + tdSql.error("select * from jsons1 where jtag->''=9") + tdSql.error("select -> from jsons1") + 
tdSql.error("select * from jsons1 where contains") + tdSql.error("select * from jsons1 where jtag->") + tdSql.error("select jtag->location from jsons1") + tdSql.error("select jtag contains location from jsons1") + tdSql.error("select * from jsons1 where jtag contains location") + tdSql.error("select * from jsons1 where jtag contains''") + tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'") + + # test function error + tdSql.error("select avg(jtag->'tag1') from jsons1") + tdSql.error("select avg(jtag) from jsons1") + tdSql.error("select min(jtag->'tag1') from jsons1") + tdSql.error("select min(jtag) from jsons1") + tdSql.error("select ceil(jtag->'tag1') from jsons1") + tdSql.error("select ceil(jtag) from jsons1") + + # test select normal column + tdSql.query("select dataint from jsons1") + tdSql.checkRows(9) + tdSql.checkData(1, 0, 1) + + # test select json tag + tdSql.query("select * from jsons1") + tdSql.checkRows(9) + tdSql.query("select jtag from jsons1") + tdSql.checkRows(13) + tdSql.query("select jtag from jsons1 where jtag is null") + tdSql.checkRows(5) + tdSql.query("select jtag from jsons1 where jtag is not null") + tdSql.checkRows(8) + + # test jtag is NULL + tdSql.query("select jtag from jsons1_9") + tdSql.checkData(0, 0, None) + + # test select json tag->'key', value is string + tdSql.query("select jtag->'tag1' from jsons1_1") + tdSql.checkData(0, 0, '"femail"') + tdSql.query("select jtag->'tag2' from jsons1_6") + tdSql.checkData(0, 0, '""') + # test select json tag->'key', value is int + tdSql.query("select jtag->'tag2' from jsons1_1") + tdSql.checkData(0, 0, 35) + # test select json tag->'key', value is bool + tdSql.query("select jtag->'tag3' from jsons1_1") + tdSql.checkData(0, 0, "true") + # test select json tag->'key', value is null + tdSql.query("select jtag->'tag1' from jsons1_4") + tdSql.checkData(0, 0, "null") + # test select json tag->'key', value is double + tdSql.query("select jtag->'tag1' from jsons1_5") + 
tdSql.checkData(0, 0, "1.232000000") + # test select json tag->'key', key is not exist + tdSql.query("select jtag->'tag10' from jsons1_4") + tdSql.checkData(0, 0, None) + + tdSql.query("select jtag->'tag1' from jsons1") + tdSql.checkRows(13) + # test header name + res = tdSql.getColNameList("select jtag->'tag1' from jsons1") + cname_list = [] + cname_list.append("jtag->'tag1'") + tdSql.checkColNameList(res, cname_list) + + + + # test where with json tag + tdSql.error("select * from jsons1_1 where jtag is not null") + tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'") + tdSql.error("select * from jsons1 where jtag->'tag1'={}") + + # where json value is string + tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'") + tdSql.checkRows(2) + tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'") + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, 'jsons1_2') + tdSql.checkData(0, 2, 5) + tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}') + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'jsons1_3') + tdSql.checkData(1, 2, 'false') + tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag2'>'beijing'") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'") + tdSql.checkRows(4) + tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag2'=''") + tdSql.checkRows(2) + + # where json value is int + tdSql.query("select * from jsons1 where jtag->'tag1'=5") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 2) + tdSql.query("select * from jsons1 where 
jtag->'tag1'=10") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'<54") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag1'<=11") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag1'>4") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1'>=5") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1'!=5") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1'!=55") + tdSql.checkRows(3) + + # where json value is double + tdSql.query("select * from jsons1 where jtag->'tag1'=1.232") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag1'<1.232") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag1'>1.23") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232") + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232") + tdSql.checkRows(3) + tdSql.error("select * from jsons1 where jtag->'tag1'/0=3") + tdSql.error("select * from jsons1 where jtag->'tag1'/5=1") + + # where json value is bool + tdSql.query("select * from jsons1 where jtag->'tag1'=true") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'=false") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag1'!=false") + tdSql.checkRows(0) + tdSql.error("select * from jsons1 where jtag->'tag1'>false") + + # where json value is null + tdSql.query("select * from jsons1 where jtag->'tag1'=null") # only json suport =null. This synatx will change later. 
+ tdSql.checkRows(1) + + # where json is null + tdSql.query("select * from jsons1 where jtag is null") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag is not null") + tdSql.checkRows(8) + + # where json key is null + tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3") + tdSql.checkRows(0) + + # where json value is not exist + tdSql.query("select * from jsons1 where jtag->'tag1' is null") + tdSql.checkData(0, 0, 'jsons1_9') + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag4' is null") + tdSql.checkRows(9) + tdSql.query("select * from jsons1 where jtag->'tag3' is not null") + tdSql.checkRows(4) + + # test contains + tdSql.query("select * from jsons1 where jtag contains 'tag1'") + tdSql.checkRows(8) + tdSql.query("select * from jsons1 where jtag contains 'tag3'") + tdSql.checkRows(4) + tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'") + tdSql.checkRows(0) + + # test json tag in where condition with and/or + tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'") + tdSql.checkRows(4) + tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'") + tdSql.checkRows(2) + + + # test with between and + tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30") + tdSql.checkRows(3) + 
tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'") + tdSql.checkRows(2) + + # test with tbname/normal column + tdSql.query("select * from jsons1 where tbname = 'jsons1_1'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23") + tdSql.checkRows(1) + + + # test where condition like + tdSql.query("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'") + tdSql.checkRows(2) + tdSql.query("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") + tdSql.checkRows(2) + + # test where condition in no support in + tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')") + + # test where condition match/nmath + tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag2' match 'jing$'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1' match '收到'") + tdSql.checkRows(1) + tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'") + tdSql.checkRows(1) + + # test distinct + tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") + tdSql.query("select distinct jtag->'tag1' from jsons1") + tdSql.checkRows(8) + tdSql.query("select distinct jtag from jsons1") + tdSql.checkRows(9) + + #test dumplicate key with normal colomn + tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")") + 
tdSql.query("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'") + tdSql.checkRows(1) + tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'") + tdSql.checkRows(0) + + # test join + tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')") + tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')") + + tdSql.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')") + tdSql.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')") + tdSql.query("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + tdSql.checkData(0, 0, "sss") + tdSql.checkData(0, 2, "true") + + res = tdSql.getColNameList("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + cname_list = [] + cname_list.append("sss") + cname_list.append("33") + cname_list.append("a.jtag->'tag3'") + tdSql.checkColNameList(res, cname_list) + + # test group by & order by json tag + tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'") + tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag") + tdSql.query("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc") + tdSql.checkRows(8) + tdSql.checkData(0, 0, 2) + 
tdSql.checkData(0, 1, '"femail"') + tdSql.checkData(2, 0, 1) + tdSql.checkData(2, 1, 11) + tdSql.checkData(5, 0, 1) + tdSql.checkData(5, 1, "false") + tdSql.checkData(6, 0, 1) + tdSql.checkData(6, 1, "null") + tdSql.checkData(7, 0, 2) + tdSql.checkData(7, 1, None) + + tdSql.query("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc") + tdSql.checkRows(8) + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, None) + tdSql.checkData(2, 0, 1) + tdSql.checkData(2, 1, "false") + tdSql.checkData(5, 0, 1) + tdSql.checkData(5, 1, 11) + tdSql.checkData(7, 0, 2) + tdSql.checkData(7, 1, '"femail"') + + # test stddev with group by json tag + tdSql.query("select stddev(dataint) from jsons1 group by jtag->'tag1'") + tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 0, 0) + tdSql.checkData(1, 1, "null") + tdSql.checkData(7, 0, 11) + tdSql.checkData(7, 1, '"femail"') + + res = tdSql.getColNameList("select stddev(dataint) from jsons1 group by jsons1.jtag->'tag1'") + cname_list = [] + cname_list.append("stddev(dataint)") + cname_list.append("jsons1.jtag->'tag1'") + tdSql.checkColNameList(res, cname_list) + + # test top/bottom with group by json tag + tdSql.query("select top(dataint,100) from jsons1 group by jtag->'tag1'") + tdSql.checkRows(11) + tdSql.checkData(0, 1, 4) + tdSql.checkData(1, 1, 24) + tdSql.checkData(1, 2, None) + tdSql.checkData(10, 1, 1) + tdSql.checkData(10, 2, '"femail"') + + # test having + tdSql.query("select stddev(dataint) from jsons1 group by jtag->'tag1' having stddev(dataint) > 0") + tdSql.checkRows(2) + + # subquery with json tag + tdSql.query("select * from (select jtag, dataint from jsons1)") + tdSql.checkRows(11) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, '{"tag1":5,"tag2":"beijing"}') + + tdSql.query("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") + tdSql.checkRows(11) + tdSql.checkData(0, 0, '"femail"') + tdSql.checkData(2, 0, 5) + + res = tdSql.getColNameList("select 
jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") + cname_list = [] + cname_list.append("jtag->'tag1'") + tdSql.checkColNameList(res, cname_list) + + tdSql.query("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)") + tdSql.checkRows(11) + tdSql.checkData(1, 1, "jsons1_1") + tdSql.checkData(1, 2, '"femail"') + + # union all + tdSql.error("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2") + tdSql.error("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1") + + tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag1' from jsons2_1") + tdSql.checkRows(2) + tdSql.query("select dataint,jtag->'tag1',tbname from jsons1 union all select dataint,jtag->'tag1',tbname from jsons2") + tdSql.checkRows(13) + tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2") + tdSql.checkRows(13) + + #show create table + tdSql.query("show create table jsons1") + tdSql.checkData(0, 1, 'create table `jsons1` (`ts` TIMESTAMP,`dataint` INT,`databool` BOOL,`datastr` NCHAR(50),`datastrbin` BINARY(150)) TAGS (`jtag` JSON)') + + #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares + tdSql.query("select count(*) from jsons1 where jtag is not null") + tdSql.checkData(0, 0, 10) + tdSql.query("select avg(dataint) from jsons1 where jtag is not null") + tdSql.checkData(0, 0, 5.3) + tdSql.error("select twa(dataint) from jsons1 where jtag is not null") + tdSql.error("select irate(dataint) from jsons1 where jtag is not null") + tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null") + tdSql.checkData(0, 0, 49) + tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 4.496912521) + tdSql.error("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null") + + #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp + 
tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 1) + tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 11) + tdSql.query("select first(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 2) + tdSql.query("select last(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 11) + tdSql.query("select top(dataint,100) from jsons1 where jtag->'tag1'>1") + tdSql.checkRows(3) + tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1") + tdSql.checkRows(3) + tdSql.error("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") + tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 1.5) + tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 11) + tdSql.error("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") + + #test calculation function:diff/derivative/spread/ceil/floor/round/ + tdSql.error("select diff(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.error("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1") + tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, 10) + tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkRows(3) + tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkRows(3) + tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.checkRows(3) + + #test TD-12077 + tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')") + tdSql.query("select jtag->'tag3' from jsons1_16") + tdSql.checkData(0, 0, '-2.111000000') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) 
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def caseDescription(self):
        '''
        case1: insert rows whose timestamps are earlier than already-inserted data
        case2: a SQL parse error while the meta request was already sent must not break callback handling
        case3: [TD-XXXX] insert statement with a double comma between value groups must fail
        '''
        return

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()

        tdSql.execute('create table tb (ts timestamp, speed int)')

        insertRows = 10
        tdLog.info("insert %d rows" % insertRows)
        for i in range(insertRows):
            tdSql.execute('insert into tb values (now + %dm, %d)' % (i, i))

        # rows that arrive with timestamps before the existing ones
        tdLog.info("insert earlier data")
        for minutes in range(5, 9):
            tdSql.execute('insert into tb values (now - %dm , 10)' % minutes)

        tdSql.query("select * from tb")
        tdSql.checkRows(insertRows + 4)

        # test case for https://jira.taosdata.com:18080/browse/TD-3716:
        # malformed insert (no "values" keyword) must be rejected
        tdSql.error("insert into tb(now, 1)")
        # test case for TD-10717: a stray double comma between value
        # groups must be rejected...
        tdSql.error("insert into tb values(now,1),,(now+1s,1)")
        # ...and the corrected statement still works
        tdSql.execute("insert into tb values(now+2s,1),(now+3s,1),(now+4s,1)")
        tdSql.query("select * from tb")
        tdSql.checkRows(insertRows + 4 + 3)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def caseDescription(self):
        '''
        case1: [TS-854] normal table batch insert with binding same table, different number of columns and timestamp in ascending order
        case2: [TS-854] normal table batch insert with binding same table, different number of columns and timestamp in descending order
        case3: [TS-854] normal table batch insert with binding same table, different number of columns and timestamp out of order
        case4: [TS-854] normal table batch insert with binding same table, different number of columns and same timestamp

        case5: [TS-854] normal table batch insert with binding different tables, different number of columns and timestamp in ascending order
        case6: [TS-854] normal table batch insert with binding different tables, different number of columns and timestamp in descending order
        case7: [TS-854] normal table batch insert with binding different tables, different number of columns and timestamp out of order
        case8: [TS-854] normal table batch insert with binding different tables, different number of columns and same timestamp

        case9: [TS-854] sub table batch insert with binding same table, different number of columns and timestamp in ascending order
        case10: [TS-854] sub table batch insert with binding same table, different number of columns and timestamp in descending order
        case11: [TS-854] sub table batch insert with binding same table, different number of columns and timestamp out of order
        case12: [TS-854] sub table batch insert with binding same table, different number of columns and same timestamp

        case13: [TS-854] sub table batch insert with binding different tables, different number of columns and timestamp in ascending order
        case14: [TS-854] sub table batch insert with binding different tables, different number of columns and timestamp in descending order
        case15: [TS-854] sub table batch insert with binding different tables, different number of columns and timestamp out of order
        case16: [TS-854] sub table batch insert with binding different tables, different number of columns and same timestamp

        case17: [TS-854] sub table batch insert with binding same table, different number of columns, different number of tags and timestamp in ascending order
        case18: [TS-854] sub table batch insert with binding same table, different number of columns, different number of tags and timestamp in descending order
        case19: [TS-854] sub table batch insert with binding same table, different number of columns, different number of tags and timestamp out of order
        case20: [TS-854] sub table batch insert with binding same table, different number of columns, different number of tags and same timestamp

        case21: [TS-854] sub table batch insert with binding different tables, different number of columns, different number of tags and timestamp in ascending order
        case22: [TS-854] sub table batch insert with binding different tables, different number of columns, different number of tags and timestamp in descending order
        case23: [TS-854] sub table batch insert with binding different tables, different number of columns, different number of tags and timestamp out of order
        case24: [TS-854] sub table batch insert with binding different tables, different number of columns, different number of tags and same timestamp
        '''
        return

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # fixed base timestamp so the generated rows are deterministic
        self.ts = 1607817000000

    def _run_group(self, sql, args, first_case, rows_per_batch, tables):
        """Run one four-case group of batch inserts and verify row counts.

        sql            -- insert template with four %d timestamp placeholders
        args           -- list of four timestamp tuples (ascending, descending,
                          out of order, all-equal), applied in order
        first_case     -- case number of the first tuple (for log messages)
        rows_per_batch -- distinct rows each batch adds when timestamps differ
        tables         -- tables to query after each batch and after restart
        """
        rows = 0
        case_no = first_case
        for arg in args:
            tdLog.info("test case for case %d" % case_no)
            tdLog.info(sql % arg)
            tdSql.execute(sql % arg)
            # the last tuple repeats a single timestamp, so the whole batch
            # collapses into one row
            if case_no == first_case + 3:
                rows = rows + 1
            else:
                rows = rows + rows_per_batch
            for table in tables:
                tdSql.query("select * from %s" % table)
                tdSql.checkRows(rows)
            case_no = case_no + 1

        # restart taosd and check the rows survived the restart
        tdDnodes.stop(1)
        tdDnodes.start(1)
        for table in tables:
            tdSql.query("select * from %s" % table)
            tdSql.checkRows(rows)

    def run(self):
        tdSql.prepare()

        args = [(self.ts + 1000, self.ts + 3000, self.ts + 5000, self.ts + 7000),   # ascending
                (self.ts + 8000, self.ts + 6000, self.ts + 4000, self.ts + 2000),   # descending
                (self.ts - 1000, self.ts - 5000, self.ts - 3000, self.ts - 8000),   # out of order
                (self.ts, self.ts, self.ts, self.ts)]                               # same timestamp

        # case 1, 2, 3, 4: normal table, same table bound repeatedly
        tdSql.execute("create table tb1(ts timestamp, c1 double, c2 double, c3 double, c4 double)")
        tdLog.info("test case for case 1, 2, 3, 4")
        sql = "insert into tb1(ts, c1) values(%d, 0.0) tb1(ts, c1) values(%d, 0.0) tb1(ts, c1) values(%d, 0.0) tb1(ts, c1, c2, c3, c4) values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 1, 4, ["tb1"])

        # case 5, 6, 7, 8: normal tables, two different tables in one batch
        tdSql.execute("create table tb2(ts timestamp, c1 int, c2 int, c3 int, c4 int)")
        tdSql.execute("create table tb3(ts timestamp, c1 double, c2 double, c3 double, c4 double)")
        tdLog.info("test case for case 5, 6, 7, 8")
        sql = "insert into tb2(ts, c1) values(%d, 0) tb2(ts, c1, c2, c3, c4) values(%d, 0, 0, 0, 0) tb3(ts, c2) values(%d, 0.0) tb3(ts, c1, c2, c3, c4) values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 5, 2, ["tb2", "tb3"])

        # case 9, 10, 11, 12: sub table, same table bound repeatedly
        tdSql.execute("create table stb(ts timestamp, c1 double, c2 double, c3 double, c4 double) tags(t1 nchar(20))")
        tdLog.info("test case for case 9, 10, 11, 12")
        sql = "insert into t1(ts, c1) using stb tags('tag1') values(%d, 0.0) t1(ts, c1) using stb tags('tag1') values(%d, 0.0) t1(ts, c1) using stb tags('tag1') values(%d, 0.0) t1(ts, c1, c2, c3, c4) using stb tags('tag1') values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 9, 4, ["stb"])

        # case 13, 14, 15, 16: sub tables, two different super tables in one batch
        tdSql.execute("create table stb2(ts timestamp, c1 int, c2 int, c3 int, c4 int) tags(t1 nchar(20))")
        tdSql.execute("create table stb3(ts timestamp, c1 double, c2 double, c3 double, c4 double) tags(t1 binary(20))")
        tdLog.info("test case for case 13, 14, 15, 16")
        sql = "insert into t2(ts, c1) using stb2 tags('tag2') values(%d, 0) t2(ts, c1, c2, c3, c4) using stb2 tags('tag2') values(%d, 0, 0, 0, 0) t3(ts, c2) using stb3 tags('tag3') values(%d, 0.0) t3(ts, c1, c2, c3, c4) using stb3 tags('tag3') values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 13, 2, ["stb2", "stb3"])

        # case 17, 18, 19, 20: sub table, same table, varying number of tags
        tdSql.execute("drop table if exists stb")
        tdSql.execute("create table stb(ts timestamp, c1 double, c2 double, c3 double, c4 double) tags(t1 nchar(20), t2 int, t3 binary(20))")
        tdLog.info("test case for case 17, 18, 19, 20")
        sql = "insert into t1(ts, c1) using stb(t1) tags('tag1') values(%d, 0.0) t1(ts, c1) using stb(t2) tags(1) values(%d, 0.0) t1(ts, c1) using stb(t1, t2) tags('tag1', 1) values(%d, 0.0) t1(ts, c1, c2, c3, c4) using stb(t1, t2, t3) tags('tag1', 1, 'tag3') values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 17, 4, ["stb"])

        # case 21, 22, 23, 24: sub tables, different super tables, varying tags
        tdSql.execute("drop table if exists stb2")
        tdSql.execute("drop table if exists stb3")
        tdSql.execute("create table stb2(ts timestamp, c1 int, c2 int, c3 int, c4 int) tags(t1 nchar(20), t2 int)")
        tdSql.execute("create table stb3(ts timestamp, c1 double, c2 double, c3 double, c4 double) tags(t1 binary(20), t2 double)")
        tdLog.info("test case for case 21, 22, 23, 24")
        sql = "insert into t2(ts, c1) using stb2(t1) tags('tag2') values(%d, 0) t2(ts, c1, c2, c3, c4) using stb2(t1, t2) tags('tag2', 1) values(%d, 0, 0, 0, 0) t3(ts, c2) using stb3(t1) tags('tag3') values(%d, 0.0) t3(ts, c1, c2, c3, c4) using stb3(t1, t2) tags('tag3', 0.0) values(%d, 0.0, 0.0, 0.0, 0.0)"
        self._run_group(sql, args, 21, 2, ["stb2", "stb3"])

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/2-query/5-session/.gitkeep b/tests/develop-test/2-query/5-session/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/2-query/6-state_window/.gitkeep b/tests/develop-test/2-query/6-state_window/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/2-query/7-nest/.gitkeep b/tests/develop-test/2-query/7-nest/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/2-query/8-udf/.gitkeep b/tests/develop-test/2-query/8-udf/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/2-query/9-others/.gitkeep b/tests/develop-test/2-query/9-others/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/c#/.gitkeep b/tests/develop-test/3-connectors/c#/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/go/.gitkeep b/tests/develop-test/3-connectors/go/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/java/.gitkeep b/tests/develop-test/3-connectors/java/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/nodejs/.gitkeep b/tests/develop-test/3-connectors/nodejs/.gitkeep new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/restful/.gitkeep b/tests/develop-test/3-connectors/restful/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/3-connectors/rust/.gitkeep b/tests/develop-test/3-connectors/rust/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/4-taosAdapter/.gitkeep b/tests/develop-test/4-taosAdapter/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/.gitkeep b/tests/develop-test/5-taos-tools/taosbenchmark/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/5-taos-tools/taosdump/.gitkeep b/tests/develop-test/5-taos-tools/taosdump/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/6-cluster/.gitkeep b/tests/develop-test/6-cluster/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/7-customer/.gitkeep b/tests/develop-test/7-customer/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/fulltest.sh b/tests/develop-test/fulltest.sh new file mode 100755 index 0000000000000000000000000000000000000000..9ec1dd23ac27928950befc35bd49ba8b4e6270eb --- /dev/null +++ b/tests/develop-test/fulltest.sh @@ -0,0 +1,3 @@ +python3 test.py -f 0-management/3-tag/json_tag.py +python3 test.py -f 1-insert/0-sql/basic.py +python3 test.py -f 1-insert/0-sql/batchInsert.py \ No 
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/

# -*- coding: utf-8 -*-
import sys
import os
import getopt
import subprocess
import time
# fix: importlib is used below for dynamic case loading but was never imported
import importlib
from distutils.log import warn as printf
from fabric2 import Connection
sys.path.append("../pytest")
from util.log import *
from util.dnodes import *
from util.cases import *

import taos


if __name__ == "__main__":

    # command-line defaults
    fileName = "all"
    deployPath = ""
    masterIp = ""
    testCluster = False
    valgrind = 0
    logSql = True
    stop = 0
    restart = False
    windows = 0
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [
        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
                'A collection of test cases written using Python')
            tdLog.printNoPrefix('-f Name of test case file written by Python')
            tdLog.printNoPrefix('-p Deploy Path for Simulator')
            tdLog.printNoPrefix('-m Master Ip for Simulator')
            tdLog.printNoPrefix('-l logSql Flag')
            tdLog.printNoPrefix('-s stop All dnodes')
            tdLog.printNoPrefix('-c Test Cluster Flag')
            tdLog.printNoPrefix('-g valgrind Test Flag')
            tdLog.printNoPrefix('-r taosd restart test')
            tdLog.printNoPrefix('-w taos on windows')
            sys.exit(0)

        if key in ['-r', '--restart']:
            restart = True

        if key in ['-f', '--file']:
            fileName = value

        if key in ['-p', '--path']:
            deployPath = value

        if key in ['-m', '--master']:
            masterIp = value

        if key in ['-l', '--logSql']:
            if (value.upper() == "TRUE"):
                logSql = True
            elif (value.upper() == "FALSE"):
                logSql = False
            else:
                # fix: report the offending input, not the current flag value
                tdLog.printNoPrefix("logSql value %s is invalid" % value)
                sys.exit(0)

        if key in ['-c', '--cluster']:
            testCluster = True

        if key in ['-g', '--valgrind']:
            valgrind = 1

        if key in ['-s', '--stop']:
            stop = 1

        if key in ['-w', '--windows']:
            windows = 1

    if (stop != 0):
        # -s: kill any running daemons (taosd, or valgrind when -g was used)
        if (valgrind == 0):
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled

        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True)

        while(processID):
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

        # free the taosd service ports as well
        for port in range(6030, 6041):
            # fix: lsof reports listeners as "LISTEN"; the old pattern
            # "LISTEn" is case-sensitive and never matched
            usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                # fix: check_output returns bytes; decode before interpolating
                killCmd = "kill -TERM %s" % processID.decode().strip()
                os.system(killCmd)
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
        if valgrind:
            time.sleep(2)

        tdLog.info('stop All dnodes')

    if masterIp == "":
        host = '127.0.0.1'
    else:
        host = masterIp

    tdLog.info("Procedures for tdengine deployed in %s" % (host))
    if windows:
        # deploy a local simulator, trigger the Linux-side run over SSH,
        # then run the Windows variant of the case against it
        tdCases.logSql(logSql)
        tdLog.info("Procedures for testing self-deployment")
        td_client = TDSimClient("C:\\TDengine")
        td_client.deploy()
        remote_conn = Connection("root@%s" % host)
        with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
            remote_conn.run("python3 ./test.py")
        conn = taos.connect(
            host="%s" % (host),
            config=td_client.cfgDir)
        tdCases.runOneWindows(conn, fileName)
    else:
        tdDnodes.init(deployPath)
        tdDnodes.setTestCluster(testCluster)
        tdDnodes.setValgrind(valgrind)
        tdDnodes.stopAll()
        # a file that registers itself via tdCases.addLinux is a framework
        # case and may carry a custom config dict
        is_test_framework = 0
        key_word = 'tdCases.addLinux'
        try:
            # fix: use a context manager so the case file handle is closed
            with open(fileName) as case_file:
                if key_word in case_file.read():
                    is_test_framework = 1
        except Exception:
            pass
        if is_test_framework:
            moduleName = fileName.replace(".py", "").replace("/", ".")
            uModule = importlib.import_module(moduleName)
            try:
                ucase = uModule.TDTestCase()
                tdDnodes.deploy(1, ucase.updatecfgDict)
            except Exception:
                # case provides no updatecfgDict — deploy with defaults
                tdDnodes.deploy(1, {})
        else:
            tdDnodes.deploy(1, {})
        tdDnodes.start(1)

        tdCases.logSql(logSql)

        if testCluster:
            tdLog.info("Procedures for testing cluster")
            if fileName == "all":
                tdCases.runAllCluster()
            else:
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            conn = taos.connect(
                host,
                config=tdDnodes.getSimCfgPath())
            if fileName == "all":
                tdCases.runAllLinux(conn)
            else:
                # fix: this is the Linux path — it previously called
                # runOneWindows here (the restart path below already uses
                # runOneLinux)
                tdCases.runOneLinux(conn, fileName)
        if restart:
            if fileName == "all":
                tdLog.info("not need to query ")
            else:
                sp = fileName.rsplit(".", 1)
                if len(sp) == 2 and sp[1] == "py":
                    tdDnodes.stopAll()
                    tdDnodes.start(1)
                    time.sleep(1)
                    conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
                    tdLog.info("Procedures for tdengine deployed in %s" % (host))
                    tdLog.info("query test after taosd restart")
                    tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
                else:
                    tdLog.info("not need to query")
        # NOTE(review): conn is never created on the testCluster path, so
        # this close() would raise there — confirm cluster runs exit earlier
        conn.close()
b/tests/examples/C#/C#checker/TDengineDriver.cs deleted file mode 100644 index 0f6477ff75a9b457069112c477746dd036c71251..0000000000000000000000000000000000000000 --- a/tests/examples/C#/C#checker/TDengineDriver.cs +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return 
"BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = 
CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - - //get precision in restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - - //schemaless API - [DllImport("taos",SetLastError = true, 
EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision); - } -} diff --git a/tests/examples/C#/TDengineDriver.cs b/tests/examples/C#/TDengineDriver.cs deleted file mode 100644 index 6e86b692f7edf512c751a49590eca3bf74949091..0000000000000000000000000000000000000000 --- a/tests/examples/C#/TDengineDriver.cs +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return 
"TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == 
IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - - //get precision in restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - - //schemaless API - [DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision); - } -} diff --git a/tests/examples/C#/TDengineTest.cs b/tests/examples/C#/TDengineTest.cs deleted file mode 100644 index f4ee62527feda4d43b21f37e9c513af2053e1f9d..0000000000000000000000000000000000000000 --- a/tests/examples/C#/TDengineTest.cs +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -using System; -using System.Text; -using System.Collections.Generic; -using System.Runtime.InteropServices; -using System.Collections; - -namespace TDengineDriver -{ - class TDengineTest - { - //connect parameters - private string host; - private string configDir; - private string user; - private string password; - private short port = 0; - - //sql parameters - private string dbName; - private string stableName; - private string tablePrefix; - - private bool isInsertData; - private bool isQueryData; - - private long tableCount; - private long totalRows; - private long batchRows; - private long beginTimestamp = 1551369600000L; - - private IntPtr conn = IntPtr.Zero; - private long rowsInserted = 0; - - static void Main(string[] args) - { - TDengineTest tester = new TDengineTest(); - tester.ReadArgument(args); - - Console.WriteLine("---------------------------------------------------------------"); - Console.WriteLine("Starting Testing..."); - Console.WriteLine("---------------------------------------------------------------"); - - tester.InitTDengine(); - tester.ConnectTDengine(); - tester.CreateDbAndTable(); - tester.ExecuteInsert(); - tester.ExecuteQuery(); - tester.CloseConnection(); - - Console.WriteLine("---------------------------------------------------------------"); - Console.WriteLine("Stop Testing..."); - Console.WriteLine("---------------------------------------------------------------"); - - } - - public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { 
- String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - - long tmpVal = Convert.ToInt64(tmp); - if (tmpVal < minVal || tmpVal > maxVal) - { - Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); - ExitProgram(); - } - - return tmpVal; - } - } - - return defaultValue; - } - - public String GetArgumentAsString(String[] argv, String argName, String defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { - String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - return tmp; - } - } - - return defaultValue; - } - - public void PrintHelp(String[] argv) - { - for (int i = 0; i < argv.Length; ++i) - { - if ("--help" == argv[i]) - { - String indent = " "; - Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); - Console.WriteLine("{0:G}{1:G}", indent, "-h"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); - Console.WriteLine("{0:G}{1:G}", indent, "-u"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); - Console.WriteLine("{0:G}{1:G}", indent, "-p"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); - Console.WriteLine("{0:G}{1:G}", indent, "-d"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); - Console.WriteLine("{0:G}{1:G}", indent, "-s"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); - Console.WriteLine("{0:G}{1:G}", indent, "-t"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table 
prefixs, default is t"); - Console.WriteLine("{0:G}{1:G}", indent, "-w"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); - Console.WriteLine("{0:G}{1:G}", indent, "-r"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); - Console.WriteLine("{0:G}{1:G}", indent, "-n"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-b"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-i"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); - Console.WriteLine("{0:G}{1:G}", indent, "-c"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); - - ExitProgram(); - } - } - } - - public void ReadArgument(String[] argv) - { - PrintHelp(argv); - host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); - user = this.GetArgumentAsString(argv, "-u", "root"); - password = this.GetArgumentAsString(argv, "-p", "taosdata"); - dbName = this.GetArgumentAsString(argv, "-d", "db"); - stableName = this.GetArgumentAsString(argv, "-s", "st"); - tablePrefix = this.GetArgumentAsString(argv, "-t", "t"); - isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; - isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; - tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); - batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000, 500); - totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); - configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); - } - - public void InitTDengine() - { - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); - TDengine.Init(); - Console.WriteLine("TDengine Initialization 
finished"); - } - - public void ConnectTDengine() - { - string db = ""; - this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); - if (this.conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - ExitProgram(); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - } - - public void CreateDbAndTable() - { - if (!this.isInsertData) - { - return; - } - - StringBuilder sql = new StringBuilder(); - sql.Append("create database if not exists ").Append(this.dbName); - IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - sql.Clear(); - sql.Append("use ").Append(this.dbName); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - sql.Clear(); - sql.Append("create table if not exists ").Append(this.stableName).Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - for (int i = 0; 
i < this.tableCount; i++) - { - sql.Clear(); - sql = sql.Append("create table if not exists ").Append(this.tablePrefix).Append(i) - .Append(" using ").Append(this.stableName).Append(" tags(").Append(i).Append(")"); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - } - - Console.WriteLine("create db and table success"); - } - - public void ExecuteInsert() - { - if (!this.isInsertData) - { - return; - } - - System.DateTime start = new System.DateTime(); - long loopCount = this.totalRows / this.batchRows; - - for (int table = 0; table < this.tableCount; ++table) - { - for (long loop = 0; loop < loopCount; loop++) - { - StringBuilder sql = new StringBuilder(); - sql.Append("insert into ").Append(this.tablePrefix).Append(table).Append(" values"); - for (int batch = 0; batch < this.batchRows; ++batch) - { - long rows = loop * this.batchRows + batch; - sql.Append("(") - .Append(this.beginTimestamp + rows) - .Append(", 1, 2, 3,") - .Append(rows) - .Append(", 5, 6, 7, 'abc', 'def')"); - } - IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - } - - int affectRows = TDengine.AffectRows(res); - this.rowsInserted += affectRows; - - TDengine.FreeResult(res); - } - } - - System.DateTime end = new System.DateTime(); - TimeSpan ts = end - start; - - Console.Write("Total {0:G} rows inserted, {1:G} rows failed, time spend {2:G} seconds.\n" - , this.rowsInserted, this.totalRows * this.tableCount - this.rowsInserted, 
ts.TotalSeconds); - } - - public void ExecuteQuery() - { - if (!this.isQueryData) - { - return; - } - - System.DateTime start = new System.DateTime(); - long queryRows = 0; - - for (int i = 0; i < 1/*this.tableCount*/; ++i) - { - String sql = "select * from " + this.dbName + "." + tablePrefix + i; - Console.WriteLine(sql); - - IntPtr res = TDengine.Query(conn, sql); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - - int fieldCount = TDengine.FieldCount(res); - Console.WriteLine("field count: " + fieldCount); - - List metas = TDengine.FetchFields(res); - for (int j = 0; j < metas.Count; j++) - { - TDengineMeta meta = (TDengineMeta)metas[j]; - Console.WriteLine("index:" + j + ", type:" + meta.type + ", typename:" + meta.TypeName() + ", name:" + meta.name + ", size:" + meta.size); - } - - IntPtr rowdata; - StringBuilder builder = new StringBuilder(); - while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) - { - queryRows++; - for (int fields = 0; fields < fieldCount; ++fields) - { - TDengineMeta meta = metas[fields]; - int offset = IntPtr.Size * fields; - IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - - builder.Append("---"); - - if (data == IntPtr.Zero) - { - builder.Append("NULL"); - continue; - } - - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; - builder.Append(v1); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - byte v2 = Marshal.ReadByte(data); - builder.Append(v2); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - builder.Append(v3); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - builder.Append(v4); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - builder.Append(v5); - break; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - builder.Append(v6); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - builder.Append(v7); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - string v8 = Marshal.PtrToStringAnsi(data); - builder.Append(v8); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - builder.Append(v9); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - string v10 = Marshal.PtrToStringAnsi(data); - builder.Append(v10); - break; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - byte v11 = Marshal.ReadByte(data); - builder.Append(v11); - break; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - ushort v12 = (ushort)Marshal.ReadInt16(data); - builder.Append(v12); - break; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - uint v13 = (uint)Marshal.ReadInt32(data); - builder.Append(v13); - break; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - ulong v14 = (ulong)Marshal.ReadInt64(data); - builder.Append(v14); - break; - } - } - builder.Append("---"); - - if (queryRows <= 10) - { - Console.WriteLine(builder.ToString()); - } - builder.Clear(); - } - - if (TDengine.ErrorNo(res) != 0) - { - Console.Write("Query is not complete, Error {0:G}", - TDengine.ErrorNo(res), TDengine.Error(res)); - } - Console.WriteLine(""); - - TDengine.FreeResult(res); - } - - 
System.DateTime end = new System.DateTime(); - TimeSpan ts = end - start; - - Console.Write("Total {0:G} rows inserted, {1:G} rows query, time spend {2:G} seconds.\n" - , this.rowsInserted, queryRows, ts.TotalSeconds); - } - - public void CloseConnection() - { - if (this.conn != IntPtr.Zero) - { - TDengine.Close(this.conn); - } - } - - static void ExitProgram() - { - TDengine.Cleanup(); - System.Environment.Exit(0); - } - } -} diff --git a/tests/examples/C#/TDengineTest/TDengineTest.cs b/tests/examples/C#/TDengineTest/TDengineTest.cs new file mode 100644 index 0000000000000000000000000000000000000000..9f84634ffb400e5d891a9fdeaeee0c013829f969 --- /dev/null +++ b/tests/examples/C#/TDengineTest/TDengineTest.cs @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; + +namespace TDengineDriver +{ + class TDengineTest + { + //connect parameters + private string host; + private string configDir; + private string user; + private string password; + private short port = 0; + + //sql parameters + private string dbName; + private string stableName; + private string tablePrefix; + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineTest tester = new TDengineTest(); + tester.ReadArgument(args); + + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Starting Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.CreateDbAndTable(); + tester.ExecuteInsert(); + tester.ExecuteQuery(); + tester.CloseConnection(); + + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Stop Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + } + + public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + + long tmpVal = Convert.ToInt64(tmp); + if (tmpVal < minVal || tmpVal > maxVal) + { + Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); + 
ExitProgram(); + } + + return tmpVal; + } + } + + return defaultValue; + } + + public String GetArgumentAsString(String[] argv, String argName, String defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + return tmp; + } + } + + return defaultValue; + } + + public void PrintHelp(String[] argv) + { + for (int i = 0; i < argv.Length; ++i) + { + if ("--help" == argv[i]) + { + String indent = " "; + Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); + Console.WriteLine("{0:G}{1:G}", indent, "-h"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); + Console.WriteLine("{0:G}{1:G}", indent, "-u"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); + Console.WriteLine("{0:G}{1:G}", indent, "-p"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); + Console.WriteLine("{0:G}{1:G}", indent, "-d"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); + Console.WriteLine("{0:G}{1:G}", indent, "-s"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); + Console.WriteLine("{0:G}{1:G}", indent, "-t"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); + Console.WriteLine("{0:G}{1:G}", indent, "-w"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); + Console.WriteLine("{0:G}{1:G}", indent, "-r"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); + Console.WriteLine("{0:G}{1:G}", 
indent, "-n"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-b"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-i"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); + Console.WriteLine("{0:G}{1:G}", indent, "-c"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); + + ExitProgram(); + } + } + } + + public void ReadArgument(String[] argv) + { + PrintHelp(argv); + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); + user = this.GetArgumentAsString(argv, "-u", "root"); + password = this.GetArgumentAsString(argv, "-p", "taosdata"); + dbName = this.GetArgumentAsString(argv, "-d", "tdengint_test_cs"); + stableName = this.GetArgumentAsString(argv, "-s", "st"); + tablePrefix = this.GetArgumentAsString(argv, "-t", "t"); + isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; + isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; + tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); + batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000,500 ); + totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); + configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + Console.WriteLine("TDengine Initialization finished"); + } + + public void ConnectTDengine() + { + string db = ""; + Console.WriteLine("Host:{0}", this.host); + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + ExitProgram(); + } + else 
+ { + Console.WriteLine("Connect to TDengine success"); + } + } + + public void CreateDbAndTable() + { + if (!this.isInsertData) + { + return; + } + + StringBuilder sql = new StringBuilder(); + sql.Append("create database if not exists ").Append(this.dbName); + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + sql.Clear(); + sql.Append("use ").Append(this.dbName); + res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + sql.Clear(); + sql.Append("create table if not exists ").Append(this.stableName).Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); + res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + for (int i = 0; i < this.tableCount; i++) + { + sql.Clear(); + sql = sql.Append("create table if not exists ").Append(this.tablePrefix).Append(i) + .Append(" using ").Append(this.stableName).Append(" tags(").Append(i).Append(")"); + res = TDengine.Query(this.conn, sql.ToString()); 
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + + Console.WriteLine("create db and table success"); + } + + public void ExecuteInsert() + { + if (!this.isInsertData) + { + return; + } + + System.DateTime start = new System.DateTime(); + long loopCount = this.totalRows / this.batchRows; + + for (int table = 0; table < this.tableCount; ++table) + { + for (long loop = 0; loop < loopCount; loop++) + { + StringBuilder sql = new StringBuilder(); + sql.Append("insert into ").Append(this.tablePrefix).Append(table).Append(" values"); + for (int batch = 0; batch < this.batchRows; ++batch) + { + + long rows = loop * this.batchRows + batch; + sql.Append("(") + .Append(this.beginTimestamp + rows) + .Append(", 1, 2, 3,") + .Append(rows) + .Append(", 5, 6, 7, 'abc', 'def')"); + } + IntPtr res = TDengine.Query(this.conn,sql.ToString() ); + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + } + + int affectRows = TDengine.AffectRows(res); + this.rowsInserted += affectRows; + + TDengine.FreeResult(res); + } + } + + System.DateTime end = new System.DateTime(); + TimeSpan ts = end - start; + + Console.Write("Total {0:G} rows inserted, {1:G} rows failed, time spend {2:G} seconds.\n" + , this.rowsInserted, this.totalRows * this.tableCount - this.rowsInserted, ts.TotalSeconds); + } + + public void ExecuteQuery() + { + if (!this.isQueryData) + { + return; + } + + System.DateTime start = new System.DateTime(); + long queryRows = 0; + + for (int i = 0; i < 1/*this.tableCount*/; ++i) + { + String sql = "select * from " + this.dbName 
+ "." + tablePrefix + i; + Console.WriteLine(sql); + + IntPtr res = TDengine.Query(conn, sql); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + + int fieldCount = TDengine.FieldCount(res); + Console.WriteLine("field count: " + fieldCount); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + Console.WriteLine("index:" + j + ", type:" + meta.type + ", typename:" + meta.TypeName() + ", name:" + meta.name + ", size:" + meta.size); + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v11 = Marshal.ReadByte(data); + builder.Append(v11); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v12 = (ushort)Marshal.ReadInt16(data); + builder.Append(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v13 = (uint)Marshal.ReadInt32(data); + builder.Append(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v14 = (ulong)Marshal.ReadInt64(data); + builder.Append(v14); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", + TDengine.ErrorNo(res), TDengine.Error(res)); + } + Console.WriteLine(""); + + TDengine.FreeResult(res); + } + + 
System.DateTime end = new System.DateTime(); + TimeSpan ts = end - start; + + Console.Write("Total {0:G} rows inserted, {1:G} rows query, time spend {2:G} seconds.\n" + , this.rowsInserted, queryRows, ts.TotalSeconds); + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + } + } + + static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + } +} diff --git a/tests/examples/C#/TDengineTest/TDengineTest.csproj b/tests/examples/C#/TDengineTest/TDengineTest.csproj new file mode 100644 index 0000000000000000000000000000000000000000..211c927d3d36df5941291319e3c85707610c6a8f --- /dev/null +++ b/tests/examples/C#/TDengineTest/TDengineTest.csproj @@ -0,0 +1,12 @@ + + + + Exe + net5.0 + + + + + + + diff --git a/tests/examples/C#/schemaless/schemaless.csproj b/tests/examples/C#/schemaless/schemaless.csproj new file mode 100644 index 0000000000000000000000000000000000000000..d132e34589525826d5b0ff0f0055156fad2d5a38 --- /dev/null +++ b/tests/examples/C#/schemaless/schemaless.csproj @@ -0,0 +1,12 @@ + + + + Exe + net5.0 + + + + + + + diff --git a/tests/examples/C#/stmt/StmtDemo.cs b/tests/examples/C#/stmt/StmtDemo.cs new file mode 100644 index 0000000000000000000000000000000000000000..c2b299140976ed36f245f5693a2a047607c5b5be --- /dev/null +++ b/tests/examples/C#/stmt/StmtDemo.cs @@ -0,0 +1,549 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; +namespace TDengineDriver +{ + public class StmtDemo + { + //connection parameters + private string host = "127.0.0.1"; + private string configDir = "C:/TDengine/cfg"; + private string user = "root"; + private string passwd = "taosdata"; + private short port = 0; + + private IntPtr conn = IntPtr.Zero; + private IntPtr stmt = IntPtr.Zero; + + static void Main(string[] args) + { + string dropDB = "drop database if exists csharp"; + string createDB = "create database if not exists csharp keep 36500"; + string selectDB = "use csharp"; + string stmtSql = "insert into ? using stmtdemo tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + string createTable = "create stable stmtdemo (ts timestamp " + + ",b bool" + + ",v1 tinyint" + + ",v2 smallint" + + ",v4 int" + + ",v8 bigint" + + ",f4 float" + + ",f8 double" + + ",u1 tinyint unsigned" + + ",u2 smallint unsigned" + + ",u4 int unsigned" + + ",u8 bigint unsigned" + + ",bin binary(200)" + + ",blob nchar(200)" + + ")tags(" + + "bo bool" + + ",tt tinyint" + + ",si smallint" + + ",ii int" + + ",bi bigint" + + ",tu tinyint unsigned" + + ",su smallint unsigned" + + ",iu int unsigned" + + ",bu bigint unsigned" + + ",ff float " + + ",dd double " + + ",bb binary(200)" + + ",nc nchar(200)" + + ")"; + + string dropTable = "drop table if exists stmtdemo"; + + string tableName = "t1"; + StmtDemo stmtDemo = new StmtDemo(); + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Start StmtDemo insert Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + stmtDemo.InitTDengine(); + //TDengine connect + stmtDemo.ConnectTDengine(); + + //before stmt + stmtDemo.ExecuteQuery(dropDB); + stmtDemo.ExecuteQuery(createDB); + stmtDemo.ExecuteQuery(selectDB); + 
stmtDemo.ExecuteQuery(dropTable); + stmtDemo.ExecuteQuery(createTable); + + stmtDemo.StmtInit(); + string[] tableList = { "stmtdemo" }; + stmtDemo.loadTableInfo(tableList); + + stmtDemo.StmtPrepare(stmtSql); + TAOS_BIND[] binds = stmtDemo.InitBindArr(); + TAOS_MULTI_BIND[] mbinds = stmtDemo.InitMultBindArr(); + stmtDemo.SetTableNameTags(tableName, binds); + stmtDemo.BindParamBatch(mbinds); + stmtDemo.AddBatch(); + stmtDemo.StmtExecute(); + TaosBind.FreeTaosBind(binds); + TaosMultiBind.FreeTaosBind(mbinds); + stmtDemo.StmtClose(); + + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("start StmtDemo select Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + stmtDemo.StmtInit(); + string selectSql = "SELECT * FROM stmtdemo WHERE v1 > ? AND v4 < ?"; + + stmtDemo.StmtPrepare(selectSql); + + TAOS_BIND[] queryCondition = new TAOS_BIND[2]; + queryCondition[0] = TaosBind.BindTinyInt(0); + queryCondition[1] = TaosBind.BindInt(1000); + + Console.WriteLine(selectSql); + stmtDemo.BindParam(queryCondition); + stmtDemo.StmtExecute(); + + stmtDemo.StmtUseResult(); + + stmtDemo.StmtClose(); + TaosBind.FreeTaosBind(queryCondition); + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Stop StmtDemo Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + stmtDemo.CloseConnection(); + } + public TAOS_BIND[] InitBindArr() + { + TAOS_BIND[] binds = new TAOS_BIND[13]; + binds[0] = TaosBind.BindBool(true); + binds[1] = TaosBind.BindTinyInt(-2); + binds[2] = TaosBind.BindSmallInt(short.MaxValue); + binds[3] = TaosBind.BindInt(int.MaxValue); + binds[4] = TaosBind.BindBigInt(Int64.MaxValue); + binds[5] = TaosBind.BindUTinyInt(byte.MaxValue - 1); + binds[6] = TaosBind.BindUSmallInt(UInt16.MaxValue - 1); + binds[7] = TaosBind.BindUInt(uint.MinValue + 1); + binds[8] = 
TaosBind.BindUBigInt(UInt64.MinValue + 1); + binds[9] = TaosBind.BindFloat(11.11F); + binds[10] = TaosBind.BindDouble(22.22D); + binds[11] = TaosBind.BindBinary("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + binds[12] = TaosBind.BindNchar("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}"); + return binds; + } + + public TAOS_MULTI_BIND[] InitMultBindArr() + { + TAOS_MULTI_BIND[] mBinds = new TAOS_MULTI_BIND[14]; + long[] tsArr = new long[5] { 1637064040000, 1637064041000, 1637064042000, 1637064043000, 1637064044000 }; + bool?[] boolArr = new bool?[5] { true, false, null, true, true }; + sbyte?[] tinyIntArr = new sbyte?[5] { -127, 0, null, 8, 127 }; + short?[] shortArr = new short?[5] { short.MinValue + 1, -200, null, 100, short.MaxValue }; + int?[] intArr = new int?[5] { -200, -100, null, 0, 300 }; + long?[] longArr = new long?[5] { long.MinValue + 1, -2000, null, 1000, long.MaxValue }; + float?[] floatArr = new float?[5] { float.MinValue + 1, -12.1F, null, 0F, float.MaxValue }; + double?[] doubleArr = new double?[5] { double.MinValue + 1, -19.112D, null, 0D, double.MaxValue }; + byte?[] uTinyIntArr = new byte?[5] { byte.MinValue, 12, null, 89, byte.MaxValue - 1 }; + ushort?[] uShortArr = new ushort?[5] { ushort.MinValue, 200, null, 400, ushort.MaxValue - 1 }; + uint?[] uIntArr = new uint?[5] { uint.MinValue, 100, null, 2, uint.MaxValue - 1 }; + ulong?[] uLongArr = new ulong?[5] { ulong.MinValue, 2000, null, 1000, long.MaxValue - 1 }; + string[] binaryArr = new string[5] { "1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", String.Empty, null, "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM", "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890~!@#$%^&*()_+=-`[]{}:,./<>?" 
}; + string[] ncharArr = new string[5] { "1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", null, "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM", "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890~!@#$%^&*()_+=-`[]{}:,./<>?", string.Empty }; + mBinds[0] = TaosMultiBind.MultiBindTimestamp(tsArr); + mBinds[1] = TaosMultiBind.MultiBindBool(boolArr); + mBinds[2] = TaosMultiBind.MultiBindTinyInt(tinyIntArr); + mBinds[3] = TaosMultiBind.MultiBindSmallInt(shortArr); + mBinds[4] = TaosMultiBind.MultiBindInt(intArr); + mBinds[5] = TaosMultiBind.MultiBindBigint(longArr); + mBinds[6] = TaosMultiBind.MultiBindFloat(floatArr); + mBinds[7] = TaosMultiBind.MultiBindDouble(doubleArr); + mBinds[8] = TaosMultiBind.MultiBindUTinyInt(uTinyIntArr); + mBinds[9] = TaosMultiBind.MultiBindUSmallInt(uShortArr); + mBinds[10] = TaosMultiBind.MultiBindUInt(uIntArr); + mBinds[11] = TaosMultiBind.MultiBindUBigInt(uLongArr); + mBinds[12] = TaosMultiBind.MultiBindBinary(binaryArr); + mBinds[13] = TaosMultiBind.MultiBindNchar(ncharArr); + return mBinds; + } + + public void loadTableInfo(string[] arr) + { + if (TDengine.LoadTableInfo(this.conn, arr) == 0) + { + Console.WriteLine("load table info success"); + } + else + { + Console.WriteLine("load table info failed"); + ExitProgram(); + } + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + Console.WriteLine("TDengine Initialization finished"); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.passwd, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + ExitProgram(); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + } + + public void ExecuteQuery(String sql) + { + IntPtr res = TDengine.Query(conn, sql); + if ((res == 
IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + + public void StmtInit() + { + this.stmt = TDengine.StmtInit(conn); + if (this.stmt == IntPtr.Zero) + { + Console.WriteLine("Init stmt failed"); + ExitProgram(); + } + else + { + Console.WriteLine("Init stmt success"); + } + } + + public void StmtPrepare(string sql) + { + int res = TDengine.StmtPrepare(this.stmt, sql); + if (res == 0) + { + Console.WriteLine("stmt prepare success"); + } + else + { + Console.WriteLine("stmt prepare failed " + TDengine.StmtErrorStr(stmt)); + ExitProgram(); + } + } + + public void SetTableName(String tableName) + { + int res = TDengine.StmtSetTbname(this.stmt, tableName); + Console.WriteLine("setTableName():" + res); + if (res == 0) + { + Console.WriteLine("set_tbname success"); + } + else + { + Console.Write("set_tbname failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + + public void SetTableNameTags(String tableName, TAOS_BIND[] tags) + { + int res = TDengine.StmtSetTbnameTags(this.stmt, tableName, tags); + if (res == 0) + { + Console.WriteLine("set tbname && tags success"); + + } + else + { + Console.Write("set tbname && tags failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + + public void SetSubTableName(string name) + { + int res = TDengine.StmtSetSubTbname(this.stmt, name); + if (res == 0) + { + Console.WriteLine("set subtable name success"); + } + else + { + Console.Write("set subtable name failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + + } + + public void BindParam(TAOS_BIND[] binds) + { + Console.WriteLine("in bindParam()"); + + int res = TDengine.StmtBindParam(this.stmt, binds); + if (res == 0) + { + 
Console.WriteLine("bind para success"); + } + else + { + Console.Write("bind para failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + + public void BindSingleParamBatch(TAOS_MULTI_BIND bind, int index) + { + int res = TDengine.StmtBindSingleParamBatch(this.stmt,ref bind, index); + if (res == 0) + { + Console.WriteLine("single bind batch success"); + } + else + { + Console.Write("single bind batch failed: " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + + public void BindParamBatch(TAOS_MULTI_BIND[] bind) + { + int res = TDengine.StmtBindParamBatch(this.stmt, bind); + if (res == 0) + { + Console.WriteLine("bind parameter batch success"); + } + else + { + Console.WriteLine("bind parameter batch failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + + public void AddBatch() + { + int res = TDengine.StmtAddBatch(this.stmt); + if (res == 0) + { + Console.WriteLine("stmt add batch success"); + } + else + { + Console.Write("stmt add batch failed,reason: " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + public void StmtExecute() + { + int res = TDengine.StmtExecute(this.stmt); + if (res == 0) + { + Console.WriteLine("Execute stmt success"); + } + else + { + Console.Write("Execute stmt failed,reason: " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + public void StmtClose() + { + int res = TDengine.StmtClose(this.stmt); + if (res == 0) + { + Console.WriteLine("close stmt success"); + } + else + { + Console.WriteLine("close stmt failed, " + TDengine.StmtErrorStr(stmt)); + StmtClose(); + ExitProgram(); + } + } + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + if (TDengine.Close(this.conn) == 0) + { + Console.WriteLine("close connection sucess"); + } + else + { + Console.WriteLine("close Connection failed"); + } + } + } + + //select only + public void StmtUseResult() + { + IntPtr res = 
TDengine.StmtUseResult(this.stmt); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + StmtClose(); + CloseConnection(); + ExitProgram(); + } + else + { + Console.WriteLine("{0},query success"); + DisplayRes(res); + TDengine.FreeResult(res); + } + + } + + public void DisplayRes(IntPtr res) + { + + long queryRows = 0; + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + + int fieldCount = TDengine.FieldCount(res); + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + Console.WriteLine(""); + + } + public static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + } +} diff --git a/tests/examples/C#/stmt/stmt.csproj b/tests/examples/C#/stmt/stmt.csproj new file mode 100644 index 0000000000000000000000000000000000000000..bc14850edbf9023e885436016141f24d6d042127 --- /dev/null +++ b/tests/examples/C#/stmt/stmt.csproj @@ -0,0 +1,12 @@ + + + + + + + + Exe + net5.0 + + + diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs deleted file mode 100644 
index e6c3a598adc0bc4bcf5ea84953f649b418199555..0000000000000000000000000000000000000000 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - 
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - 
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - //get precisionin parameter restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - } -} diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index 
e092c48f15314f5cad0a9509190d7b9970a7073a..2c77285488830323ed03a04a1d1c89c048ad2ea8 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -118,7 +118,7 @@ namespace TDengineDriver port = (short)this.GetArgumentAsLong(argv, "-p", 0, 65535, 6030); user = this.GetArgumentAsString(argv, "-u", "root"); password = this.GetArgumentAsString(argv, "-P", "taosdata"); - dbName = this.GetArgumentAsString(argv, "-d", "db"); + dbName = this.GetArgumentAsString(argv, "-d", "taosdemo_cs"); stablePrefix = this.GetArgumentAsString(argv, "-s", "st"); tablePrefix = this.GetArgumentAsString(argv, "-m", "t"); isInsertOnly = this.GetArgumentAsFlag(argv, "-x", true); diff --git a/tests/examples/C#/taosdemo/taosdemo.csproj b/tests/examples/C#/taosdemo/taosdemo.csproj index 15ec155d45e34aae7276fe596c177619dfddd3e9..8d4b786ba3a99b600783a5b4ee55d99f03e47655 100644 --- a/tests/examples/C#/taosdemo/taosdemo.csproj +++ b/tests/examples/C#/taosdemo/taosdemo.csproj @@ -6,4 +6,8 @@ false + + + + diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml index 81c549274c81ddc69d52508c46cd215edd8c5467..91f1cb36f28840b7d5eeb428aca3f940365ed59d 100644 --- a/tests/examples/JDBC/connectionPools/pom.xml +++ b/tests/examples/JDBC/connectionPools/pom.xml @@ -53,7 +53,7 @@ org.apache.logging.log4j log4j-core - 2.14.1 + 2.15.0 diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml index 91b976c2ae6c76a5ae2d7b76c3b90d05e4dae57f..c6e61f5d22ca83c5d56deef7db9354913a3321b1 100644 --- a/tests/examples/JDBC/taosdemo/pom.xml +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -88,7 +88,7 @@ org.apache.logging.log4j log4j-core - 2.14.1 + 2.15.0 diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt index e94de3cbca574de71c8bcefc4b52173922c05a98..9d5dfc37b1045cb771cf6bd20da7087d7523e2e2 100644 --- a/tests/examples/c/CMakeLists.txt +++ b/tests/examples/c/CMakeLists.txt @@ -10,13 
+10,13 @@ IF (TD_LINUX) ADD_EXECUTABLE(subscribe subscribe.c) TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread ) ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) + TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ${LINK_LUA}) ENDIF () IF (TD_DARWIN) INCLUDE_DIRECTORIES(. ${TD_COMMUNITY_DIR}/src/inc ${TD_COMMUNITY_DIR}/src/client/inc ${TD_COMMUNITY_DIR}/inc) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(demo demo.c) - TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua) + TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ${LINK_LUA}) ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) + TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ${LINK_LUA}) ENDIF () diff --git a/tests/examples/c/schemaless.c b/tests/examples/c/schemaless.c index 9a2d2f063573d26093bd5032e2f68cc54fc5f908..0b46d2ff134689992d648065511dd5c21766759e 100644 --- a/tests/examples/c/schemaless.c +++ b/tests/examples/c/schemaless.c @@ -8,7 +8,8 @@ #include #include -#define MAX_THREAD_LINE_BATCHES 1024 +bool verbose = false; + void printThreadId(pthread_t id, char* buf) { @@ -30,11 +31,10 @@ typedef struct { typedef struct { TAOS* taos; + int protocol; int numBatches; - SThreadLinesBatch batches[MAX_THREAD_LINE_BATCHES]; + SThreadLinesBatch *batches; int64_t costTime; - int tsPrecision; - int lineProtocol; } SThreadInsertArgs; static void* insertLines(void* args) { @@ -43,27 +43,33 @@ static void* insertLines(void* args) { printThreadId(pthread_self(), tidBuf); for (int i = 0; i < insertArgs->numBatches; ++i) { SThreadLinesBatch* batch = insertArgs->batches + i; - printf("%s, thread: 0x%s\n", "begin taos_insert_lines", tidBuf); + if (verbose) printf("%s, thread: 0x%s\n", "begin taos_insert_lines", tidBuf); int64_t begin = getTimeInUs(); - TAOS_RES *res = taos_schemaless_insert(insertArgs->taos, batch->lines, batch->numLines, insertArgs->lineProtocol, 
insertArgs->tsPrecision); + //int32_t code = taos_insert_lines(insertArgs->taos, batch->lines, batch->numLines); + TAOS_RES * res = taos_schemaless_insert(insertArgs->taos, batch->lines, batch->numLines, insertArgs->protocol, TSDB_SML_TIMESTAMP_MILLI_SECONDS); int32_t code = taos_errno(res); int64_t end = getTimeInUs(); insertArgs->costTime += end - begin; - printf("code: %d, %s. affected lines:%d time used:%"PRId64", thread: 0x%s\n", code, taos_errstr(res), taos_affected_rows(res), end - begin, tidBuf); - taos_free_result(res); + if (verbose) printf("code: %d, %s. time used:%"PRId64", thread: 0x%s\n", code, tstrerror(code), end - begin, tidBuf); } return NULL; } +int32_t getTelenetTemplate(char* lineTemplate, int templateLen) { + char* sample = "sta%d %lld 44.3 t0=False t1=127i8 t2=32 t3=%di32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"hpxzrdiw\" t8=\"ncharTagValue\" t9=127i8"; + snprintf(lineTemplate, templateLen, "%s", sample); + return 0; +} + int32_t getLineTemplate(char* lineTemplate, int templateLen, int numFields) { if (numFields <= 4) { - char* sample = "sta%d,t3=%di32 c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64 %lldms"; + char* sample = "sta%d,t3=%di32 c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64 %lld"; snprintf(lineTemplate, templateLen, "%s", sample); return 0; } if (numFields <= 13) { - char* sample = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms"; + char* sample = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" 
c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lld"; snprintf(lineTemplate, templateLen, "%s", sample); return 0; } @@ -84,14 +90,24 @@ int32_t getLineTemplate(char* lineTemplate, int templateLen, int numFields) { for (int i = offset[1]+1; i < offset[2]; ++i) { snprintf(lineTemplate+strlen(lineTemplate), templateLen-strlen(lineTemplate), "c%d=\"%d\",", i, i); } - char* lineFormatTs = " %lldms"; + char* lineFormatTs = " %lld"; snprintf(lineTemplate+strlen(lineTemplate)-1, templateLen-strlen(lineTemplate)+1, "%s", lineFormatTs); return 0; } +int32_t generateLine(char* line, int lineLen, char* lineTemplate, int protocol, int superTable, int childTable, int64_t ts) { + if (protocol == TSDB_SML_LINE_PROTOCOL) { + snprintf(line, lineLen, lineTemplate, superTable, childTable, ts); + } else if (protocol == TSDB_SML_TELNET_PROTOCOL) { + snprintf(line, lineLen, lineTemplate, superTable, ts, childTable); + } + return TSDB_CODE_SUCCESS; +} + int main(int argc, char* argv[]) { int numThreads = 8; + int maxBatchesPerThread = 1024; int numSuperTables = 1; int numChildTables = 256; @@ -99,11 +115,11 @@ int main(int argc, char* argv[]) { int numFields = 13; int maxLinesPerBatch = 16384; - int tsPrecision = TSDB_SML_TIMESTAMP_NOT_CONFIGURED; - int lineProtocol = TSDB_SML_UNKNOWN_PROTOCOL; + + int protocol = TSDB_SML_TELNET_PROTOCOL; int opt; - while ((opt = getopt(argc, argv, "s:c:r:f:t:m:p:P:h")) != -1) { + while ((opt = getopt(argc, argv, "s:c:r:f:t:b:p:hv")) != -1) { switch (opt) { case 's': numSuperTables = atoi(optarg); @@ -120,28 +136,35 @@ int main(int argc, char* argv[]) { case 't': numThreads = atoi(optarg); break; - case 'm': + case 'b': maxLinesPerBatch = atoi(optarg); break; - case 'p': - tsPrecision = atoi(optarg); + case 'v': + verbose = true; break; - case 'P': - lineProtocol = atoi(optarg); + case 'p': + 
if (optarg[0] == 't') { + protocol = TSDB_SML_TELNET_PROTOCOL; + } else if (optarg[0] == 'l') { + protocol = TSDB_SML_LINE_PROTOCOL; + } else if (optarg[0] == 'j') { + protocol = TSDB_SML_JSON_PROTOCOL; + } break; case 'h': - fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -m maxlines_per_batch\n", + fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -v\n", argv[0]); exit(0); default: /* '?' */ - fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -m maxlines_per_batch\n", + fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -v\n", argv[0]); exit(-1); } } TAOS_RES* result; - const char* host = "127.0.0.1"; + //const char* host = "127.0.0.1"; + const char* host = NULL; const char* user = "root"; const char* passwd = "taosdata"; @@ -152,10 +175,7 @@ int main(int argc, char* argv[]) { exit(1); } - if (numThreads * MAX_THREAD_LINE_BATCHES* maxLinesPerBatch < numSuperTables*numChildTables*numRowsPerChildTable) { - printf("too many rows to be handle by threads with %d batches", MAX_THREAD_LINE_BATCHES); - exit(2); - } + maxBatchesPerThread = (numSuperTables*numChildTables*numRowsPerChildTable)/(numThreads * maxLinesPerBatch) + 1; char* info = taos_get_server_info(taos); printf("server info: %s\n", info); @@ -171,28 +191,33 @@ int main(int argc, char* argv[]) { (void)taos_select_db(taos, "db"); time_t ct = time(0); - int64_t ts = ct * 1000; + int64_t ts = ct * 1000 ; char* lineTemplate = calloc(65536, sizeof(char)); - getLineTemplate(lineTemplate, 65535, numFields); + if (protocol == TSDB_SML_LINE_PROTOCOL) { + getLineTemplate(lineTemplate, 65535, numFields); + } else if (protocol == TSDB_SML_TELNET_PROTOCOL ) { + getTelenetTemplate(lineTemplate, 65535); + } printf("setup supertables..."); { char** linesStb = calloc(numSuperTables, sizeof(char*)); for (int i = 0; i < 
numSuperTables; i++) { char* lineStb = calloc(strlen(lineTemplate)+128, 1); - snprintf(lineStb, strlen(lineTemplate)+128, lineTemplate, i, + generateLine(lineStb, strlen(lineTemplate)+128, lineTemplate, protocol, i, numSuperTables * numChildTables, ts + numSuperTables * numChildTables * numRowsPerChildTable); linesStb[i] = lineStb; } SThreadInsertArgs args = {0}; + args.protocol = protocol; + args.batches = calloc(maxBatchesPerThread, sizeof(maxBatchesPerThread)); args.taos = taos; args.batches[0].lines = linesStb; args.batches[0].numLines = numSuperTables; - args.tsPrecision = tsPrecision; - args.lineProtocol = lineProtocol; insertLines(&args); + free(args.batches); for (int i = 0; i < numSuperTables; ++i) { free(linesStb[i]); } @@ -203,8 +228,10 @@ int main(int argc, char* argv[]) { pthread_t* tids = calloc(numThreads, sizeof(pthread_t)); SThreadInsertArgs* argsThread = calloc(numThreads, sizeof(SThreadInsertArgs)); for (int i = 0; i < numThreads; ++i) { + argsThread[i].batches = calloc(maxBatchesPerThread, sizeof(SThreadLinesBatch)); argsThread[i].taos = taos; argsThread[i].numBatches = 0; + argsThread[i].protocol = protocol; } int64_t totalLines = numSuperTables * numChildTables * numRowsPerChildTable; @@ -229,7 +256,7 @@ int main(int argc, char* argv[]) { int stIdx = i; int ctIdx = numSuperTables*numChildTables + j; char* line = calloc(strlen(lineTemplate)+128, 1); - snprintf(line, strlen(lineTemplate)+128, lineTemplate, stIdx, ctIdx, ts + l); + generateLine(line, strlen(lineTemplate)+128, lineTemplate, protocol, stIdx, ctIdx, ts + l); int batchNo = l / maxLinesPerBatch; int lineNo = l % maxLinesPerBatch; allBatches[batchNo][lineNo] = line; @@ -262,6 +289,9 @@ int main(int argc, char* argv[]) { } free(allBatches); + for (int i = 0; i < numThreads; i++) { + free(argsThread[i].batches); + } free(argsThread); free(tids); diff --git a/tests/examples/lua/OpenResty/rest/test.lua b/tests/examples/lua/OpenResty/rest/test.lua index 
48aeef3fb4dd8c9a0dc18e8039b4b8c781760666..2dc0cf10f22b90c8bcb925700b1d7ebd00ff153a 100644 --- a/tests/examples/lua/OpenResty/rest/test.lua +++ b/tests/examples/lua/OpenResty/rest/test.lua @@ -63,6 +63,7 @@ else end +--[[ local flag = false function query_callback(res) if res.code ~=0 then @@ -80,9 +81,10 @@ end driver.query_a(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 3, 'robotspace'),('2019-09-01 00:00:00.006', 4, 'Hilink'),('2019-09-01 00:00:00.007', 6, 'Harmony')", query_callback) while not flag do --- ngx.say("i am here once...") + ngx.say("i am here once...") ngx.sleep(0.001) -- time unit is second end +--]] ngx.say("pool water_mark:"..pool:get_water_mark()) diff --git a/tests/examples/lua/README.md b/tests/examples/lua/README.md index 32d6a4cace9bd0bf66238ff32af1d3ecf0285046..bdc88edbd7b5d6798a8df6530ea82d24eb22915b 100644 --- a/tests/examples/lua/README.md +++ b/tests/examples/lua/README.md @@ -1,7 +1,10 @@ # TDengine driver connector for Lua -It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua5.3 . - +It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua5.3 . +As TDengine is built with lua-enable, the built-in lua module conflicts with external lua. The following commands require TDengine built with lua-disable. +To disable built-in lua: +mkdir debug && cd debug +cmake .. -DBUILD_LUA=false && cmake --build . 
## Lua Dependencies - Lua: ``` diff --git a/tests/examples/lua/lua51/lua_connector51.c b/tests/examples/lua/lua51/lua_connector51.c index fe2152945dc1915dca5de31458a8cbb2f007f4f2..b6e0b6d1de200b09750ffba6845ae9bf0606f4d8 100644 --- a/tests/examples/lua/lua51/lua_connector51.c +++ b/tests/examples/lua/lua51/lua_connector51.c @@ -102,7 +102,7 @@ static int l_query(lua_State *L){ printf("failed, reason:%s\n", taos_errstr(result)); lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); - lua_pushstring(L, taos_errstr(taos)); + lua_pushstring(L, taos_errstr(result)); lua_setfield(L, table_index, "error"); return 1; diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 8c2ea3e9e83237fc8ed9ebce687f5131352e4d14..06568f35d656d5d9af1ae2e88eeaeba92f0ede91 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -102,7 +102,7 @@ static int l_query(lua_State *L){ printf("failed, reason:%s\n", taos_errstr(result)); lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); - lua_pushstring(L, taos_errstr(taos)); + lua_pushstring(L, taos_errstr(result)); lua_setfield(L, table_index, "error"); return 1; diff --git a/tests/examples/lua/luaconnector.so b/tests/examples/lua/luaconnector.so deleted file mode 100755 index 08bf6a6156aebe053132545193cd111fb436bc4b..0000000000000000000000000000000000000000 Binary files a/tests/examples/lua/luaconnector.so and /dev/null differ diff --git a/tests/get_catalog.py b/tests/get_catalog.py new file mode 100644 index 0000000000000000000000000000000000000000..2a8a290c1966b278c1c84d52e15407428578fc1d --- /dev/null +++ b/tests/get_catalog.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import sys +import getopt +import os +from fabric2 import Connection + +sys.path.append("pytest") +import importlib +import re + + +class CatalogGen: + def __init__(self, CaseDirList, CatalogName, DirDepth): + self.CaseDirList = CaseDirList + self.CatalogName = CatalogName + self.blacklist = [] + self.DirDepth = DirDepth + + def CatalogGen(self): + for i in self.CaseDirList: + self.GetCatalog(i) + self.CollectLog(i) + print("Catalog Generation done") + + def CollectLog(self, CaseDir): + DirorFiles = os.listdir(CaseDir) + for loop in range(self.DirDepth): + for i in DirorFiles: + fileName = os.path.join(CaseDir, i) + if os.path.isdir(fileName): + self.CollectLog(fileName) + else: + if i == self.CatalogName and fileName not in self.blacklist: + self.blacklist.append(fileName) + with open(fileName, "r") as f: + Catalog = f.read() + title = CaseDir.split("/")[-1] + TitleLevel = CaseDir.count("/") + with open( + os.path.dirname(CaseDir) + "/" + self.CatalogName, "a" + ) as f: + f.write("#" * TitleLevel + " %s\n" % title) + f.write(Catalog) + + def GetCatalog(self, CaseDir): + for root, dirs, files in os.walk(CaseDir): + for file in files: + if file.endswith(".py"): + fileName = os.path.join(root, file) + moduleName = fileName.replace(".py", "").replace("/", ".") + uModule = importlib.import_module(moduleName) + title = file.split(".")[0] + TitleLevel = root.count("/") + 1 + try: + ucase = uModule.TDTestCase() + with open(root + "/" + self.CatalogName, "a") as f: + f.write("#" * TitleLevel + " %s\n" % title) + for i in ucase.caseDescription.__doc__.split("\n"): + if i.lstrip() == "": + continue + if re.match("^case.*:", i.strip()): + 
f.write("* " + i.strip() + "\n") + else: + f.write(i.strip() + "\n") + except: + print(fileName) + + def CleanCatalog(self): + for i in self.CaseDirList: + for root, dirs, files in os.walk(i): + for file in files: + if file == self.CatalogName: + os.remove(root + "/" + self.CatalogName) + print("clean is done") + + +if __name__ == "__main__": + CaseDirList = [] + CatalogName = "" + DirDepth = 0 + generate = True + delete = True + opts, args = getopt.gnu_getopt(sys.argv[1:], "d:c:v:n:th") + for key, value in opts: + if key in ["-h", "--help"]: + print("A collection of test cases catalog written using Python") + print( + "-d root dir of test case files written by Python, default: system-test,develop-test" + ) + print("-c catalog file name, default: catalog.md") + print("-v dir depth of test cases.default: 5") + print("-n generate") + print("-r delete") + sys.exit(0) + + if key in ["-d"]: + CaseDirList = value.split(",") + + if key in ["-c"]: + CatalogName = value + + if key in ["-v"]: + DirDepth = int(value) + + if key in ["-n"]: + if value.upper() == "TRUE": + generate = True + elif value.upper() == "FALSE": + generate = False + + if key in ["-r"]: + if value.upper() == "TRUE": + delete = True + elif value.upper() == "FALSE": + delete = False + + print(CaseDirList, CatalogName) + if CaseDirList == []: + CaseDirList = ["system-test", "develop-test"] + if CatalogName == "": + CatalogName = "catalog.md" + if DirDepth == 0: + DirDepth = 5 + print( + "opt:\n\tcatalogname: %s\n\tcasedirlist: %s\n\tdepth: %d\n\tgenerate: %s\n\tdelete: %s" + % (CatalogName, ",".join(CaseDirList), DirDepth, generate, delete) + ) + f = CatalogGen(CaseDirList, CatalogName, DirDepth) + if delete: + f.CleanCatalog() + if generate: + f.CatalogGen() diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx new file mode 100644 index 
0000000000000000000000000000000000000000..bbc1e5a27a857a8d374e935b41e91a614c48dc4b --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx @@ -0,0 +1,209 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + {"metric": "cpu.usage_user.rows", "timestamp":${ts_counter}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"${rows_counter}","team":"NYC"}} + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/json/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + 1 + row_count + 1 + rows_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx new file mode 100644 index 0000000000000000000000000000000000000000..11f0ed8f64a51aee50be99cc4ee8f1b7450b34e8 --- /dev/null +++ 
b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx @@ -0,0 +1,191 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + {"metric": "cpu.usage_user_${__Random(1,100000,)}_${__Random(1,100000,)}_${__Random(1,100000,)}", "timestamp":1626006833640, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"0","team":"NYC"}} + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/json/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx new file mode 100644 index 0000000000000000000000000000000000000000..053480dc47a9c26cd5faf0b0fb277c3fa060b87d --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx @@ -0,0 +1,191 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + 
+ + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + {"metric": "cpu.usage_user", "timestamp":1626006833640, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"${__Random(1,100000,)}","region":"us-west-1","service":"${__Random(1,100000,)}","service_environment":"staging","service_version":"${__Random(1,100000,)}","team":"NYC"}} + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/json/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx new file mode 100644 index 0000000000000000000000000000000000000000..be8b8bdc2b8491b6f1689ad45b31e026dcae6f23 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true 
+ + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 24 + + false + + + true + + + + true + + + + false + {"metric": "cpu.usage_user.rows", "timestamp":${ts_counter}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"0","team":"NYC"}} + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/json/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx new file mode 100644 index 0000000000000000000000000000000000000000..7192421de4ca8f88f86226852bbe7242423efe65 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx @@ -0,0 +1,203 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + 
Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + {"metric": "cpu.usage_user.rows", "timestamp":${ts_csv_count}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"${row_csv_count}","team":"NYC"}} + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/json/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + , + UTF-8 + import_file_name + false + false + true + shareMode.all + false + ts_csv_count,row_csv_count + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx new file mode 100644 index 0000000000000000000000000000000000000000..0b001cd57776deaf0fc1268ac13bab72c50faafa --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx @@ -0,0 +1,209 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + 
cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=${rows_counter} team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + 1 + row_count + 1 + rows_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx new file mode 100644 index 0000000000000000000000000000000000000000..0a7a6aad4790bb0e8dca56f58eb8ef447ebe1f89 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx @@ -0,0 +1,191 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + cpu.usage_user_${__Random(1,100000,)}_${__Random(1,100000,)}_${__Random(1,100000,)} 1626006833640 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging 
service_version=0 team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx new file mode 100644 index 0000000000000000000000000000000000000000..6e7a8268f092c13ddf3e1896702aa23fe94221d0 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx @@ -0,0 +1,191 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + cpu.usage_user 1626006833640 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=${__Random(1,100000,)} region=us-west-1 service=${__Random(1,100000,)} service_environment=staging service_version=${__Random(1,100000,)} team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false 
+ false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx new file mode 100644 index 0000000000000000000000000000000000000000..eb86c0b2fc1b307fae1881f278fde9c5f0ed27b8 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 24 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx 
b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx new file mode 100644 index 0000000000000000000000000000000000000000..672c377fcad2e988443efa28ae10c0b3c09152be --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx @@ -0,0 +1,203 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + looptimes + + 100 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_csv_count} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=${row_csv_count} team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + , + UTF-8 + import_file_name + false + false + true + shareMode.all + false + ts_csv_count,row_csv_count + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py b/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py new file mode 100644 index 0000000000000000000000000000000000000000..480c25206065fc62a53156e899c213f8e2b487db --- /dev/null +++ 
from fabric import Connection
from loguru import logger
import shutil
import os
import time


class TaosadapterPerftest():
    """Drive JMeter-based taosadapter performance scenarios against a remote
    TDengine host over SSH, archiving one JMeter HTML report per scenario."""

    def __init__(self):
        # remote TDengine/taosadapter host, reached via SSH
        self.ip = "192.168.1.85"
        self.port = "22"
        self.user = "root"
        self.passwd = "tbase125!"
        # JMeter plans for the OpenTSDB telnet protocol
        self.telnetCreateStbJmxFile = "opentsdb_telnet_createStb.jmx"
        self.telnetCreateTbJmxFile = "opentsdb_telnet_createTb.jmx"
        self.telnetInsertRowsFile = "opentsdb_telnet_insertRows.jmx"
        self.telnetMixJmxFile = "opentsdb_telnet_jmeter_csv_import.jmx"

        # JMeter plans for the OpenTSDB json protocol
        self.jsonCreateStbJmxFile = "opentsdb_json_createStb.jmx"
        self.jsonCreateTbJmxFile = "opentsdb_json_createTb.jmx"
        self.jsonInsertRowsFile = "opentsdb_json_insertRows.jmx"
        self.jsonMixJmxFile = "opentsdb_json_jmeter_csv_import.jmx"

        self.logfile = "taosadapter_perftest.log"
        # thread counts must match the thread groups configured in the jmx plans
        self.createStbThreads = 100
        self.createTbThreads = 100
        self.insertRowsThreads = 24

        logger.add(self.logfile)

    def exec_remote_cmd(self, cmd):
        """Run *cmd* on the remote host over SSH.

        Returns the command's stdout, or '' when the connection/command fails
        (previously an implicit None was returned on failure).
        """
        try:
            conn = Connection(self.ip, user=self.user, port=self.port,
                              connect_timeout=120,
                              connect_kwargs={"password": self.passwd})
            output = conn.run(cmd, pty=False, warn=True, hide=True).stdout
            conn.close()
            return output
        except Exception as e:
            logger.error(f"exec cmd {cmd} failed:{e}")
            return ""

    def exec_local_cmd(self, shell_cmd):
        """Run a local shell command and return its stripped stdout."""
        return os.popen(shell_cmd).read().strip()

    def modifyJxmLooptimes(self, filename, looptimes, row_count=None, import_file_name=None):
        """Patch a copied jmx plan in place, replacing the 'looptimes' placeholder
        (and optionally 'row_count' / 'import_file_name') with concrete values."""
        with open(filename, "r", encoding="utf-8") as f:
            lines = f.readlines()
        with open(filename, "w", encoding="utf-8") as f_w:
            for line in lines:
                if "looptimes" in line:
                    line = line.replace("looptimes", looptimes)
                if row_count is not None and "row_count" in line:
                    line = line.replace("row_count", row_count)
                if import_file_name is not None and "import_file_name" in line:
                    line = line.replace("import_file_name", import_file_name)
                f_w.write(line)

    def cleanAndRestartTaosd(self):
        """Stop taosd on the remote host, wipe its data/log dirs and restart it."""
        logger.info("---- restarting taosd and taosadapter ----")
        self.exec_remote_cmd("systemctl stop taosd")
        self.exec_remote_cmd("rm -rf /var/lib/taos/* /var/log/taos/*")
        self.exec_remote_cmd("systemctl start taosd")
        logger.info("---- finish restart ----")
        # give taosd/taosadapter time to come up before JMeter fires
        time.sleep(60)

    def recreateReportDir(self, path):
        """Create the jmeter report dir, or empty it when it already exists."""
        if os.path.exists(path):
            self.exec_local_cmd(f'rm -rf {path}/*')
        else:
            os.makedirs(path)

    def cleanLog(self):
        """Truncate the local log file."""
        # opening with mode 'w' already truncates; the previous explicit
        # seek(0)/truncate() calls were redundant
        with open(self.logfile, 'w'):
            pass

    def genMixTbRows(self, filename, table_count, row_count):
        """Generate the csv import file: one '<ts>,<table index>' line per row,
        with a timestamp that increases by 1 ms continuously across all tables."""
        logger.info('generating import data file')
        ts_start = 1614530008000
        with open(filename, "w", encoding="utf-8") as f_w:
            for i in range(table_count):
                for _ in range(row_count):
                    f_w.write(f"{ts_start},{i}\n")
                    ts_start += 1

    def outputParams(self, protocol, create_type):
        """Map (protocol, create_type) to (jmx plan file, thread count).

        protocol is "telnet" or "json"; create_type is "stb", "tb" or "rows".
        Raises ValueError on an unknown create_type instead of silently
        returning None (which previously surfaced only as a TypeError when
        the caller unpacked the result).
        """
        if protocol == "telnet":
            plans = {
                "stb": (self.telnetCreateStbJmxFile, self.createStbThreads),
                "tb": (self.telnetCreateTbJmxFile, self.createTbThreads),
                "rows": (self.telnetInsertRowsFile, self.insertRowsThreads),
            }
        else:
            plans = {
                "stb": (self.jsonCreateStbJmxFile, self.createStbThreads),
                "tb": (self.jsonCreateTbJmxFile, self.createTbThreads),
                "rows": (self.jsonInsertRowsFile, self.insertRowsThreads),
            }
        try:
            return plans[create_type]
        except KeyError:
            logger.error("create type error!")
            raise ValueError(f"unknown create_type: {create_type!r}")

    def insertTDengine(self, procotol, create_type, count):
        """Run one create-stb / create-tb / insert-rows scenario.

        Copies the jmx plan, patches its loop count so threads * loops == count,
        runs jmeter in non-GUI mode and archives the HTML report.
        """
        self.cleanAndRestartTaosd()
        jmxfile, threads = self.outputParams(procotol, create_type)
        handle_file = str(count) + jmxfile
        report_dir = f'testreport/{handle_file}'
        self.recreateReportDir(report_dir)
        shutil.copyfile(jmxfile, handle_file)
        # exact floor division (the previous int(count/threads) went through
        # float and could round for very large counts)
        self.modifyJxmLooptimes(handle_file, str(count // threads))
        jmeter_cmd = f"jmeter -n -t {handle_file} -l {report_dir}/{handle_file}.txt -e -o {report_dir}"
        logger.info(f'jmeter running ----- {jmeter_cmd}')
        result = self.exec_local_cmd(jmeter_cmd)
        logger.info(result)
        logger.info("----- sleep 120s and please record data -----")
        time.sleep(120)

    def insertMixTbRows(self, procotol, table_count, row_count):
        """Mixed scenario: pre-generate a csv of table_count * row_count rows and
        replay it through the csv-import jmx plan (100 jmeter threads, so each
        thread loops total-rows / 100 times)."""
        self.cleanAndRestartTaosd()
        local_path = os.getcwd()
        jmxfile = f"opentsdb_{procotol}_{table_count}Tb{row_count}Rows.jmx"
        import_file_name = f"import_opentsdb_{procotol}_{table_count}Tb{row_count}Rows.txt"
        import_file_path = local_path + '/' + import_file_name
        self.genMixTbRows(import_file_name, table_count, row_count)
        report_dir = f'testreport/{jmxfile}'
        self.recreateReportDir(report_dir)
        template = self.telnetMixJmxFile if procotol == "telnet" else self.jsonMixJmxFile
        shutil.copyfile(template, jmxfile)
        self.modifyJxmLooptimes(jmxfile, str(table_count * row_count // 100),
                                import_file_name=import_file_path)
        jmeter_cmd = f"jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}"
        logger.info(f'jmeter running ----- {jmeter_cmd}')
        result = self.exec_local_cmd(jmeter_cmd)
        logger.info(result)
        logger.info("----- sleep 120s and please record data -----")
        time.sleep(120)


if __name__ == '__main__':
    taosadapterPerftest = TaosadapterPerftest()
    taosadapterPerftest.cleanLog()

    logger.info('------------ Start testing the scenarios in the report chapter 3.4.1 ------------')
    for procotol in ["telnet", "json"]:
        logger.info(f'----- {procotol} protocol ------- Creating 30W stable ------------')
        taosadapterPerftest.insertTDengine(procotol, "stb", 300000)
        logger.info(f'----- {procotol} protocol ------- Creating 100W table with stb "cpu.usage_user" ------------')
        taosadapterPerftest.insertTDengine(procotol, "tb", 1000000)
        logger.info(f'----- {procotol} protocol ------- inserting 100W rows ------------')
        taosadapterPerftest.insertTDengine(procotol, "rows", 1000000)

        logger.info(f'----- {procotol} protocol ------- Creating 50W stable ------------')
        taosadapterPerftest.insertTDengine(procotol, "stb", 500000)
        logger.info(f'----- {procotol} protocol ------- Creating 500W table with stb "cpu.usage_user" ------------')
        taosadapterPerftest.insertTDengine(procotol, "tb", 5000000)
        logger.info(f'----- {procotol} protocol ------- inserting 500W rows ------------')
        taosadapterPerftest.insertTDengine(procotol, "rows", 5000000)

        logger.info(f'----- {procotol} protocol ------- Creating 100W stable ------------')
        taosadapterPerftest.insertTDengine(procotol, "stb", 1000000)
        logger.info(f'----- {procotol} protocol ------- Creating 1000W table with stb "cpu.usage_user" ------------')
        taosadapterPerftest.insertTDengine(procotol, "tb", 10000000)
        logger.info(f'----- {procotol} protocol ------- inserting 1000W rows ------------')
        taosadapterPerftest.insertTDengine(procotol, "rows", 10000000)

        logger.info(f'----- {procotol} protocol ------- Creating 10W stable 1000Rows ------------')
        taosadapterPerftest.insertMixTbRows(procotol, 100000, 1000)

        logger.info(f'----- {procotol} protocol ------- Creating 100W stable 100Rows ------------')
        taosadapterPerftest.insertMixTbRows(procotol, 1000000, 100)

        logger.info(f'----- {procotol} protocol ------- Creating 500W stable 20Rows ------------')
        taosadapterPerftest.insertMixTbRows(procotol, 5000000, 20)

        logger.info(f'----- {procotol} protocol ------- Creating 1000W stable 10Rows ------------')
        taosadapterPerftest.insertMixTbRows(procotol, 10000000, 10)
#create 500 tables - os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath) tdDnodes.stop(1) tdDnodes.start(1) tdSql.execute('use db') diff --git a/tests/pytest/client/change_time_1_1.py b/tests/pytest/client/change_time_1_1.py index acdea17fbf1d093cef522d9a99ec17f80b9a4d3b..c66871119b582a133b32f2f57c8c4d95723c5734 100644 --- a/tests/pytest/client/change_time_1_1.py +++ b/tests/pytest/client/change_time_1_1.py @@ -42,7 +42,7 @@ class TDTestCase: #11 data files should be generated #vnode at TDinternal/community/sim/dnode1/data/vnode try: - os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json") + os.system(f"{binPath}taosBenchmark -f tools/taosdemoAllTest/manual_change_time_1_1_A.json") commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data'] result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8') except BaseException: @@ -59,7 +59,7 @@ class TDTestCase: #leaving 7 data files. 
try: os.system ('timedatectl set-time 2020-10-25') - os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_B.json") + os.system(f"{binPath}taosBenchmark -f tools/taosdemoAllTest/manual_change_time_1_1_B.json") except BaseException: os.system('sudo timedatectl set-ntp on') tdLog.sleep(10) diff --git a/tests/pytest/client/change_time_1_2.py b/tests/pytest/client/change_time_1_2.py index ec483b00be34ad52c2b22f77ed8d81fdfc43c068..58fca4e4356b4e198bbb23cabb3fc5072ec003d8 100644 --- a/tests/pytest/client/change_time_1_2.py +++ b/tests/pytest/client/change_time_1_2.py @@ -41,7 +41,7 @@ class TDTestCase: #11 data files should be generated #vnode at TDinternal/community/sim/dnode1/data/vnode try: - os.system(f"{binPath}taosdemo -f tools/taosdemoAllTest/manual_change_time_1_1_A.json") + os.system(f"{binPath}taosBenchmark -f tools/taosdemoAllTest/manual_change_time_1_1_A.json") commandArray = ['ls', '-l', f'{TDenginePath}/sim/dnode1/data/vnode/vnode2/tsdb/data'] result = subprocess.run(commandArray, stdout=subprocess.PIPE).stdout.decode('utf-8') except BaseException: diff --git a/tests/pytest/client/taoshellCheckCase.py b/tests/pytest/client/taoshellCheckCase.py index 936f7dfa159d2949ed7f029c3f754f6a039bce2d..f1e0c725ddee3a5e14f8d4fd82b92926e7142506 100644 --- a/tests/pytest/client/taoshellCheckCase.py +++ b/tests/pytest/client/taoshellCheckCase.py @@ -99,26 +99,41 @@ class TDTestCase: else: shutil.rmtree("./dumpdata") os.mkdir("./dumpdata") - - os.system(build_path + "/" + "taosdump -D test -o ./dumpdata") - sleep(2) - os.system("cd ./dumpdata && mv dbs.sql tables.sql") - os.system('sed -i "s/test/dumptest/g" `grep test -rl ./dumpdata`') - os.system(build_path + "/" + "taos -D ./dumpdata") - tdSql.query("select count(*) from dumptest.st") - tdSql.checkData(0, 0, 50) - - tdLog.info("========test other file name about tables.sql========") - os.system("rm -rf ./dumpdata/*") - os.system(build_path + "/" + "taosdump -D test -o ./dumpdata") - sleep(2) - 
os.system("cd ./dumpdata && mv dbs.sql table.sql") - os.system('sed -i "s/test/tt/g" `grep test -rl ./dumpdata`') + + # write data into sqls file + tables = ["CREATE DATABASE IF NOT EXISTS opendbtest REPLICA 1 QUORUM 1 DAYS\ + 10 KEEP 3650 CACHE 16 BLOCKS 16 MINROWS 100 MAXROWS 4096 FSYNC 3000 CACHELAST 0 COMP 2 PRECISION 'ms' UPDATE 0;", + + "CREATE TABLE IF NOT EXISTS opendbtest.cpus (ts TIMESTAMP, value DOUBLE) TAGS (author NCHAR(2), \ + department NCHAR(7), env NCHAR(4), hostname NCHAR(5), os NCHAR(6), production NCHAR(8), \ + team NCHAR(7), type NCHAR(10), useage NCHAR(7), workflow NCHAR(4));"] + with open("./dumpdata/tables.sql" ,"a") as f : + for item in tables: + f.write(item) + f.write("\n") + f.close() + + records = [ "CREATE TABLE IF NOT EXISTS opendbtest.tb USING opendbtest.cpus TAGS ('dd', 'Beijing', 'test', 'vm_7', 'ubuntu', 'taosdata', 'develop', 'usage_user', 'monitor', 'TIMI');", + "INSERT INTO opendbtest.tb VALUES (1420070400000, 59.078475);", + "INSERT INTO opendbtest.tb VALUES (1420070410000, 44.844490);", + "INSERT INTO opendbtest.tb VALUES (1420070420000, 34.796703);", + "INSERT INTO opendbtest.tb VALUES (1420070430000, 35.758099);", + "INSERT INTO opendbtest.tb VALUES (1420070440000, 51.502387);"] + + with open("./dumpdata/opendbtest.0.sql" ,"a") as f : + for item in records: + f.write(item) + f.write("\n") + f.close() + cmd = build_path + "/" + "taos -D ./dumpdata" out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE).stderr.read().decode("utf-8") if out.find("error:") >=0: print("===========expected error occured======") + tdSql.query("select value from opendbtest.tb") + tdSql.checkRows(5) + tdLog.info("====== check taos shell params ========") @@ -162,6 +177,7 @@ class TDTestCase: continue else: cmd = build_path + "/" + "taos -s \" insert into dbst.tb2 values(now ,2,2.0,'"+code+"','汉字"+code+"\')\"" + print(cmd) self.execute_cmd(cmd) @@ -192,6 +208,7 @@ class TDTestCase: for query in querys: cmd = build_path 
+ "/" + "taos -s \""+query+"\"" self.execute_cmd(cmd) + print(cmd) def stop(self): tdSql.close() diff --git a/tests/pytest/cluster/TD-3693/multClient.py b/tests/pytest/cluster/TD-3693/multClient.py index 24c27d9de9ff383f412af33e8d5f8318d1032f63..6eae7310fae902f0bb1114e60ef86e6d11370e8b 100644 --- a/tests/pytest/cluster/TD-3693/multClient.py +++ b/tests/pytest/cluster/TD-3693/multClient.py @@ -51,9 +51,9 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # insert data to cluster'db - os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath) + os.system("%staosBenchmark -f cluster/TD-3693/insert1Data.json -y " % binPath) # multiple new and cloes connection with query data - os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath) + os.system("%staosBenchmark -f cluster/TD-3693/insert2Data.json -y " % binPath) os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath) diff --git a/tests/pytest/compress/compressChangeVersion.py b/tests/pytest/compress/compressChangeVersion.py index b7b9ebe6b35dc4729e0dcae705ac7d93c73010e7..ad1c50149bdc0ffa5db8be4d9c4cc191b021e250 100644 --- a/tests/pytest/compress/compressChangeVersion.py +++ b/tests/pytest/compress/compressChangeVersion.py @@ -66,7 +66,7 @@ class TwoClients: # create backgroud db and tb tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f compress/insertDataDb1.json -y " % binPath) + os.system("%staosBenchmark -f compress/insertDataDb1.json -y " % binPath) # create foreground db and tb tdSql.execute("drop database if exists foredb") tdSql.execute("create database foredb") diff --git a/tests/pytest/dockerCluster/taosdemoWrapper.py b/tests/pytest/dockerCluster/taosdemoWrapper.py index 457dd4ee5aa5919951adbcea834d34cd367d3080..f2bd7bbc2307e2676a083c77f573609720c52450 100644 --- a/tests/pytest/dockerCluster/taosdemoWrapper.py +++ b/tests/pytest/dockerCluster/taosdemoWrapper.py @@ -35,11 +35,11 @@ class taosdemoWrapper: def run(self): if 
self.metadata is None: - os.system("taosdemo -h %s -d %s -t %d -T %d -c %s -a %d -b %s -n %d -t %d -O %d -R %d -w %d -x -y" + os.system("%staosBenchmark -h %s -d %s -t %d -T %d -c %s -a %d -b %s -n %d -t %d -O %d -R %d -w %d -x -y" % (self.host, self.database, self.tables, self.threads, self.configDir, self.replica, self.columnType, self.rowsPerTable, self.disorderRatio, self.disorderRange, self.charTypeLen)) else: - os.system("taosdemo -f %s" % self.metadata) + os.system("%staosBenchmark -f %s" % self.metadata) parser = argparse.ArgumentParser() diff --git a/tests/pytest/fulltest-connector.sh b/tests/pytest/fulltest-connector.sh new file mode 100644 index 0000000000000000000000000000000000000000..eb685bd2146508a55d470c45bd3b4780997de64c --- /dev/null +++ b/tests/pytest/fulltest-connector.sh @@ -0,0 +1,9 @@ +#!/bin/bash +ulimit -c unlimited +#======================p1-start=============== +#======================p1-end=============== + +# restful test for python +# python3 test.py -f restful/restful_bind_db1.py +# python3 test.py -f restful/restful_bind_db2.py +python3 ./test.py -f client/nettest.py \ No newline at end of file diff --git a/tests/pytest/fulltest-insert.sh b/tests/pytest/fulltest-insert.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a2a027c818ecc018829436b63e86a9d96629e5b --- /dev/null +++ b/tests/pytest/fulltest-insert.sh @@ -0,0 +1,154 @@ +#!/bin/bash +ulimit -c unlimited +#======================p1-start=============== +#======================p1-end=============== + +python3 testCompress.py +python3 testNoCompress.py + +python3 ./test.py -f import_merge/importBlock1HO.py +python3 ./test.py -f import_merge/importBlock1HPO.py +python3 ./test.py -f import_merge/importBlock1H.py +python3 ./test.py -f import_merge/importBlock1S.py +python3 ./test.py -f import_merge/importBlock1Sub.py +python3 ./test.py -f import_merge/importBlock1TO.py +python3 ./test.py -f import_merge/importBlock1TPO.py +python3 ./test.py -f 
import_merge/importBlock1T.py +python3 ./test.py -f import_merge/importBlock2HO.py +python3 ./test.py -f import_merge/importBlock2HPO.py +python3 ./test.py -f import_merge/importBlock2H.py +python3 ./test.py -f import_merge/importBlock2S.py +python3 ./test.py -f import_merge/importBlock2Sub.py +python3 ./test.py -f import_merge/importBlock2TO.py +python3 ./test.py -f import_merge/importBlock2TPO.py +python3 ./test.py -f import_merge/importBlock2T.py +python3 ./test.py -f import_merge/importBlockbetween.py +python3 ./test.py -f import_merge/importCacheFileHO.py +python3 ./test.py -f import_merge/importCacheFileHPO.py +python3 ./test.py -f import_merge/importCacheFileH.py +python3 ./test.py -f import_merge/importCacheFileS.py +python3 ./test.py -f import_merge/importCacheFileSub.py +python3 ./test.py -f import_merge/importCacheFileTO.py +python3 ./test.py -f import_merge/importCacheFileTPO.py +python3 ./test.py -f import_merge/importCacheFileT.py +python3 ./test.py -f import_merge/importDataH2.py +python3 ./test.py -f import_merge/importDataHO2.py +python3 ./test.py -f import_merge/importDataHO.py +python3 ./test.py -f import_merge/importDataHPO.py +python3 ./test.py -f import_merge/importDataLastHO.py +python3 ./test.py -f import_merge/importDataLastHPO.py +python3 ./test.py -f import_merge/importDataLastH.py +python3 ./test.py -f import_merge/importDataLastS.py +python3 ./test.py -f import_merge/importDataLastSub.py +python3 ./test.py -f import_merge/importDataLastTO.py +python3 ./test.py -f import_merge/importDataLastTPO.py +python3 ./test.py -f import_merge/importDataLastT.py +python3 ./test.py -f import_merge/importDataS.py +python3 ./test.py -f import_merge/importDataSub.py +python3 ./test.py -f import_merge/importDataTO.py +python3 ./test.py -f import_merge/importDataTPO.py +python3 ./test.py -f import_merge/importDataT.py +python3 ./test.py -f import_merge/importHeadOverlap.py +python3 ./test.py -f import_merge/importHeadPartOverlap.py +python3 ./test.py -f 
import_merge/importHead.py +python3 ./test.py -f import_merge/importHORestart.py +python3 ./test.py -f import_merge/importHPORestart.py +python3 ./test.py -f import_merge/importHRestart.py +python3 ./test.py -f import_merge/importLastHO.py +python3 ./test.py -f import_merge/importLastHPO.py +python3 ./test.py -f import_merge/importLastH.py +python3 ./test.py -f import_merge/importLastS.py +python3 ./test.py -f import_merge/importLastSub.py +python3 ./test.py -f import_merge/importLastTO.py +python3 ./test.py -f import_merge/importLastTPO.py +python3 ./test.py -f import_merge/importLastT.py +python3 ./test.py -f import_merge/importSpan.py +python3 ./test.py -f import_merge/importSRestart.py +python3 ./test.py -f import_merge/importSubRestart.py +python3 ./test.py -f import_merge/importTailOverlap.py +python3 ./test.py -f import_merge/importTailPartOverlap.py +python3 ./test.py -f import_merge/importTail.py +python3 ./test.py -f import_merge/importToCommit.py +python3 ./test.py -f import_merge/importTORestart.py +python3 ./test.py -f import_merge/importTPORestart.py +python3 ./test.py -f import_merge/importTRestart.py +python3 ./test.py -f import_merge/importInsertThenImport.py +python3 ./test.py -f import_merge/importCSV.py +python3 ./test.py -f import_merge/import_update_0.py +python3 ./test.py -f import_merge/import_update_1.py +python3 ./test.py -f import_merge/import_update_2.py + +python3 ./test.py -f insert/basic.py +python3 ./test.py -f insert/int.py +python3 ./test.py -f insert/float.py +python3 ./test.py -f insert/bigint.py +python3 ./test.py -f insert/bool.py +python3 ./test.py -f insert/double.py +python3 ./test.py -f insert/smallint.py +python3 ./test.py -f insert/tinyint.py +python3 ./test.py -f insert/date.py +python3 ./test.py -f insert/binary.py +python3 ./test.py -f insert/nchar.py +#python3 ./test.py -f insert/nchar-boundary.py +python3 ./test.py -f insert/nchar-unicode.py +python3 ./test.py -f insert/multi.py +python3 ./test.py -f 
insert/randomNullCommit.py +python3 insert/retentionpolicy.py +python3 ./test.py -f insert/alterTableAndInsert.py +python3 ./test.py -f insert/insertIntoTwoTables.py +python3 ./test.py -f insert/before_1970.py +python3 ./test.py -f insert/special_character_show.py +python3 bug2265.py +python3 ./test.py -f insert/bug3654.py +python3 ./test.py -f insert/insertDynamicColBeforeVal.py +python3 ./test.py -f insert/in_function.py +python3 ./test.py -f insert/modify_column.py +#python3 ./test.py -f insert/line_insert.py +python3 ./test.py -f insert/specialSql.py +python3 ./test.py -f insert/timestamp.py +python3 ./test.py -f insert/metadataUpdate.py +python3 ./test.py -f insert/unsignedInt.py +python3 ./test.py -f insert/unsignedBigint.py +python3 ./test.py -f insert/unsignedSmallint.py +python3 ./test.py -f insert/unsignedTinyint.py +python3 ./test.py -f insert/insertFromCSV.py +python3 ./test.py -f insert/boundary2.py +python3 ./test.py -f insert/insert_locking.py +python3 test.py -f insert/insert_before_use_db.py +python3 ./test.py -f insert/flushwhiledrop.py +python3 ./test.py -f insert/verifyMemToDiskCrash.py +#python3 ./test.py -f insert/schemalessInsert.py +#python3 ./test.py -f insert/openTsdbJsonInsert.py +python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py + + +# update +python3 ./test.py -f update/merge_commit_data.py +python3 ./test.py -f update/allow_update.py +python3 ./test.py -f update/allow_update-0.py +python3 ./test.py -f update/append_commit_data.py +python3 ./test.py -f update/append_commit_last-0.py +python3 ./test.py -f update/append_commit_last.py + + +python3 ./test.py -f update/merge_commit_data2.py +python3 ./test.py -f update/merge_commit_data2_update0.py +python3 ./test.py -f update/merge_commit_last-0.py +python3 ./test.py -f update/merge_commit_last.py +python3 ./test.py -f update/update_options.py +python3 ./test.py -f update/merge_commit_data-0.py + +# wal +python3 ./test.py -f wal/addOldWalTest.py +python3 ./test.py -f wal/sdbComp.py 
+ + + + + + + + + + + diff --git a/tests/pytest/fulltest-others.sh b/tests/pytest/fulltest-others.sh new file mode 100644 index 0000000000000000000000000000000000000000..e0b0851a22de16dbc3e6aea17937121573eb9997 --- /dev/null +++ b/tests/pytest/fulltest-others.sh @@ -0,0 +1,48 @@ +#!/bin/bash +ulimit -c unlimited +#======================p1-start=============== +#======================p1-end=============== + +#python3 ./test.py -f dbmgmt/database-name-boundary.py +python3 test.py -f dbmgmt/nanoSecondCheck.py + +# +python3 ./test.py -f tsdb/tsdbComp.py + +# user +python3 ./test.py -f user/user_create.py +python3 ./test.py -f user/pass_len.py + +# perfbenchmark +python3 ./test.py -f perfbenchmark/bug3433.py +#python3 ./test.py -f perfbenchmark/bug3589.py +#python3 ./test.py -f perfbenchmark/taosdemoInsert.py + +#alter table +python3 ./test.py -f alter/alter_table_crash.py +python3 ./test.py -f alter/alterTabAddTagWithNULL.py +python3 ./test.py -f alter/alterTimestampColDataProcess.py +python3 ./test.py -f alter/alter_table.py +python3 ./test.py -f alter/alter_debugFlag.py +python3 ./test.py -f alter/alter_keep.py +python3 ./test.py -f alter/alter_cacheLastRow.py +python3 ./test.py -f alter/alter_create_exception.py +python3 ./test.py -f alter/alterColMultiTimes.py + +python3 ./test.py -f account/account_create.py + +# client +python3 ./test.py -f client/client.py +python3 ./test.py -f client/version.py +python3 ./test.py -f client/alterDatabase.py +python3 ./test.py -f client/noConnectionErrorTest.py +python3 ./test.py -f client/taoshellCheckCase.py +# python3 ./test.py -f client/change_time_1_1.py +# python3 ./test.py -f client/change_time_1_2.py +python3 client/twoClients.py + +python3 testMinTablesPerVnode.py + +# topic +python3 ./test.py -f topic/topicQuery.py + diff --git a/tests/pytest/fulltest-query.sh b/tests/pytest/fulltest-query.sh new file mode 100644 index 0000000000000000000000000000000000000000..99321351370d9d09b31dea57b9890cccb00205b2 --- /dev/null +++ 
b/tests/pytest/fulltest-query.sh @@ -0,0 +1,199 @@ +#!/bin/bash +ulimit -c unlimited +#======================p1-start=============== +#======================p1-end=============== + + +# timezone +python3 ./test.py -f TimeZone/TestCaseTimeZone.py + +#stable +python3 ./test.py -f stable/insert.py +python3 ./test.py -f stable/query_after_reset.py + +#table +python3 ./test.py -f table/alter_wal0.py +python3 ./test.py -f table/column_name.py +python3 ./test.py -f table/column_num.py +python3 ./test.py -f table/db_table.py +python3 ./test.py -f table/create_sensitive.py +python3 ./test.py -f table/tablename-boundary.py +python3 ./test.py -f table/max_table_length.py +python3 ./test.py -f table/alter_column.py +python3 ./test.py -f table/boundary.py +#python3 ./test.py -f table/create.py +python3 ./test.py -f table/del_stable.py +python3 ./test.py -f table/create_db_from_normal_db.py + +# tag +python3 ./test.py -f tag_lite/filter.py +python3 ./test.py -f tag_lite/create-tags-boundary.py +python3 ./test.py -f tag_lite/3.py +python3 ./test.py -f tag_lite/4.py +python3 ./test.py -f tag_lite/5.py +python3 ./test.py -f tag_lite/6.py +python3 ./test.py -f tag_lite/add.py +python3 ./test.py -f tag_lite/bigint.py +python3 ./test.py -f tag_lite/binary_binary.py +python3 ./test.py -f tag_lite/binary.py +python3 ./test.py -f tag_lite/bool_binary.py +python3 ./test.py -f tag_lite/bool_int.py +python3 ./test.py -f tag_lite/bool.py +python3 ./test.py -f tag_lite/change.py +python3 ./test.py -f tag_lite/column.py +python3 ./test.py -f tag_lite/commit.py +python3 ./test.py -f tag_lite/create.py +python3 ./test.py -f tag_lite/datatype.py +python3 ./test.py -f tag_lite/datatype-without-alter.py +python3 ./test.py -f tag_lite/delete.py +python3 ./test.py -f tag_lite/double.py +python3 ./test.py -f tag_lite/float.py +python3 ./test.py -f tag_lite/int_binary.py +python3 ./test.py -f tag_lite/int_float.py +python3 ./test.py -f tag_lite/int.py +python3 ./test.py -f tag_lite/set.py +python3 
./test.py -f tag_lite/smallint.py +python3 ./test.py -f tag_lite/tinyint.py +python3 ./test.py -f tag_lite/timestamp.py +python3 ./test.py -f tag_lite/TestModifyTag.py +python3 ./test.py -f tag_lite/unsignedInt.py +python3 ./test.py -f tag_lite/unsignedBigint.py +python3 ./test.py -f tag_lite/unsignedSmallint.py +python3 ./test.py -f tag_lite/unsignedTinyint.py +python3 ./test.py -f tag_lite/alter_tag.py +python3 ./test.py -f tag_lite/drop_auto_create.py + +#query +python3 ./test.py -f query/distinctOneColTb.py +python3 ./test.py -f query/filter.py +python3 ./test.py -f query/filterCombo.py +python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/queryError.py +python3 ./test.py -f query/filterAllIntTypes.py +python3 ./test.py -f query/filterFloatAndDouble.py +python3 ./test.py -f query/filterOtherTypes.py +python3 ./test.py -f query/querySort.py +python3 ./test.py -f query/queryJoin.py +python3 ./test.py -f query/select_last_crash.py +python3 ./test.py -f query/queryNullValueTest.py +python3 ./test.py -f query/queryInsertValue.py +python3 ./test.py -f query/queryConnection.py +python3 ./test.py -f query/queryCountCSVData.py +python3 ./test.py -f query/natualInterval.py +python3 ./test.py -f query/bug1471.py +#python3 ./test.py -f query/dataLossTest.py +python3 ./test.py -f query/bug1874.py +python3 ./test.py -f query/bug1875.py +python3 ./test.py -f query/bug1876.py +python3 ./test.py -f query/bug2218.py +python3 ./test.py -f query/bug2117.py +python3 ./test.py -f query/bug2118.py +python3 ./test.py -f query/bug2143.py +python3 ./test.py -f query/sliding.py +python3 ./test.py -f query/unionAllTest.py +python3 ./test.py -f query/bug2281.py +python3 ./test.py -f query/udf.py +python3 ./test.py -f query/bug2119.py +python3 ./test.py -f query/isNullTest.py +python3 ./test.py -f query/queryWithTaosdKilled.py +python3 ./test.py -f query/floatCompare.py +python3 ./test.py -f query/query1970YearsAf.py +python3 ./test.py -f query/bug3351.py +python3 ./test.py 
-f query/bug3375.py +python3 ./test.py -f query/queryJoin10tables.py +python3 ./test.py -f query/queryStddevWithGroupby.py +python3 ./test.py -f query/querySecondtscolumnTowherenow.py +python3 ./test.py -f query/queryFilterTswithDateUnit.py +python3 ./test.py -f query/queryTscomputWithNow.py +python3 ./test.py -f query/queryStableJoin.py +python3 ./test.py -f query/computeErrorinWhere.py +python3 ./test.py -f query/queryTsisNull.py +python3 ./test.py -f query/subqueryFilter.py +python3 ./test.py -f query/nestedQuery/queryInterval.py +python3 ./test.py -f query/queryStateWindow.py +# python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py +python3 ./test.py -f query/nestquery_last_row.py +python3 ./test.py -f query/nestedQuery/nestedQuery.py +python3 ./test.py -f query/nestedQuery/nestedQuery_datacheck.py +python3 ./test.py -f query/queryCnameDisplay.py +# python3 ./test.py -f query/operator_cost.py +# python3 ./test.py -f query/long_where_query.py +python3 test.py -f query/nestedQuery/queryWithSpread.py +python3 ./test.py -f query/bug6586.py +# python3 ./test.py -f query/bug5903.py +python3 test.py -f query/queryInterval.py +python3 test.py -f query/queryFillTest.py +python3 ./test.py -f query/last_cache.py +python3 ./test.py -f query/last_row_cache.py +python3 ./test.py -f query/queryGroupbySort.py +python3 ./test.py -f query/filterAllUnsignedIntTypes.py +python3 ./test.py -f query/queryBetweenAnd.py +python3 ./test.py -f query/querySession.py +python3 ./test.py -f query/queryWildcardLength.py +python3 ./test.py -f query/queryTbnameUpperLower.py +python3 ./test.py -f query/query.py +python3 ./test.py -f query/queryDiffColsTagsAndOr.py +python3 ./test.py -f query/queryGroupTbname.py +python3 ./test.py -f query/queryRegex.py + +#stream +python3 ./test.py -f stream/metric_1.py +python3 ./test.py -f stream/metric_n.py +python3 ./test.py -f stream/new.py +python3 ./test.py -f stream/stream1.py +python3 ./test.py -f stream/stream2.py +#python3 ./test.py -f 
stream/parser.py +python3 ./test.py -f stream/history.py +python3 ./test.py -f stream/sys.py +python3 ./test.py -f stream/table_1.py +python3 ./test.py -f stream/table_n.py +python3 ./test.py -f stream/showStreamExecTimeisNull.py +python3 ./test.py -f stream/cqSupportBefore1970.py + +python3 ./test.py -f query/queryGroupbyWithInterval.py +python3 queryCount.py + +# subscribe +python3 test.py -f subscribe/singlemeter.py +#python3 test.py -f subscribe/stability.py +python3 test.py -f subscribe/supertable.py + +# functions +python3 ./test.py -f functions/all_null_value.py +python3 ./test.py -f functions/function_avg.py -r 1 +python3 ./test.py -f functions/function_bottom.py -r 1 +python3 ./test.py -f functions/function_count.py -r 1 +python3 ./test.py -f functions/function_count_last_stab.py +python3 ./test.py -f functions/function_diff.py -r 1 +python3 ./test.py -f functions/function_first.py -r 1 +python3 ./test.py -f functions/function_last.py -r 1 +python3 ./test.py -f functions/function_last_row.py -r 1 +python3 ./test.py -f functions/function_leastsquares.py -r 1 +python3 ./test.py -f functions/function_max.py -r 1 +python3 ./test.py -f functions/function_min.py -r 1 +python3 ./test.py -f functions/function_operations.py -r 1 +python3 ./test.py -f functions/function_percentile.py -r 1 +python3 ./test.py -f functions/function_spread.py -r 1 +python3 ./test.py -f functions/function_stddev.py -r 1 +python3 ./test.py -f functions/function_sum.py -r 1 +python3 ./test.py -f functions/function_top.py -r 1 +python3 ./test.py -f functions/function_sample.py -r 1 +python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f functions/function_twa_test2.py +python3 ./test.py -f functions/function_stddev_td2555.py +python3 ./test.py -f functions/showOfflineThresholdIs864000.py +python3 ./test.py -f functions/function_interp.py +#python3 ./test.py -f functions/queryTestCases.py +python3 ./test.py -f functions/function_stateWindow.py +python3 ./test.py -f 
functions/function_derivative.py +python3 ./test.py -f functions/function_irate.py +python3 ./test.py -f functions/function_ceil.py +python3 ./test.py -f functions/function_floor.py +python3 ./test.py -f functions/function_round.py +python3 ./test.py -f functions/function_elapsed.py +python3 ./test.py -f functions/function_mavg.py +python3 ./test.py -f functions/function_csum.py +python3 ./test.py -f functions/function_percentile2.py +python3 ./test.py -f functions/variable_httpDbNameMandatory.py + + diff --git a/tests/pytest/fulltest-tools.sh b/tests/pytest/fulltest-tools.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4ac42cb57f84cd2f4769a2e1608508d68e6d1a6 --- /dev/null +++ b/tests/pytest/fulltest-tools.sh @@ -0,0 +1,44 @@ +#!/bin/bash +ulimit -c unlimited +#======================p1-start=============== +#======================p1-end=============== + +# tools +python3 test.py -f tools/taosdumpTest.py +python3 test.py -f tools/taosdumpTest2.py + +python3 test.py -f tools/taosdemoTest.py +python3 test.py -f tools/taosdemoTestWithoutMetric.py +python3 test.py -f tools/taosdemoTestWithJson.py +python3 test.py -f tools/taosdemoTestLimitOffset.py +python3 test.py -f tools/taosdemoTestTblAlt.py +python3 test.py -f tools/taosdemoTestSampleData.py +python3 test.py -f tools/taosdemoTestInterlace.py +# python3 test.py -f tools/taosdemoTestQuery.py +python3 ./test.py -f tools/taosdemoTestdatatype.py + +# nano support +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +python3 test.py -f tools/taosdumpTestNanoSupport.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f 
tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py +python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py +python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py +python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +#python3 test.py -f tools/taosdemoAllTest/TD-10539/create_taosdemo.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py + + + + + + + + + + + diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c23f0614c43ddb5548a305f6761888e9b56b244c..47e535cbd81b9cbc9675493342c1b734ccbeeca2 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -29,6 +29,7 @@ python3 ./test.py -f insert/in_function.py python3 ./test.py -f insert/modify_column.py #python3 ./test.py -f insert/line_insert.py python3 ./test.py -f insert/specialSql.py +python3 ./test.py -f insert/timestamp.py # timezone @@ -44,8 +45,9 @@ python3 ./test.py -f table/tablename-boundary.py python3 ./test.py -f table/max_table_length.py python3 ./test.py -f table/alter_column.py python3 ./test.py -f table/boundary.py -python3 ./test.py -f table/create.py +#python3 ./test.py -f table/create.py python3 ./test.py -f table/del_stable.py +python3 ./test.py -f table/create_db_from_normal_db.py #stable python3 ./test.py -f stable/insert.py @@ -171,7 +173,7 @@ python3 test.py -f tools/taosdemoTestLimitOffset.py python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py -python3 test.py -f tools/taosdemoTestQuery.py +# python3 test.py -f tools/taosdemoTestQuery.py # restful test for python # python3 test.py -f restful/restful_bind_db1.py @@ -215,12 +217,12 @@ python3 ./test.py -f stable/query_after_reset.py # perfbenchmark python3 ./test.py -f perfbenchmark/bug3433.py #python3 ./test.py -f perfbenchmark/bug3589.py -python3 
./test.py -f perfbenchmark/taosdemoInsert.py +#python3 ./test.py -f perfbenchmark/taosdemoInsert.py #taosdemo -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py -# python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py -# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py #query python3 test.py -f query/distinctOneColTb.py @@ -371,6 +373,9 @@ python3 ./test.py -f functions/function_irate.py python3 ./test.py -f functions/function_ceil.py python3 ./test.py -f functions/function_floor.py python3 ./test.py -f functions/function_round.py +python3 ./test.py -f functions/function_elapsed.py +python3 ./test.py -f functions/function_mavg.py +python3 ./test.py -f functions/function_csum.py python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py @@ -393,7 +398,7 @@ python3 ./test.py -f tag_lite/alter_tag.py python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py -python3 test.py -f tools/taosdemoAllTest/TD-10539/create_taosdemo.py +#python3 test.py -f tools/taosdemoAllTest/TD-10539/create_taosdemo.py python3 ./test.py -f tag_lite/drop_auto_create.py python3 test.py -f insert/insert_before_use_db.py python3 test.py -f alter/alter_keep.py @@ -418,8 +423,9 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py python3 ./test.py -f query/queryRegex.py python3 ./test.py -f tools/taosdemoTestdatatype.py #python3 ./test.py -f insert/schemalessInsert.py -#python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py #python3 ./test.py -f insert/openTsdbJsonInsert.py +python3 ./test.py -f 
insert/openTsdbTelnetLinesInsert.py +python3 ./test.py -f functions/variable_httpDbNameMandatory.py #======================p4-end=============== diff --git a/tests/pytest/functions/function_all_sample.py b/tests/pytest/functions/function_all_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..de136976ea9a755edd0ca1e90cab5301fb48d764 --- /dev/null +++ b/tests/pytest/functions/function_all_sample.py @@ -0,0 +1,671 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + sample function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "sample(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, comma between col and k , required parameters; + :param k: int/float,the width of the sliding window, [1,100], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, result column another 
name,or add other funtion; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :return: sample query statement,default: select sample(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ","") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, 
condition=condition + )) + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + # colname = col if "." not in col else col.split(".")[1] + # col_index = collist.index(colname) + # if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if 
all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias]): + if any([ not alias.isalnum(), re.compile('^[a-zA-Z]{1}.*$').match(col) is None ]): + # actually, column alias also support "_", but in this case,forbidden that。 + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, 
condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("sample\([a-z0-9 .,]*\)", f"count({col})", self.sample_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + if tdSql.queryRows == 0: + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sample_result = tdSql.queryResult + sample_len = tdSql.queryRows + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"and {tb_condition}='{group_name}' and {col} is not null", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}' and {col} is not null", clear_condition) + + tdSql.query(f"select ts, {col} {alias} from {table_expr} {pre_condition}") + # pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + # pre_sample = np.convolve(pre_data, np.ones(k), "valid")/k + pre_sample = tdSql.queryResult + pre_len = tdSql.queryRows + step = pre_len if pre_len < k else k + # tdSql.query(self.sample_query_form( + # sel=sel, func=func, col=col, 
m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + for i in range(step): + if sample_result[pre_row:pre_row+step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + # for j in range(len(pre_sample)): + # print(f"case in {line}:", end='') + # tdSql.checkData(pre_row+j, 1, pre_sample[j]) + pre_row += step + return + elif "union" in condition: + union_sql_0 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + # union_sample_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + # union_sample_1 = tdSql.queryResult + row_union_1 = tdSql.queryRows + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + # for i in range(tdSql.queryRows): + # print(f"case in {line}: ", end='') + # if i < row_union_0: + # tdSql.checkData(i, 1, union_sample_0[i][1]) + # else: + # tdSql.checkData(i, 1, union_sample_1[i-row_union_0][1]) + if row_union_0 + row_union_1 != sample_len: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + return + + else: + if "where" in condition: + condition = re.sub('where', f"where {col} is not null and ", condition) + else: + condition = f"where {col} is not null" + condition + tdSql.query(f"select ts, {col}, {alias} from {table_expr} {re.sub('limit 
[0-9]*|offset [0-9]*','',condition)}") + # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_sample = tdSql.queryResult + # pre_len = tdSql.queryRows + for i in range(sample_len): + if sample_result[pre_row:pre_row + step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + pass + + def sample_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checksample() + case2 = {"col": "c2"} + self.checksample(**case2) + case3 = {"col": "c5"} + self.checksample(**case3) + case4 = {"col": "c7"} + self.checksample(**case4) + case5 = {"col": "c8"} + self.checksample(**case5) + case6 = {"col": "c9"} + self.checksample(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checksample(**case7) + case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} + self.checksample(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checksample(**case9) + case10 = {"alias": ", _c0"} + self.checksample(**case10) + # case11 = {"alias": ", st1"} + # self.checksample(**case11) + # case12 = {"alias": ", c1"} + # self.checksample(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checksample(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checksample(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checksample(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checksample(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + 
"condition": "where t1.ts=t2.ts" + } + self.checksample(**case17) + # # case18~19: with group by + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checksample(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + self.checksample(**case20) + case21 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + self.checksample(**case21) + + # case22: with union + case22 = { + "condition": "union all select sample( c1 , 1 ) from t2" + } + self.checksample(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checksample(**case23) + + # case24: value k range[1, 100], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checksample(**case24) + case25 = {"k": 2.999} + self.checksample(**case25) + case26 = {"k": 1000} + self.checksample(**case26) + case27 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + self.checksample(**case27) # with slimit + case28 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + self.checksample(**case28) # with soffset + + pass + + def sample_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checksample(**err1) # no col + err2 = {"sel": ""} + self.checksample(**err2) # no select + err3 = {"func": "sample", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checksample(**err3) # no sample condition: select sample from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checksample(**err4) # no sample condition: select sample() from + err5 = {"func": "sample", "r_comm": ""} + self.checksample(**err5) # no brackets: select sample col, k from + err6 = {"fr": ""} + self.checksample(**err6) # no from + err7 = {"k": ""} + self.checksample(**err7) # no k + err8 = {"table_expr": ""} + self.checksample(**err8) # no table_expr + + err9 = {"col": "st1"} + self.checksample(**err9) # col: tag + err10 = {"col": 1} + 
self.checksample(**err10) # col: value + err11 = {"col": "NULL"} + self.checksample(**err11) # col: NULL + err12 = {"col": "%_"} + self.checksample(**err12) # col: %_ + err13 = {"col": "c3"} + self.checksample(**err13) # col: timestamp col + err14 = {"col": "_c0"} + # self.checksample(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + # self.checksample(**err15) # expr col + err16 = {"col": "c4"} + self.checksample(**err16) # binary col + err17 = {"col": "c10"} + self.checksample(**err17) # nchar col + err18 = {"col": "c6"} + self.checksample(**err18) # bool col + err19 = {"col": "'c1'"} + self.checksample(**err19) # col: string + err20 = {"col": None} + self.checksample(**err20) # col: None + err21 = {"col": "''"} + self.checksample(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checksample(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checksample(**err23) # tbname + err24 = {"col": "stb1"} + self.checksample(**err24) # stbname + err25 = {"col": "db"} + self.checksample(**err25) # datbasename + err26 = {"col": "True"} + self.checksample(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checksample(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checksample(**err28) # col: all col + err29 = {"func": "sample[", "r_comm": "]"} + self.checksample(**err29) # form: sample[col, k] + err30 = {"func": "sample{", "r_comm": "}"} + self.checksample(**err30) # form: sample{col, k} + err31 = {"col": "[c1]"} + self.checksample(**err31) # form: sample([col], k) + err32 = {"col": "c1, c2"} + self.checksample(**err32) # form: sample(col, col2, k) + err33 = {"col": "c1, 2"} + self.checksample(**err33) # form: sample(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checksample(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checksample(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checksample(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + 
self.checksample(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checksample(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checksample(**err39) # mix with calculation function 2 + err40 = {"alias": "+ 2"} + self.checksample(**err40) # mix with arithmetic 1 + err41 = {"alias": "+ avg(c1)"} + self.checksample(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checksample(**err42) # mix with other col + err43 = {"table_expr": "stb1"} + self.checksample(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checksample(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checksample(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + # self.checksample(**err46) # group by normal col + + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checksample(**err49) # k: timestamp + err50 = {"k": False} + self.checksample(**err50) # k: False + err51 = {"k": "%"} + self.checksample(**err51) # k: special char + err52 = {"k": ""} + self.checksample(**err52) # k: "" + err53 = {"k": None} + self.checksample(**err53) # k: None + err54 = {"k": "NULL"} + self.checksample(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checksample(**err55) # k: string + err56 = {"k": "c1"} + self.checksample(**err56) # k: sring,col name + err57 = {"col": "c1, 1, c2"} + self.checksample(**err57) # form: sample(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checksample(**err58) # form: sample(col newname, k) + err59 = {"k": "'1'"} + # self.checksample(**err59) # formL sample(colm, "1") + err60 = {"k": "-1-(-2)"} + # self.checksample(**err60) # formL sample(colm, -1-2) + err61 = {"k": 1001} + self.checksample(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checksample(**err62) 
# k: negative number + err63 = {"k": 0} + self.checksample(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checksample(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checksample(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checksample(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checksample(**err67) # k: left out of [1, 1000] + + pass + + def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def sample_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def sample_test_run(self) : + 
tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.sample_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.sample_test_table(tbnum) + self.sample_test_data(tbnum, per_table_rows, nowtime) + self.sample_current_query() + self.sample_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values 
({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.sample_current_query() + self.sample_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.sample_current_query() + self.sample_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.sample_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_ceil.py b/tests/pytest/functions/function_ceil.py index 08769c7f7aeff422f955f18efa525b5250be0761..cb3408c3774c09c41b637c0ddff2600e56d8aea9 100644 --- a/tests/pytest/functions/function_ceil.py +++ b/tests/pytest/functions/function_ceil.py @@ -1322,21 +1322,21 @@ class TDTestCase: double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ 
uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( - "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % (self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1366,11 +1366,11 @@ class TDTestCase: self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) tdSql.execute( - "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t2 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % 
(self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1400,139 +1400,572 @@ class TDTestCase: self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) + shouldPass = ['select ceil(int_col) from super', + 'select ceil(int_col) from t1', + 'select ceil(bigint_col) from super', + 'select ceil(bigint_col) from t1', + 'select ceil(float_col) from super', + 'select ceil(float_col) from t1', + 'select ceil(double_col) from super', + 'select ceil(double_col) from t1', + 'select ceil(smallint_col) from super', + 'select ceil(smallint_col) from t1', + 'select ceil(tinyint_col) from super', + 'select ceil(tinyint_col) from t1', + 'select ceil(uint_col) from super', + 'select ceil(uint_col) from t1', + 'select ceil(ubigint_col) from super', + 'select ceil(ubigint_col) from t1', + 'select ceil(usmallint_col) from super', + 'select ceil(usmallint_col) from t1', + 'select ceil(utinyint_col) from super', + 'select ceil(utinyint_col) from t1', + 'select ceil(int_col) - ceil(int_col) from super', + 'select ceil(int_col) - ceil(int_col) from t1', + 'select ceil(bigint_col) - ceil(bigint_col) from super', + 'select ceil(bigint_col) - ceil(bigint_col) from t1', + 'select ceil(float_col) - ceil(float_col) from super', + 'select ceil(float_col) - ceil(float_col) from t1', + 'select ceil(double_col) - ceil(double_col) from super', + 'select ceil(double_col) - ceil(double_col) from t1', + 'select ceil(smallint_col) - ceil(smallint_col) from super', + 'select ceil(smallint_col) - ceil(smallint_col) from t1', + 'select ceil(tinyint_col) - ceil(tinyint_col) from super', + 'select ceil(tinyint_col) - ceil(tinyint_col) from t1', + 'select ceil(uint_col) - ceil(uint_col) from super', + 'select ceil(uint_col) - ceil(uint_col) from t1', + 'select ceil(ubigint_col) - ceil(ubigint_col) from super', + 'select ceil(ubigint_col) - ceil(ubigint_col) from t1', + 'select ceil(usmallint_col) - ceil(usmallint_col) from super', + 'select ceil(usmallint_col) - ceil(usmallint_col) from 
t1', + 'select ceil(utinyint_col) - ceil(utinyint_col) from super', + 'select ceil(utinyint_col) - ceil(utinyint_col) from t1', + 'select ceil(int_col) / ceil(int_col) from super', + 'select ceil(int_col) / ceil(int_col) from t1', + 'select ceil(bigint_col) / ceil(bigint_col) from super', + 'select ceil(bigint_col) / ceil(bigint_col) from t1', + 'select ceil(float_col) / ceil(float_col) from super', + 'select ceil(float_col) / ceil(float_col) from t1', + 'select ceil(double_col) / ceil(double_col) from super', + 'select ceil(double_col) / ceil(double_col) from t1', + 'select ceil(smallint_col) / ceil(smallint_col) from super', + 'select ceil(smallint_col) / ceil(smallint_col) from t1', + 'select ceil(tinyint_col) / ceil(tinyint_col) from super', + 'select ceil(tinyint_col) / ceil(tinyint_col) from t1', + 'select ceil(uint_col) / ceil(uint_col) from super', + 'select ceil(uint_col) / ceil(uint_col) from t1', + 'select ceil(ubigint_col) / ceil(ubigint_col) from super', + 'select ceil(ubigint_col) / ceil(ubigint_col) from t1', + 'select ceil(usmallint_col) / ceil(usmallint_col) from super', + 'select ceil(usmallint_col) / ceil(usmallint_col) from t1', + 'select ceil(utinyint_col) / ceil(utinyint_col) from super', + 'select ceil(utinyint_col) / ceil(utinyint_col) from t1', + 'select ceil(int_col) * ceil(int_col) from super', + 'select ceil(int_col) * ceil(int_col) from t1', + 'select ceil(bigint_col) * ceil(bigint_col) from super', + 'select ceil(bigint_col) * ceil(bigint_col) from t1', + 'select ceil(float_col) * ceil(float_col) from super', + 'select ceil(float_col) * ceil(float_col) from t1', + 'select ceil(double_col) * ceil(double_col) from super', + 'select ceil(double_col) * ceil(double_col) from t1', + 'select ceil(smallint_col) * ceil(smallint_col) from super', + 'select ceil(smallint_col) * ceil(smallint_col) from t1', + 'select ceil(tinyint_col) * ceil(tinyint_col) from super', + 'select ceil(tinyint_col) * ceil(tinyint_col) from t1', + 'select 
ceil(uint_col) * ceil(uint_col) from super', + 'select ceil(uint_col) * ceil(uint_col) from t1', + 'select ceil(ubigint_col) * ceil(ubigint_col) from super', + 'select ceil(ubigint_col) * ceil(ubigint_col) from t1', + 'select ceil(usmallint_col) * ceil(usmallint_col) from super', + 'select ceil(usmallint_col) * ceil(usmallint_col) from t1', + 'select ceil(utinyint_col) * ceil(utinyint_col) from super', + 'select ceil(utinyint_col) * ceil(utinyint_col) from t1', + 'select ceil(count(ts)) from super', + 'select ceil(count(ts)) from t1', + 'select ceil(count(timestamp_col)) from super', + 'select ceil(count(timestamp_col)) from t1', + 'select ceil(count(int_col)) from super', + 'select ceil(count(int_col)) from t1', + 'select ceil(count(bigint_col)) from super', + 'select ceil(count(bigint_col)) from t1', + 'select ceil(count(float_col)) from super', + 'select ceil(count(float_col)) from t1', + 'select ceil(count(double_col)) from super', + 'select ceil(count(double_col)) from t1', + 'select ceil(count(binary_col)) from super', + 'select ceil(count(binary_col)) from t1', + 'select ceil(count(smallint_col)) from super', + 'select ceil(count(smallint_col)) from t1', + 'select ceil(count(tinyint_col)) from super', + 'select ceil(count(tinyint_col)) from t1', + 'select ceil(count(bool_col)) from super', + 'select ceil(count(bool_col)) from t1', + 'select ceil(count(nchar_col)) from super', + 'select ceil(count(nchar_col)) from t1', + 'select ceil(count(uint_col)) from super', + 'select ceil(count(uint_col)) from t1', + 'select ceil(count(ubigint_col)) from super', + 'select ceil(count(ubigint_col)) from t1', + 'select ceil(count(usmallint_col)) from super', + 'select ceil(count(usmallint_col)) from t1', + 'select ceil(count(utinyint_col)) from super', + 'select ceil(count(utinyint_col)) from t1', + 'select ceil(count(timestamp_tag)) from super', + 'select ceil(count(timestamp_tag)) from t1', + 'select ceil(count(int_tag)) from super', + 'select ceil(count(int_tag)) from 
t1', + 'select ceil(count(bigint_tag)) from super', + 'select ceil(count(bigint_tag)) from t1', + 'select ceil(count(float_tag)) from super', + 'select ceil(count(float_tag)) from t1', + 'select ceil(count(double_tag)) from super', + 'select ceil(count(double_tag)) from t1', + 'select ceil(count(binary_tag)) from super', + 'select ceil(count(binary_tag)) from t1', + 'select ceil(count(smallint_tag)) from super', + 'select ceil(count(smallint_tag)) from t1', + 'select ceil(count(tinyint_tag)) from super', + 'select ceil(count(tinyint_tag)) from t1', + 'select ceil(count(bool_tag)) from super', + 'select ceil(count(bool_tag)) from t1', + 'select ceil(count(nchar_tag)) from super', + 'select ceil(count(nchar_tag)) from t1', + 'select ceil(count(uint_tag)) from super', + 'select ceil(count(uint_tag)) from t1', + 'select ceil(count(ubigint_tag)) from super', + 'select ceil(count(ubigint_tag)) from t1', + 'select ceil(count(usmallint_tag)) from super', + 'select ceil(count(usmallint_tag)) from t1', + 'select ceil(count(utinyint_tag)) from super', + 'select ceil(count(utinyint_tag)) from t1', + 'select ceil(avg(int_col)) from super', + 'select ceil(avg(int_col)) from t1', + 'select ceil(avg(bigint_col)) from super', + 'select ceil(avg(bigint_col)) from t1', + 'select ceil(avg(float_col)) from super', + 'select ceil(avg(float_col)) from t1', + 'select ceil(avg(double_col)) from super', + 'select ceil(avg(double_col)) from t1', + 'select ceil(avg(smallint_col)) from super', + 'select ceil(avg(smallint_col)) from t1', + 'select ceil(avg(tinyint_col)) from super', + 'select ceil(avg(tinyint_col)) from t1', + 'select ceil(avg(uint_col)) from super', + 'select ceil(avg(uint_col)) from t1', + 'select ceil(avg(ubigint_col)) from super', + 'select ceil(avg(ubigint_col)) from t1', + 'select ceil(avg(usmallint_col)) from super', + 'select ceil(avg(usmallint_col)) from t1', + 'select ceil(avg(utinyint_col)) from super', + 'select ceil(avg(utinyint_col)) from t1', + 'select 
ceil(twa(int_col)) from t1', + 'select ceil(twa(bigint_col)) from t1', + 'select ceil(twa(float_col)) from t1', + 'select ceil(twa(double_col)) from t1', + 'select ceil(twa(smallint_col)) from t1', + 'select ceil(twa(tinyint_col)) from t1', + 'select ceil(twa(uint_col)) from t1', + 'select ceil(twa(ubigint_col)) from t1', + 'select ceil(twa(usmallint_col)) from t1', + 'select ceil(twa(utinyint_col)) from t1', + 'select ceil(sum(int_col)) from super', + 'select ceil(sum(int_col)) from t1', + 'select ceil(sum(bigint_col)) from super', + 'select ceil(sum(bigint_col)) from t1', + 'select ceil(sum(float_col)) from super', + 'select ceil(sum(float_col)) from t1', + 'select ceil(sum(double_col)) from super', + 'select ceil(sum(double_col)) from t1', + 'select ceil(sum(smallint_col)) from super', + 'select ceil(sum(smallint_col)) from t1', + 'select ceil(sum(tinyint_col)) from super', + 'select ceil(sum(tinyint_col)) from t1', + 'select ceil(sum(uint_col)) from super', + 'select ceil(sum(uint_col)) from t1', + 'select ceil(sum(ubigint_col)) from super', + 'select ceil(sum(ubigint_col)) from t1', + 'select ceil(sum(usmallint_col)) from super', + 'select ceil(sum(usmallint_col)) from t1', + 'select ceil(sum(utinyint_col)) from super', + 'select ceil(sum(utinyint_col)) from t1', + 'select ceil(stddev(int_col)) from super', + 'select ceil(stddev(int_col)) from t1', + 'select ceil(stddev(bigint_col)) from super', + 'select ceil(stddev(bigint_col)) from t1', + 'select ceil(stddev(float_col)) from super', + 'select ceil(stddev(float_col)) from t1', + 'select ceil(stddev(double_col)) from super', + 'select ceil(stddev(double_col)) from t1', + 'select ceil(stddev(smallint_col)) from super', + 'select ceil(stddev(smallint_col)) from t1', + 'select ceil(stddev(tinyint_col)) from super', + 'select ceil(stddev(tinyint_col)) from t1', + 'select ceil(stddev(uint_col)) from super', + 'select ceil(stddev(uint_col)) from t1', + 'select ceil(stddev(ubigint_col)) from super', + 'select 
ceil(stddev(ubigint_col)) from t1', + 'select ceil(stddev(usmallint_col)) from super', + 'select ceil(stddev(usmallint_col)) from t1', + 'select ceil(stddev(utinyint_col)) from super', + 'select ceil(stddev(utinyint_col)) from t1', + 'select ceil(irate(int_col)) from t1', + 'select ceil(irate(bigint_col)) from t1', + 'select ceil(irate(float_col)) from t1', + 'select ceil(irate(double_col)) from t1', + 'select ceil(irate(smallint_col)) from t1', + 'select ceil(irate(tinyint_col)) from t1', + 'select ceil(irate(uint_col)) from t1', + 'select ceil(irate(ubigint_col)) from t1', + 'select ceil(irate(usmallint_col)) from t1', + 'select ceil(irate(utinyint_col)) from t1', + 'select ceil(min(int_col)) from super', + 'select ceil(min(int_col)) from t1', + 'select ceil(min(bigint_col)) from super', + 'select ceil(min(bigint_col)) from t1', + 'select ceil(min(float_col)) from super', + 'select ceil(min(float_col)) from t1', + 'select ceil(min(double_col)) from super', + 'select ceil(min(double_col)) from t1', + 'select ceil(min(smallint_col)) from super', + 'select ceil(min(smallint_col)) from t1', + 'select ceil(min(tinyint_col)) from super', + 'select ceil(min(tinyint_col)) from t1', + 'select ceil(min(uint_col)) from super', + 'select ceil(min(uint_col)) from t1', + 'select ceil(min(ubigint_col)) from super', + 'select ceil(min(ubigint_col)) from t1', + 'select ceil(min(usmallint_col)) from super', + 'select ceil(min(usmallint_col)) from t1', + 'select ceil(min(utinyint_col)) from super', + 'select ceil(min(utinyint_col)) from t1', + 'select ceil(max(int_col)) from super', + 'select ceil(max(int_col)) from t1', + 'select ceil(max(bigint_col)) from super', + 'select ceil(max(bigint_col)) from t1', + 'select ceil(max(float_col)) from super', + 'select ceil(max(float_col)) from t1', + 'select ceil(max(double_col)) from super', + 'select ceil(max(double_col)) from t1', + 'select ceil(max(smallint_col)) from super', + 'select ceil(max(smallint_col)) from t1', + 'select 
ceil(max(tinyint_col)) from super', + 'select ceil(max(tinyint_col)) from t1', + 'select ceil(max(uint_col)) from super', + 'select ceil(max(uint_col)) from t1', + 'select ceil(max(ubigint_col)) from super', + 'select ceil(max(ubigint_col)) from t1', + 'select ceil(max(usmallint_col)) from super', + 'select ceil(max(usmallint_col)) from t1', + 'select ceil(max(utinyint_col)) from super', + 'select ceil(max(utinyint_col)) from t1', + 'select ceil(first(int_col)) from super', + 'select ceil(first(int_col)) from t1', + 'select ceil(first(bigint_col)) from super', + 'select ceil(first(bigint_col)) from t1', + 'select ceil(first(float_col)) from super', + 'select ceil(first(float_col)) from t1', + 'select ceil(first(double_col)) from super', + 'select ceil(first(double_col)) from t1', + 'select ceil(first(smallint_col)) from super', + 'select ceil(first(smallint_col)) from t1', + 'select ceil(first(tinyint_col)) from super', + 'select ceil(first(tinyint_col)) from t1', + 'select ceil(first(uint_col)) from super', + 'select ceil(first(uint_col)) from t1', + 'select ceil(first(ubigint_col)) from super', + 'select ceil(first(ubigint_col)) from t1', + 'select ceil(first(usmallint_col)) from super', + 'select ceil(first(usmallint_col)) from t1', + 'select ceil(first(utinyint_col)) from super', + 'select ceil(first(utinyint_col)) from t1', + 'select ceil(last(int_col)) from super', + 'select ceil(last(int_col)) from t1', + 'select ceil(last(bigint_col)) from super', + 'select ceil(last(bigint_col)) from t1', + 'select ceil(last(float_col)) from super', + 'select ceil(last(float_col)) from t1', + 'select ceil(last(double_col)) from super', + 'select ceil(last(double_col)) from t1', + 'select ceil(last(smallint_col)) from super', + 'select ceil(last(smallint_col)) from t1', + 'select ceil(last(tinyint_col)) from super', + 'select ceil(last(tinyint_col)) from t1', + 'select ceil(last(uint_col)) from super', + 'select ceil(last(uint_col)) from t1', + 'select 
ceil(last(ubigint_col)) from super', + 'select ceil(last(ubigint_col)) from t1', + 'select ceil(last(usmallint_col)) from super', + 'select ceil(last(usmallint_col)) from t1', + 'select ceil(last(utinyint_col)) from super', + 'select ceil(last(utinyint_col)) from t1', + 'select ceil(percentile(int_col, 1)) from t1', + 'select ceil(percentile(bigint_col, 1)) from t1', + 'select ceil(percentile(float_col, 1)) from t1', + 'select ceil(percentile(double_col, 1)) from t1', + 'select ceil(percentile(smallint_col, 1)) from t1', + 'select ceil(percentile(tinyint_col, 1)) from t1', + 'select ceil(percentile(uint_col, 1)) from t1', + 'select ceil(percentile(ubigint_col, 1)) from t1', + 'select ceil(percentile(usmallint_col, 1)) from t1', + 'select ceil(percentile(utinyint_col, 1)) from t1', + 'select ceil(apercentile(int_col, 1)) from super', + 'select ceil(apercentile(int_col, 1)) from t1', + 'select ceil(apercentile(bigint_col, 1)) from super', + 'select ceil(apercentile(bigint_col, 1)) from t1', + 'select ceil(apercentile(float_col, 1)) from super', + 'select ceil(apercentile(float_col, 1)) from t1', + 'select ceil(apercentile(double_col, 1)) from super', + 'select ceil(apercentile(double_col, 1)) from t1', + 'select ceil(apercentile(smallint_col, 1)) from super', + 'select ceil(apercentile(smallint_col, 1)) from t1', + 'select ceil(apercentile(tinyint_col, 1)) from super', + 'select ceil(apercentile(tinyint_col, 1)) from t1', + 'select ceil(apercentile(uint_col, 1)) from super', + 'select ceil(apercentile(uint_col, 1)) from t1', + 'select ceil(apercentile(ubigint_col, 1)) from super', + 'select ceil(apercentile(ubigint_col, 1)) from t1', + 'select ceil(apercentile(usmallint_col, 1)) from super', + 'select ceil(apercentile(usmallint_col, 1)) from t1', + 'select ceil(apercentile(utinyint_col, 1)) from super', + 'select ceil(apercentile(utinyint_col, 1)) from t1', + 'select ceil(last_row(int_col)) from super', + 'select ceil(last_row(int_col)) from t1', + 'select 
ceil(last_row(bigint_col)) from super', + 'select ceil(last_row(bigint_col)) from t1', + 'select ceil(last_row(float_col)) from super', + 'select ceil(last_row(float_col)) from t1', + 'select ceil(last_row(double_col)) from super', + 'select ceil(last_row(double_col)) from t1', + 'select ceil(last_row(smallint_col)) from super', + 'select ceil(last_row(smallint_col)) from t1', + 'select ceil(last_row(tinyint_col)) from super', + 'select ceil(last_row(tinyint_col)) from t1', + 'select ceil(last_row(uint_col)) from super', + 'select ceil(last_row(uint_col)) from t1', + 'select ceil(last_row(ubigint_col)) from super', + 'select ceil(last_row(ubigint_col)) from t1', + 'select ceil(last_row(usmallint_col)) from super', + 'select ceil(last_row(usmallint_col)) from t1', + 'select ceil(last_row(utinyint_col)) from super', + 'select ceil(last_row(utinyint_col)) from t1', + 'select ceil(interp(int_col)) from t1', + 'select ceil(interp(bigint_col)) from t1', + 'select ceil(interp(float_col)) from t1', + 'select ceil(interp(double_col)) from t1', + 'select ceil(interp(smallint_col)) from t1', + 'select ceil(interp(tinyint_col)) from t1', + 'select ceil(interp(uint_col)) from t1', + 'select ceil(interp(ubigint_col)) from t1', + 'select ceil(interp(usmallint_col)) from t1', + 'select ceil(interp(utinyint_col)) from t1', + 'select ceil(spread(ts)) from super', + 'select ceil(spread(ts)) from t1', + 'select ceil(spread(timestamp_col)) from super', + 'select ceil(spread(timestamp_col)) from t1', + 'select ceil(spread(int_col)) from super', + 'select ceil(spread(int_col)) from t1', + 'select ceil(spread(bigint_col)) from super', + 'select ceil(spread(bigint_col)) from t1', + 'select ceil(spread(float_col)) from super', + 'select ceil(spread(float_col)) from t1', + 'select ceil(spread(double_col)) from super', + 'select ceil(spread(double_col)) from t1', + 'select ceil(spread(smallint_col)) from super', + 'select ceil(spread(smallint_col)) from t1', + 'select 
ceil(spread(tinyint_col)) from super', + 'select ceil(spread(tinyint_col)) from t1', + 'select ceil(spread(uint_col)) from super', + 'select ceil(spread(uint_col)) from t1', + 'select ceil(spread(ubigint_col)) from super', + 'select ceil(spread(ubigint_col)) from t1', + 'select ceil(spread(usmallint_col)) from super', + 'select ceil(spread(usmallint_col)) from t1', + 'select ceil(spread(utinyint_col)) from super', + 'select ceil(spread(utinyint_col)) from t1', + 'select ceil(int_col + int_col) from super', + 'select ceil(int_col + int_col) from t1', + 'select ceil(bigint_col + bigint_col) from super', + 'select ceil(bigint_col + bigint_col) from t1', + 'select ceil(float_col + float_col) from super', + 'select ceil(float_col + float_col) from t1', + 'select ceil(double_col + double_col) from super', + 'select ceil(double_col + double_col) from t1', + 'select ceil(smallint_col + smallint_col) from super', + 'select ceil(smallint_col + smallint_col) from t1', + 'select ceil(tinyint_col + tinyint_col) from super', + 'select ceil(tinyint_col + tinyint_col) from t1', + 'select ceil(uint_col + uint_col) from super', + 'select ceil(uint_col + uint_col) from t1', + 'select ceil(ubigint_col + ubigint_col) from super', + 'select ceil(ubigint_col + ubigint_col) from t1', + 'select ceil(usmallint_col + usmallint_col) from super', + 'select ceil(usmallint_col + usmallint_col) from t1', + 'select ceil(utinyint_col + utinyint_col) from super', + 'select ceil(utinyint_col + utinyint_col) from t1', + 'select ceil(int_col - int_col) from super', + 'select ceil(int_col - int_col) from t1', + 'select ceil(bigint_col - bigint_col) from super', + 'select ceil(bigint_col - bigint_col) from t1', + 'select ceil(float_col - float_col) from super', + 'select ceil(float_col - float_col) from t1', + 'select ceil(double_col - double_col) from super', + 'select ceil(double_col - double_col) from t1', + 'select ceil(smallint_col - smallint_col) from super', + 'select ceil(smallint_col - 
smallint_col) from t1', + 'select ceil(tinyint_col - tinyint_col) from super', + 'select ceil(tinyint_col - tinyint_col) from t1', + 'select ceil(uint_col - uint_col) from super', + 'select ceil(uint_col - uint_col) from t1', + 'select ceil(ubigint_col - ubigint_col) from super', + 'select ceil(ubigint_col - ubigint_col) from t1', + 'select ceil(usmallint_col - usmallint_col) from super', + 'select ceil(usmallint_col - usmallint_col) from t1', + 'select ceil(utinyint_col - utinyint_col) from super', + 'select ceil(utinyint_col - utinyint_col) from t1', + 'select ceil(int_col * int_col) from super', + 'select ceil(int_col * int_col) from t1', + 'select ceil(bigint_col * bigint_col) from super', + 'select ceil(bigint_col * bigint_col) from t1', + 'select ceil(float_col * float_col) from super', + 'select ceil(float_col * float_col) from t1', + 'select ceil(double_col * double_col) from super', + 'select ceil(double_col * double_col) from t1', + 'select ceil(smallint_col * smallint_col) from super', + 'select ceil(smallint_col * smallint_col) from t1', + 'select ceil(tinyint_col * tinyint_col) from super', + 'select ceil(tinyint_col * tinyint_col) from t1', + 'select ceil(uint_col * uint_col) from super', + 'select ceil(uint_col * uint_col) from t1', + 'select ceil(ubigint_col * ubigint_col) from super', + 'select ceil(ubigint_col * ubigint_col) from t1', + 'select ceil(usmallint_col * usmallint_col) from super', + 'select ceil(usmallint_col * usmallint_col) from t1', + 'select ceil(utinyint_col * utinyint_col) from super', + 'select ceil(utinyint_col * utinyint_col) from t1', + 'select ceil(int_col / int_col) from super', + 'select ceil(int_col / int_col) from t1', + 'select ceil(bigint_col / bigint_col) from super', + 'select ceil(bigint_col / bigint_col) from t1', + 'select ceil(float_col / float_col) from super', + 'select ceil(float_col / float_col) from t1', + 'select ceil(double_col / double_col) from super', + 'select ceil(double_col / double_col) from t1', + 
'select ceil(smallint_col / smallint_col) from super', + 'select ceil(smallint_col / smallint_col) from t1', + 'select ceil(tinyint_col / tinyint_col) from super', + 'select ceil(tinyint_col / tinyint_col) from t1', + 'select ceil(uint_col / uint_col) from super', + 'select ceil(uint_col / uint_col) from t1', + 'select ceil(ubigint_col / ubigint_col) from super', + 'select ceil(ubigint_col / ubigint_col) from t1', + 'select ceil(usmallint_col / usmallint_col) from super', + 'select ceil(usmallint_col / usmallint_col) from t1', + 'select ceil(utinyint_col / utinyint_col) from super', + 'select ceil(utinyint_col / utinyint_col) from t1', + 'select int_col, ceil(int_col), int_col from super', + 'select int_col, ceil(int_col), int_col from t1', + 'select bigint_col, ceil(bigint_col), bigint_col from super', + 'select bigint_col, ceil(bigint_col), bigint_col from t1', + 'select float_col, ceil(float_col), float_col from super', + 'select float_col, ceil(float_col), float_col from t1', + 'select double_col, ceil(double_col), double_col from super', + 'select double_col, ceil(double_col), double_col from t1', + 'select smallint_col, ceil(smallint_col), smallint_col from super', + 'select smallint_col, ceil(smallint_col), smallint_col from t1', + 'select tinyint_col, ceil(tinyint_col), tinyint_col from super', + 'select tinyint_col, ceil(tinyint_col), tinyint_col from t1', + 'select uint_col, ceil(uint_col), uint_col from super', + 'select uint_col, ceil(uint_col), uint_col from t1', + 'select ubigint_col, ceil(ubigint_col), ubigint_col from super', + 'select ubigint_col, ceil(ubigint_col), ubigint_col from t1', + 'select usmallint_col, ceil(usmallint_col), usmallint_col from super', + 'select usmallint_col, ceil(usmallint_col), usmallint_col from t1', + 'select utinyint_col, ceil(utinyint_col), utinyint_col from super', + 'select utinyint_col, ceil(utinyint_col), utinyint_col from t1', + 'select 1, ceil(int_col), 1 from super', + 'select 1, ceil(int_col), 1 from t1', + 
'select 1, ceil(bigint_col), 1 from super', + 'select 1, ceil(bigint_col), 1 from t1', + 'select 1, ceil(float_col), 1 from super', + 'select 1, ceil(float_col), 1 from t1', + 'select 1, ceil(double_col), 1 from super', + 'select 1, ceil(double_col), 1 from t1', + 'select 1, ceil(smallint_col), 1 from super', + 'select 1, ceil(smallint_col), 1 from t1', + 'select 1, ceil(tinyint_col), 1 from super', + 'select 1, ceil(tinyint_col), 1 from t1', + 'select 1, ceil(uint_col), 1 from super', + 'select 1, ceil(uint_col), 1 from t1', + 'select 1, ceil(ubigint_col), 1 from super', + 'select 1, ceil(ubigint_col), 1 from t1', + 'select 1, ceil(usmallint_col), 1 from super', + 'select 1, ceil(usmallint_col), 1 from t1', + 'select 1, ceil(utinyint_col), 1 from super', + 'select 1, ceil(utinyint_col), 1 from t1', + 'select ceil(int_col) as anyName from super', + 'select ceil(int_col) as anyName from t1', + 'select ceil(bigint_col) as anyName from super', + 'select ceil(bigint_col) as anyName from t1', + 'select ceil(float_col) as anyName from super', + 'select ceil(float_col) as anyName from t1', + 'select ceil(double_col) as anyName from super', + 'select ceil(double_col) as anyName from t1', + 'select ceil(smallint_col) as anyName from super', + 'select ceil(smallint_col) as anyName from t1', + 'select ceil(tinyint_col) as anyName from super', + 'select ceil(tinyint_col) as anyName from t1', + 'select ceil(uint_col) as anyName from super', + 'select ceil(uint_col) as anyName from t1', + 'select ceil(ubigint_col) as anyName from super', + 'select ceil(ubigint_col) as anyName from t1', + 'select ceil(usmallint_col) as anyName from super', + 'select ceil(usmallint_col) as anyName from t1', + 'select ceil(utinyint_col) as anyName from super', + 'select ceil(utinyint_col) as anyName from t1'] for s in range(len(select_command)): for f in range(len(from_command)): sql = "select " + select_command[s] + from_command[f] - if (select_command[s] == "ceil(int_col)"\ - or select_command[s] 
== "ceil(bigint_col)"\ - or select_command[s] == "ceil(smallint_col)" \ - or select_command[s] == "ceil(float_col)"\ - or select_command[s] == "ceil(double_col)"\ - or select_command[s] == "ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col)"\ - or select_command[s] == "ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col)"\ - or select_command[s] == "1, ceil(int_col), 1"\ - or select_command[s] == "1, ceil(bigint_col), 1"\ - or select_command[s] == "1, ceil(float_col), 1"\ - or select_command[s] == "1, ceil(double_col), 1"\ - or select_command[s] == "1, ceil(smallint_col), 1"\ - or select_command[s] == "1, ceil(tinyint_col), 1"\ - or select_command[s] == "1, ceil(uint_col), 1"\ - or select_command[s] == "1, ceil(ubigint_col), 1"\ - or select_command[s] == "1, ceil(usmallint_col), 1"\ - or select_command[s] == "1, ceil(utinyint_col), 1"\ - or select_command[s] == "int_col, ceil(int_col), int_col"\ - or select_command[s] == "bigint_col, ceil(bigint_col), bigint_col"\ - or select_command[s] == "float_col, ceil(float_col), float_col"\ - or select_command[s] == "double_col, ceil(double_col), double_col"\ - or select_command[s] == "smallint_col, ceil(smallint_col), smallint_col"\ - or select_command[s] == "tinyint_col, ceil(tinyint_col), tinyint_col"\ - or select_command[s] == "uint_col, ceil(uint_col), uint_col"\ - or select_command[s] == "ubigint_col, ceil(ubigint_col), ubigint_col"\ - or select_command[s] == "usmallint_col, ceil(usmallint_col), usmallint_col"\ - or select_command[s] == "utinyint_col, ceil(utinyint_col), utinyint_col"\ - or select_command[s] == "ceil(int_col) as anyName"\ - or select_command[s] == "ceil(bigint_col) as anyName"\ - or select_command[s] == "ceil(float_col) as anyName"\ - or select_command[s] == "ceil(double_col) as anyName"\ - or select_command[s] == "ceil(smallint_col) as anyName"\ - or select_command[s] == "ceil(tinyint_col) as anyName"\ - or select_command[s] == 
"ceil(uint_col) as anyName"\ - or select_command[s] == "ceil(ubigint_col) as anyName"\ - or select_command[s] == "ceil(usmallint_col) as anyName"\ - or select_command[s] == "ceil(utinyint_col) as anyName"\ - or select_command[s] == "ceil(int_col) + ceil(int_col)"\ - or select_command[s] == "ceil(bigint_col) + ceil(bigint_col)"\ - or select_command[s] == "ceil(float_col) + ceil(float_col)"\ - or select_command[s] == "ceil(double_col) + ceil(double_col)"\ - or select_command[s] == "ceil(smallint_col) + ceil(smallint_col)"\ - or select_command[s] == "ceil(tinyint_col) + ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col) + ceil(uint_col)"\ - or select_command[s] == "ceil(ubigint_col) + ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col) + ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col) + ceil(utinyint_col)"\ - or select_command[s] == "ceil(int_col) + ceil(int_col)"\ - or select_command[s] == "ceil(bigint_col) + ceil(bigint_col)"\ - or select_command[s] == "ceil(float_col) + ceil(float_col)"\ - or select_command[s] == "ceil(double_col) + ceil(double_col)"\ - or select_command[s] == "ceil(smallint_col) + ceil(smallint_col)"\ - or select_command[s] == "ceil(tinyint_col) + ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col) + ceil(uint_col)"\ - or select_command[s] == "ceil(ubigint_col) + ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col) + ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col) + cei(utinyint_col)"\ - or select_command[s] == "ceil(int_col) - ceil(int_col)"\ - or select_command[s] == "ceil(bigint_col) - ceil(bigint_col)"\ - or select_command[s] == "ceil(float_col) - ceil(float_col)"\ - or select_command[s] == "ceil(double_col) - ceil(double_col)"\ - or select_command[s] == "ceil(smallint_col) - ceil(smallint_col)"\ - or select_command[s] == "ceil(tinyint_col) - ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col) - ceil(uint_col)"\ - or select_command[s] == 
"ceil(ubigint_col) - ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col) - ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col) - ceil(utinyint_col)"\ - or select_command[s] == "ceil(int_col) * ceil(int_col)"\ - or select_command[s] == "ceil(bigint_col) * ceil(bigint_col)"\ - or select_command[s] == "ceil(float_col) * ceil(float_col)"\ - or select_command[s] == "ceil(double_col) * ceil(double_col)"\ - or select_command[s] == "ceil(smallint_col) * ceil(smallint_col)"\ - or select_command[s] == "ceil(tinyint_col) * ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col) * ceil(uint_col)"\ - or select_command[s] == "ceil(ubigint_col) * ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col) * ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col) * ceil(utinyint_col)"\ - or select_command[s] == "ceil(int_col) / ceil(int_col)"\ - or select_command[s] == "ceil(bigint_col) / ceil(bigint_col)"\ - or select_command[s] == "ceil(float_col) / ceil(float_col)"\ - or select_command[s] == "ceil(double_col) / ceil(double_col)"\ - or select_command[s] == "ceil(smallint_col) / ceil(smallint_col)"\ - or select_command[s] == "ceil(tinyint_col) / ceil(tinyint_col)"\ - or select_command[s] == "ceil(uint_col) / ceil(uint_col)"\ - or select_command[s] == "ceil(ubigint_col) / ceil(ubigint_col)"\ - or select_command[s] == "ceil(usmallint_col) / ceil(usmallint_col)"\ - or select_command[s] == "ceil(utinyint_col) / ceil(utinyint_col)"): + if sql in shouldPass: tdSql.query(sql) else: tdSql.error(sql) + + shouldPass2 = ['select ceil(super.int_col) from super', + 'select ceil(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.bigint_col) from super', + 'select ceil(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.float_col) from super', + 'select ceil(super.float_col) from super, superb where 
super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.double_col) from super', + 'select ceil(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.smallint_col) from super', + 'select ceil(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.tinyint_col) from super', + 'select ceil(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.uint_col) from super', + 'select ceil(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.ubigint_col) from super', + 'select ceil(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.usmallint_col) from super', + 'select ceil(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(super.utinyint_col) from super', + 'select ceil(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select ceil(t1.int_col) from t1', + 'select ceil(t1.bigint_col) from t1', + 'select ceil(t1.float_col) from t1', + 'select ceil(t1.double_col) from t1', + 'select ceil(t1.smallint_col) from t1', + 'select ceil(t1.tinyint_col) from t1', + 'select ceil(t1.uint_col) from t1', + 'select ceil(t1.ubigint_col) from t1', + 'select ceil(t1.usmallint_col) from t1', + 'select ceil(t1.utinyint_col) from t1'] for sim in range(len(simple_select_command)): for fr in range(len(advance_from_command)): - for filter in range(len(filter_command)): + for groupby in range(len(filter_command)): for fill in range(len(fill_command)): - sql = "select " + simple_select_command[ - sim] + advance_from_command[fr] + filter_command[ - filter] + fill_command[fill] - if sql == "select ceil(t1.int_col) from t1"\ 
- or sql == "select ceil(super.int_col) from super"\ - or sql == "select ceil(t1.bigint_col) from t1"\ - or sql == "select ceil(super.bigint_col) from super"\ - or sql == "select ceil(t1.smallint_col) from t1"\ - or sql == "select ceil(super.smallint_col) from super"\ - or sql == "select ceil(t1.tinyint_col) from t1"\ - or sql == "select ceil(super.tinyint_col) from super"\ - or sql == "select ceil(t1.float_col) from t1"\ - or sql == "select ceil(super.float_col) from super"\ - or sql == "select ceil(t1.double_col) from t1"\ - or sql == "select ceil(super.double_col) from super"\ - or sql == "select ceil(t1.uint_col) from t1"\ - or sql == "select ceil(super.uint_col) from super"\ - or sql == "select ceil(t1.ubigint_col) from t1"\ - or sql == "select ceil(super.ubigint_col) from super"\ - or sql == "select ceil(t1.usmallint_col) from t1"\ - or sql == "select ceil(super.usmallint_col) from super"\ - or sql == "select ceil(t1.utinyint_col) from t1"\ - or sql == "select ceil(super.utinyint_col) from super"\ - or sql == "select ceil(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.ubigint_col) from super, superb where super.ts = superb.ts and 
super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select ceil(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + sql = "select " + simple_select_command[sim] + advance_from_command[fr] + filter_command[groupby] + fill_command[fill] + if sql in shouldPass2: tdSql.query(sql) else: tdSql.error(sql) diff --git a/tests/pytest/functions/function_count_last_stab.py b/tests/pytest/functions/function_count_last_stab.py index 1d777c6bd314941036f542c7d0e9063e590fa7dd..cd0a9b17c2fd8c98544dca09f6a7008929225ece 100644 --- a/tests/pytest/functions/function_count_last_stab.py +++ b/tests/pytest/functions/function_count_last_stab.py @@ -42,22 +42,22 @@ class TDTestCase: % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.query("select count(*),last(*) from stest group by col1") - tdSql.checkRows(10) - tdSql.checkData(0, 0, 1) - tdSql.checkData(1, 2, 2) - tdSql.checkData(1, 3, 1) + tdSql.checkRows(11) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 2, 2) + tdSql.checkData(2, 3, 1) tdSql.query("select count(*),last(*) from stest group by col2") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.checkData(0, 2, 10) - tdSql.checkData(0, 3, 1) + tdSql.checkRows(2) + tdSql.checkData(1, 0, 10) + tdSql.checkData(1, 2, 10) + tdSql.checkData(1, 3, 1) tdSql.query("select count(*),last(ts,stest.*) from stest group by col1") - tdSql.checkRows(10) - tdSql.checkData(0, 0, 1) - tdSql.checkData(0, 2, "2018-09-17 09:00:00") - tdSql.checkData(1, 4, 1) + tdSql.checkRows(11) + tdSql.checkData(1, 0, 1) + tdSql.checkData(1, 2, "2018-09-17 09:00:00") + tdSql.checkData(2, 4, 1) diff --git a/tests/pytest/functions/function_csum.py b/tests/pytest/functions/function_csum.py new file mode 100644 index 
0000000000000000000000000000000000000000..de2400066b9c606b2352649a394d9c20006ea929 --- /dev/null +++ b/tests/pytest/functions/function_csum.py @@ -0,0 +1,423 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + csum function: + :param col: string, column name, required parameters; + :param alias: string, result column another name,or add other funtion; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)',or give result column another name, like 'c2' + :return: csum query statement,default: select csum(c1) from t1 + ''' + + return f"select csum({col}) {alias} from {table_expr} {condition}" + + def checkcsum(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.csum_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("csum", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, 
condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_data) + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_csum)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_csum[j]) + pre_row += len(pre_csum) + return + elif "union" in condition: + union_sql_0 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_csum_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_csum_1 = tdSql.queryResult + + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + 
tdSql.checkData(i, 1, union_csum_0[i][1]) + else: + tdSql.checkData(i, 1, union_csum_1[i-row_union_0][1]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_result)[offset_val:] + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + tdSql.checkData(i, 1, pre_csum[i]) + + pass + + def csum_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkcsum() + case2 = {"col": "c2"} + self.checkcsum(**case2) + case3 = {"col": "c5"} + self.checkcsum(**case3) + case4 = {"col": "c7"} + self.checkcsum(**case4) + case5 = {"col": "c8"} + self.checkcsum(**case5) + case6 = {"col": "c9"} + self.checkcsum(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checkcsum(**case7) + case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"} + self.checkcsum(**case8) + + # case9~10: mix with tbname/ts/tag/col + case9 = {"alias": ", tbname"} + self.checkcsum(**case9) + case10 = {"alias": ", _c0"} + self.checkcsum(**case10) + case11 = {"alias": ", st1"} + self.checkcsum(**case11) + case12 = {"alias": ", c1"} + # self.checkcsum(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkcsum(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkcsum(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkcsum(**case15) + + # case16: with multi-condition + case16 = {"condition": 
"where c6=1 or c6 =0"} + self.checkcsum(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkcsum(**case17) + # # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkcsum(**case18) + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checkcsum(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + self.checkcsum(**case20) + + # case22: with union + case22 = { + "condition": "union all select csum(c1) from t2" + } + self.checkcsum(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkcsum(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkcsum(**case24) + + pass + + def csum_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.csum_query_form(col="")) # no col + tdSql.error("csum(c1) from stb1") # no select + tdSql.error("select csum from t1") # no csum condition + tdSql.error("select csum c1 from t1") # no brackets + tdSql.error("select csum(c1) t1") # no from + tdSql.error("select csum( c1 ) from ") # no table_expr + tdSql.error(self.csum_query_form(col="st1")) # tag col + tdSql.error(self.csum_query_form(col=1)) # col is a value + tdSql.error(self.csum_query_form(col="'c1'")) # col is a string + tdSql.error(self.csum_query_form(col=None)) # col is NULL 1 + tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.csum_query_form(col='""')) # col is "" + tdSql.error(self.csum_query_form(col='c%')) # col is spercial char 1 + tdSql.error(self.csum_query_form(col='c_')) # col is spercial char 2 + 
tdSql.error(self.csum_query_form(col='c.')) # col is spercial char 3 + tdSql.error(self.csum_query_form(col='c3')) # timestamp col + tdSql.error(self.csum_query_form(col='ts')) # Primary key + tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col + tdSql.error(self.csum_query_form(col='c6')) # bool col + tdSql.error(self.csum_query_form(col='c4')) # binary col + tdSql.error(self.csum_query_form(col='c10')) # nachr col + tdSql.error(self.csum_query_form(col='c10')) # not table_expr col + tdSql.error(self.csum_query_form(col='t1')) # tbname + tdSql.error(self.csum_query_form(col='stb1')) # stbname + tdSql.error(self.csum_query_form(col='db')) # datbasename + tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1 + tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.csum_query_form(col='*')) # col is all col + tdSql.error("select csum[c1] from t1") # sql form error 1 + tdSql.error("select csum{c1} from t1") # sql form error 2 + tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3 + tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3 + tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 + tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 + tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + tdSql.error(self.csum_query_form(table_expr="stb1")) # select 
stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.csum_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.csum_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.csum_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.csum_query_form(**order_by_tbname_sql)) + + pass + + def csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def csum_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + 
tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def csum_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.csum_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.csum_current_query() + self.csum_error_query() + + 
tdLog.printNoPrefix("######## insert data without NULL data test:") + self.csum_test_table(tbnum) + self.csum_test_data(tbnum, per_table_rows, nowtime) + self.csum_current_query() + self.csum_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.csum_current_query() + self.csum_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.csum_current_query() + self.csum_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.csum_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 3b79726ed80c206338392cecb8f3d2adf4588e2a..a2a458ea290b13ed462d8dcd47a8af16e3af0f82 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -29,7 +29,6 @@ class TDTestCase: def insertAndCheckData(self): types = ["tinyint", "tinyint unsigned", "smallint", "smallint unsigned", "int", "int unsigned", "bigint", "bigint unsigned", "float", "double", "bool", "binary(20)", "nchar(20)"] - for type in types: print("============== create table using %s type ================" % type) tdSql.execute("drop table if exists stb") diff --git a/tests/pytest/functions/function_elapsed.py b/tests/pytest/functions/function_elapsed.py new file mode 100644 index 
0000000000000000000000000000000000000000..6bc54bfc1c7fc173bf9447da1a9b0aa4aba3e525 --- /dev/null +++ b/tests/pytest/functions/function_elapsed.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from functions.function_elapsed_case import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def genTime(self, no): + h = int(no / 60) + hs = "%d" %h + if h < 10: + hs = "0%d" %h + + m = int(no % 60) + ms = "%d" %m + if m < 10: + ms = "0%d" %m + + return hs, ms + + def general(self): + # normal table + tdSql.execute("create database wxy_db minrows 10 maxrows 200") + tdSql.execute("use wxy_db") + tdSql.execute("create table t1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)") + for i in range(1, 1001): + hs, ms = self.genTime(i) + if i < 500: + ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 1)" % (hs, ms, i)) + else: + ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 0)" % (hs, ms, i)) + tdSql.query("select count(*) from t1") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 1000) + + # empty normal table + tdSql.execute("create table t2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)") + + tdSql.execute("create 
database wxy_db_ns precision \"ns\"") + tdSql.execute("use wxy_db_ns") + tdSql.execute("create table t1 (ts timestamp, f float)") + tdSql.execute("insert into t1 values('2021-11-18 00:00:00.000000100', 1)" + "('2021-11-18 00:00:00.000000200', 2)" + "('2021-11-18 00:00:00.000000300', 3)" + "('2021-11-18 00:00:00.000000500', 4)") + + # super table + tdSql.execute("use wxy_db") + tdSql.execute("create stable st1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + tdSql.execute("create table st1s1 using st1 tags(1)") + tdSql.execute("create table st1s2 using st1 tags(2)") + for i in range(1, 1001): + hs, ms = self.genTime(i) + if 0 == i % 2: + ret = tdSql.execute("insert into st1s1(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i)) + else: + ret = tdSql.execute("insert into st1s2(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i)) + tdSql.query("select count(*) from st1s1") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 500) + tdSql.query("select count(*) from st1s2") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 500) + # empty super table + tdSql.execute("create stable st2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + tdSql.execute("create table st2s1 using st1 tags(1)") + tdSql.execute("create table st2s2 using st1 tags(2)") + + tdSql.execute("create stable st3(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + + def run(self): + tdSql.prepare() + self.general() + ElapsedCase().run() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_elapsed_case.py b/tests/pytest/functions/function_elapsed_case.py new file 
mode 100644 index 0000000000000000000000000000000000000000..56610a9347c3ab90a9addc64dd62a6ed60758abf --- /dev/null +++ b/tests/pytest/functions/function_elapsed_case.py @@ -0,0 +1,374 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class ElapsedCase: + def __init__(self, restart = False): + self.restart = restart + + def selectTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(1) + + tdSql.query("select elapsed(ts, 1m) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 999) + + tdSql.query("select elapsed(ts), elapsed(ts, 1m), elapsed(ts, 10m) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkEqual(int(tdSql.getData(0, 1)), 999) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 99) + + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), stddev(f), leastsquares(f, 1, 1), " + "min(f), max(f), first(f), last(f), percentile(i, 20), apercentile(i, 30), last_row(i), spread(i) " + "from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(16) + tdSql.checkEqual(int(tdSql.getData(0, 1)), 1000) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / 
elapsed(ts, 1m) from t1 " + "where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(5) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 0) + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(2) # append tbname + + tdSql.query("select elapsed(ts, 10m) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 99) + tdSql.checkEqual(int(tdSql.getData(1, 0)), 99) + + tdSql.query("select elapsed(ts), elapsed(ts, 10m), elapsed(ts, 100m) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkEqual(int(tdSql.getData(0, 1)), 99) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 9) + # stddev(f), + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), min(f), max(f), first(f), last(f), apercentile(i, 30), last_row(i), spread(i) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(14) # append tbname + tdSql.checkEqual(int(tdSql.getData(0, 1)), 500) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / elapsed(ts, 1m) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(6) # append tbname + tdSql.checkEqual(int(tdSql.getData(0, 2)), 0) + + tdSql.query("select elapsed(ts), tbname from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(3) # additional append tbname + + tdSql.execute("use wxy_db_ns") + tdSql.query("select elapsed(ts, 1b), elapsed(ts, 1u) from t1") + tdSql.checkRows(1) + tdSql.checkCols(2) + + self.selectIllegalTest() + + # It has little to do with the elapsed function, so 
just simple test. + def whereTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' and id = 1 group by tbname") + tdSql.checkRows(1) + tdSql.checkCols(2) # append tbname + + # It has little to do with the elapsed function, so just simple test. + def sessionTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' session(ts, 10s)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' session(ts, 70s)") + tdSql.checkRows(1) + + # It has little to do with the elapsed function, so just simple test. + def stateWindowTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' state_window(i)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' state_window(b)") + tdSql.checkRows(2) + + def intervalTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m)") + tdSql.checkRows(1000) + + # The first window has 9 records, and the last window has 1 record. + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m)") + tdSql.checkRows(101) + tdSql.checkEqual(int(tdSql.getData(0, 1)), 9 * 60 * 1000) + tdSql.checkEqual(int(tdSql.getData(100, 1)), 0) + + # Skip windows without data. 
+ tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(35s)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), stddev(f), leastsquares(f, 1, 1), " + "min(f), max(f), first(f), last(f), percentile(i, 20), apercentile(i, 30), last_row(i), spread(i) " + "from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(20m)") + tdSql.checkRows(51) # ceil(1000/50) + 1(last point), window is half-open interval. + tdSql.checkCols(17) # front push timestamp + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / elapsed(ts, 1m) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30m) group by tbname") + tdSql.checkRows(68) # ceil(1000/30) + tdSql.checkCols(7) # front push timestamp and append tbname + + # It has little to do with the elapsed function, so just simple test. + def fillTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(value, 1000)") + tdSql.checkRows(2880) # The range of window conditions is 24 hours. + tdSql.checkEqual(int(tdSql.getData(0, 1)), 1000) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(prev)") + tdSql.checkRows(2880) # The range of window conditions is 24 hours. + tdSql.checkData(0, 1, None) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(null)") + tdSql.checkRows(2880) # The range of window conditions is 24 hours. 
+ tdSql.checkData(0, 1, None) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(linear)") + tdSql.checkRows(2880) # The range of window conditions is 24 hours. + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(next)") + tdSql.checkRows(2880) # The range of window conditions is 24 hours. + + # Elapsed only support group by tbname. Supported tests have been done in selectTest(). + def groupbyTest(self): + tdSql.execute("use wxy_db") + + tdSql.error("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by i") + tdSql.error("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by i") + + def orderbyCheck(self, sql, elapsedCol): + resultAsc = tdSql.getResult(sql) + resultdesc = tdSql.getResult(sql + " order by ts desc") + resultRows = len(resultAsc) + for i in range(resultRows): + tdSql.checkEqual(resultAsc[i][elapsedCol], resultdesc[resultRows - i - 1][elapsedCol]) + + def splitStableResult(self, sql, elapsedCol, tbnameCol): + subtable = {} + result = tdSql.getResult(sql) + for i in range(len(result)): + if None == subtable.get(result[i][tbnameCol]): + subtable[result[i][tbnameCol]] = [result[i][elapsedCol]] + else: + subtable[result[i][tbnameCol]].append(result[i][elapsedCol]) + return subtable + + def doOrderbyCheck(self, resultAsc, resultdesc): + resultRows = len(resultAsc) + for i in range(resultRows): + tdSql.checkEqual(resultAsc[i], resultdesc[resultRows - i - 1]) + + def orderbyForStableCheck(self, sql, elapsedCol, tbnameCol): + subtableAsc = self.splitStableResult(sql, elapsedCol, tbnameCol) + subtableDesc = self.splitStableResult(sql + " order by ts desc", elapsedCol, tbnameCol) + for kv in subtableAsc.items(): + descValue = subtableDesc.get(kv[0]) + if None == descValue: + tdLog.exit("%s failed: subtable %s not exists" % 
(sql)) + else: + self.doOrderbyCheck(kv[1], descValue) + + # Orderby clause only changes the output order and has no effect on the calculation results. + def orderbyTest(self): + tdSql.execute("use wxy_db") + + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", 0) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1000m)", 1) + + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", 0, 1) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and 
ts < '2021-11-23 00:00:00' interval(222m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1000m) group by tbname", 1, 2) + + def slimitCheck(self, sql): + tdSql.checkEqual(tdSql.query(sql + " slimit 0"), 0) + tdSql.checkEqual(tdSql.query(sql + " slimit 1 soffset 0"), tdSql.query(sql + " slimit 0, 1")) + tdSql.checkEqual(tdSql.query(sql + " slimit 1, 1"), tdSql.query(sql) / 2) + tdSql.checkEqual(tdSql.query(sql + " slimit 10"), tdSql.query(sql)) + + # It has little to do with the elapsed function, so just simple test. + def slimitTest(self): + tdSql.execute("use wxy_db") + + self.slimitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + self.slimitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname") + + def limitCheck(self, sql, groupby = 0): + rows = tdSql.query(sql) + if rows > 0: + tdSql.checkEqual(tdSql.query(sql + " limit 0"), 0) + if 1 == groupby: + tdSql.checkEqual(tdSql.query(sql + " limit 1"), 2) + tdSql.checkEqual(tdSql.query(sql + " limit %d offset %d" % (rows / 2, rows / 3)), tdSql.query(sql + " limit %d, %d" % (rows / 3, rows / 2))) + tdSql.checkEqual(tdSql.query(sql + " limit %d" % (rows / 2)), rows) + else: + tdSql.checkEqual(tdSql.query(sql + " limit 1"), 1) + tdSql.checkEqual(tdSql.query(sql + " limit %d offset %d" % (rows / 2, rows / 3)), tdSql.query(sql + " limit %d, %d" % (rows / 3, rows / 2))) + tdSql.checkEqual(tdSql.query(sql + " limit %d" % (rows + 1)), rows) + + # It has little to do with the elapsed function, so just simple test. 
+ def limitTest(self): + tdSql.execute("use wxy_db") + + self.limitCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + self.limitCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)") + + self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", 1) + self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1) + + def fromCheck(self, sqlTemplate, table): + tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table))) + tdSql.query(sqlTemplate % ("(select last(ts) from %s interval(10s))" % table)) + tdSql.query(sqlTemplate % ("(select elapsed(ts) from %s interval(10s))" % table)) + + # It has little to do with the elapsed function, so just simple test. + def fromTest(self): + tdSql.execute("use wxy_db") + + self.fromCheck("select elapsed(ts) from %s where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", "t1") + self.fromCheck("select elapsed(ts) from %s where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", "t1") + tdSql.query("select * from (select elapsed(ts) from t1 interval(10s)) where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.query("select * from (select elapsed(ts) from t1)") + # empty table test + tdSql.checkEqual(tdSql.query("select elapsed(ts) from t2"), 0) + tdSql.checkEqual(tdSql.query("select elapsed(ts) from st2 group by tbname"), 0) + tdSql.checkEqual(tdSql.query("select elapsed(ts) from st3 group by tbname"), 0) + # Tags not allowed for table query, so there is no need to test super table. 
+ tdSql.error("select elapsed(ts) from (select * from st1)") + + def joinCheck(self, sqlTemplate, rtable): + tdSql.checkEqual(tdSql.getResult(sqlTemplate % (rtable, "")), tdSql.getResult(sqlTemplate % ("t1, %s t2" % rtable, "t1.ts = t2.ts and "))) + + # It has little to do with the elapsed function, so just simple test. + def joinTest(self): + tdSql.execute("use wxy_db") + + # st1s1 is a subset of t1. + self.joinCheck("select elapsed(ts) from %s where %s ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", "st1s1") + self.joinCheck("select elapsed(ts) from %s where %s ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)", "st1s1") + # join query does not support group by, so there is no need to test super table. + + def unionAllCheck(self, sql1, sql2): + rows1 = tdSql.query(sql1) + rows2 = tdSql.query(sql2) + tdSql.checkEqual(tdSql.query(sql1 + " union all " + sql2), rows1 + rows2) + + # It has little to do with the elapsed function, so just simple test. + def unionAllTest(self): + tdSql.execute("use wxy_db") + + self.unionAllCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", + "select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-22 01:00:00'") + self.unionAllCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", + "select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)") + self.unionAllCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", + "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-22 02:00:00' group by tbname") + self.unionAllCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m) group by tbname", + "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' 
interval(222m) group by tbname") + + # It has little to do with the elapsed function, so just simple test. + def continuousQueryTest(self): + tdSql.execute("use wxy_db") + + if (self.restart): + tdSql.execute("drop table elapsed_t") + tdSql.execute("drop table elapsed_st") + tdSql.execute("create table elapsed_t as select elapsed(ts) from t1 interval(1m) sliding(30s)") + tdSql.execute("create table elapsed_st as select elapsed(ts) from st1 interval(1m) sliding(30s) group by tbname") + + def selectIllegalTest(self): + tdSql.execute("use wxy_db") + tdSql.error("select elapsed(1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed('2021-11-18 00:00:10') from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(now) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(b) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(f) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(d) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(bin) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(s) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(t) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(bl) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(n) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select 
elapsed(*) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, '1s') from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + #tdSql.error("select elapsed(ts, now) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts + 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, 1b) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, 1u) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(max(ts)) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select distinct elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select distinct elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.error("select elapsed(ts), i from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), ts from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), _c0 from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), top(i, 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), bottom(i, 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), inerp(i) from t1 where ts > '2021-11-22 
00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), diff(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), derivative(i, 1s, 0) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), ceil(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), floor(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), round(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + + def run(self): + self.selectTest() + self.whereTest() + self.sessionTest() + self.stateWindowTest() + self.intervalTest() + self.fillTest() + self.groupbyTest() + self.orderbyTest() + self.slimitTest() + self.limitTest() + self.fromTest() + self.joinTest() + self.unionAllTest() + self.continuousQueryTest() diff --git a/tests/pytest/functions/function_elapsed_restart.py b/tests/pytest/functions/function_elapsed_restart.py new file mode 100644 index 0000000000000000000000000000000000000000..8b492267abdd8ea2d2b2fc27ee2e957e1038f48d --- /dev/null +++ b/tests/pytest/functions/function_elapsed_restart.py @@ -0,0 +1,35 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from functions.function_elapsed_case import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + ElapsedCase(True).run() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_floor.py b/tests/pytest/functions/function_floor.py index 305e3b798a74376766a14cd824ded617db3cc8a2..562bbbc4724e9d7bad9bc8807c0cdd57972a157d 100644 --- a/tests/pytest/functions/function_floor.py +++ b/tests/pytest/functions/function_floor.py @@ -1294,21 +1294,21 @@ class TDTestCase: double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ double_col double, binary_col binary(8), 
smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( - "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % (self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1338,11 +1338,11 @@ class TDTestCase: self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) tdSql.execute( - "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t2 values (1629796215891, 
1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % (self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1371,100 +1371,568 @@ class TDTestCase: self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) + + shouldPass = ['select floor(int_col) from super', + 'select floor(int_col) from t1', + 'select floor(bigint_col) from super', + 'select floor(bigint_col) from t1', + 'select floor(float_col) from super', + 'select floor(float_col) from t1', + 'select floor(double_col) from super', + 'select floor(double_col) from t1', + 'select floor(smallint_col) from super', + 'select floor(smallint_col) from t1', + 'select floor(tinyint_col) from super', + 'select floor(tinyint_col) from t1', + 'select floor(uint_col) from super', + 'select floor(uint_col) from t1', + 'select floor(ubigint_col) from super', + 'select floor(ubigint_col) from t1', + 'select floor(usmallint_col) from super', + 'select floor(usmallint_col) from t1', + 'select floor(utinyint_col) from super', + 'select floor(utinyint_col) from t1', + 'select floor(int_col) - floor(int_col) from super', + 'select floor(int_col) - floor(int_col) from t1', + 'select floor(bigint_col) - floor(bigint_col) from super', + 'select floor(bigint_col) - floor(bigint_col) from t1', + 'select floor(float_col) - floor(float_col) from super', + 'select floor(float_col) - floor(float_col) from t1', + 'select floor(double_col) - floor(double_col) from super', + 'select floor(double_col) - floor(double_col) from t1', + 'select floor(smallint_col) - floor(smallint_col) from super', + 'select floor(smallint_col) - floor(smallint_col) from t1', + 'select floor(tinyint_col) - floor(tinyint_col) from super', + 'select floor(tinyint_col) - floor(tinyint_col) from t1', + 'select floor(uint_col) - floor(uint_col) from super', + 'select floor(uint_col) - floor(uint_col) from t1', + 'select floor(ubigint_col) - floor(ubigint_col) from 
super', + 'select floor(ubigint_col) - floor(ubigint_col) from t1', + 'select floor(usmallint_col) - floor(usmallint_col) from super', + 'select floor(usmallint_col) - floor(usmallint_col) from t1', + 'select floor(utinyint_col) - floor(utinyint_col) from super', + 'select floor(utinyint_col) - floor(utinyint_col) from t1', + 'select floor(int_col) / floor(int_col) from super', + 'select floor(int_col) / floor(int_col) from t1', + 'select floor(bigint_col) / floor(bigint_col) from super', + 'select floor(bigint_col) / floor(bigint_col) from t1', + 'select floor(float_col) / floor(float_col) from super', + 'select floor(float_col) / floor(float_col) from t1', + 'select floor(double_col) / floor(double_col) from super', + 'select floor(double_col) / floor(double_col) from t1', + 'select floor(smallint_col) / floor(smallint_col) from super', + 'select floor(smallint_col) / floor(smallint_col) from t1', + 'select floor(tinyint_col) / floor(tinyint_col) from super', + 'select floor(tinyint_col) / floor(tinyint_col) from t1', + 'select floor(uint_col) / floor(uint_col) from super', + 'select floor(uint_col) / floor(uint_col) from t1', + 'select floor(ubigint_col) / floor(ubigint_col) from super', + 'select floor(ubigint_col) / floor(ubigint_col) from t1', + 'select floor(usmallint_col) / floor(usmallint_col) from super', + 'select floor(usmallint_col) / floor(usmallint_col) from t1', + 'select floor(utinyint_col) / floor(utinyint_col) from super', + 'select floor(utinyint_col) / floor(utinyint_col) from t1', + 'select floor(int_col) * floor(int_col) from super', + 'select floor(int_col) * floor(int_col) from t1', + 'select floor(bigint_col) * floor(bigint_col) from super', + 'select floor(bigint_col) * floor(bigint_col) from t1', + 'select floor(float_col) * floor(float_col) from super', + 'select floor(float_col) * floor(float_col) from t1', + 'select floor(double_col) * floor(double_col) from super', + 'select floor(double_col) * floor(double_col) from t1', + 'select 
floor(smallint_col) * floor(smallint_col) from super', + 'select floor(smallint_col) * floor(smallint_col) from t1', + 'select floor(tinyint_col) * floor(tinyint_col) from super', + 'select floor(tinyint_col) * floor(tinyint_col) from t1', + 'select floor(uint_col) * floor(uint_col) from super', + 'select floor(uint_col) * floor(uint_col) from t1', + 'select floor(ubigint_col) * floor(ubigint_col) from super', + 'select floor(ubigint_col) * floor(ubigint_col) from t1', + 'select floor(usmallint_col) * floor(usmallint_col) from super', + 'select floor(usmallint_col) * floor(usmallint_col) from t1', + 'select floor(utinyint_col) * floor(utinyint_col) from super', + 'select floor(utinyint_col) * floor(utinyint_col) from t1', + 'select floor(count(ts)) from super', + 'select floor(count(ts)) from t1', + 'select floor(count(timestamp_col)) from super', + 'select floor(count(timestamp_col)) from t1', + 'select floor(count(int_col)) from super', + 'select floor(count(int_col)) from t1', + 'select floor(count(bigint_col)) from super', + 'select floor(count(bigint_col)) from t1', + 'select floor(count(float_col)) from super', + 'select floor(count(float_col)) from t1', + 'select floor(count(double_col)) from super', + 'select floor(count(double_col)) from t1', + 'select floor(count(binary_col)) from super', + 'select floor(count(binary_col)) from t1', + 'select floor(count(smallint_col)) from super', + 'select floor(count(smallint_col)) from t1', + 'select floor(count(tinyint_col)) from super', + 'select floor(count(tinyint_col)) from t1', + 'select floor(count(bool_col)) from super', + 'select floor(count(bool_col)) from t1', + 'select floor(count(nchar_col)) from super', + 'select floor(count(nchar_col)) from t1', + 'select floor(count(uint_col)) from super', + 'select floor(count(uint_col)) from t1', + 'select floor(count(ubigint_col)) from super', + 'select floor(count(ubigint_col)) from t1', + 'select floor(count(usmallint_col)) from super', + 'select 
floor(count(usmallint_col)) from t1', + 'select floor(count(utinyint_col)) from super', + 'select floor(count(utinyint_col)) from t1', + 'select floor(count(timestamp_tag)) from super', + 'select floor(count(timestamp_tag)) from t1', + 'select floor(count(int_tag)) from super', + 'select floor(count(int_tag)) from t1', + 'select floor(count(bigint_tag)) from super', + 'select floor(count(bigint_tag)) from t1', + 'select floor(count(float_tag)) from super', + 'select floor(count(float_tag)) from t1', + 'select floor(count(double_tag)) from super', + 'select floor(count(double_tag)) from t1', + 'select floor(count(binary_tag)) from super', + 'select floor(count(binary_tag)) from t1', + 'select floor(count(smallint_tag)) from super', + 'select floor(count(smallint_tag)) from t1', + 'select floor(count(tinyint_tag)) from super', + 'select floor(count(tinyint_tag)) from t1', + 'select floor(count(bool_tag)) from super', + 'select floor(count(bool_tag)) from t1', + 'select floor(count(nchar_tag)) from super', + 'select floor(count(nchar_tag)) from t1', + 'select floor(count(uint_tag)) from super', + 'select floor(count(uint_tag)) from t1', + 'select floor(count(ubigint_tag)) from super', + 'select floor(count(ubigint_tag)) from t1', + 'select floor(count(usmallint_tag)) from super', + 'select floor(count(usmallint_tag)) from t1', + 'select floor(count(utinyint_tag)) from super', + 'select floor(count(utinyint_tag)) from t1', + 'select floor(avg(int_col)) from super', + 'select floor(avg(int_col)) from t1', + 'select floor(avg(bigint_col)) from super', + 'select floor(avg(bigint_col)) from t1', + 'select floor(avg(float_col)) from super', + 'select floor(avg(float_col)) from t1', + 'select floor(avg(double_col)) from super', + 'select floor(avg(double_col)) from t1', + 'select floor(avg(smallint_col)) from super', + 'select floor(avg(smallint_col)) from t1', + 'select floor(avg(tinyint_col)) from super', + 'select floor(avg(tinyint_col)) from t1', + 'select 
floor(avg(uint_col)) from super', + 'select floor(avg(uint_col)) from t1', + 'select floor(avg(ubigint_col)) from super', + 'select floor(avg(ubigint_col)) from t1', + 'select floor(avg(usmallint_col)) from super', + 'select floor(avg(usmallint_col)) from t1', + 'select floor(avg(utinyint_col)) from super', + 'select floor(avg(utinyint_col)) from t1', + 'select floor(twa(int_col)) from t1', + 'select floor(twa(bigint_col)) from t1', + 'select floor(twa(float_col)) from t1', + 'select floor(twa(double_col)) from t1', + 'select floor(twa(smallint_col)) from t1', + 'select floor(twa(tinyint_col)) from t1', + 'select floor(twa(uint_col)) from t1', + 'select floor(twa(ubigint_col)) from t1', + 'select floor(twa(usmallint_col)) from t1', + 'select floor(twa(utinyint_col)) from t1', + 'select floor(sum(int_col)) from super', + 'select floor(sum(int_col)) from t1', + 'select floor(sum(bigint_col)) from super', + 'select floor(sum(bigint_col)) from t1', + 'select floor(sum(float_col)) from super', + 'select floor(sum(float_col)) from t1', + 'select floor(sum(double_col)) from super', + 'select floor(sum(double_col)) from t1', + 'select floor(sum(smallint_col)) from super', + 'select floor(sum(smallint_col)) from t1', + 'select floor(sum(tinyint_col)) from super', + 'select floor(sum(tinyint_col)) from t1', + 'select floor(sum(uint_col)) from super', + 'select floor(sum(uint_col)) from t1', + 'select floor(sum(ubigint_col)) from super', + 'select floor(sum(ubigint_col)) from t1', + 'select floor(sum(usmallint_col)) from super', + 'select floor(sum(usmallint_col)) from t1', + 'select floor(sum(utinyint_col)) from super', + 'select floor(sum(utinyint_col)) from t1', + 'select floor(stddev(int_col)) from super', + 'select floor(stddev(int_col)) from t1', + 'select floor(stddev(bigint_col)) from super', + 'select floor(stddev(bigint_col)) from t1', + 'select floor(stddev(float_col)) from super', + 'select floor(stddev(float_col)) from t1', + 'select floor(stddev(double_col)) 
from super', + 'select floor(stddev(double_col)) from t1', + 'select floor(stddev(smallint_col)) from super', + 'select floor(stddev(smallint_col)) from t1', + 'select floor(stddev(tinyint_col)) from super', + 'select floor(stddev(tinyint_col)) from t1', + 'select floor(stddev(uint_col)) from super', + 'select floor(stddev(uint_col)) from t1', + 'select floor(stddev(ubigint_col)) from super', + 'select floor(stddev(ubigint_col)) from t1', + 'select floor(stddev(usmallint_col)) from super', + 'select floor(stddev(usmallint_col)) from t1', + 'select floor(stddev(utinyint_col)) from super', + 'select floor(stddev(utinyint_col)) from t1', + 'select floor(irate(int_col)) from t1', + 'select floor(irate(bigint_col)) from t1', + 'select floor(irate(float_col)) from t1', + 'select floor(irate(double_col)) from t1', + 'select floor(irate(smallint_col)) from t1', + 'select floor(irate(tinyint_col)) from t1', + 'select floor(irate(uint_col)) from t1', + 'select floor(irate(ubigint_col)) from t1', + 'select floor(irate(usmallint_col)) from t1', + 'select floor(irate(utinyint_col)) from t1', + 'select floor(min(int_col)) from super', + 'select floor(min(int_col)) from t1', + 'select floor(min(bigint_col)) from super', + 'select floor(min(bigint_col)) from t1', + 'select floor(min(float_col)) from super', + 'select floor(min(float_col)) from t1', + 'select floor(min(double_col)) from super', + 'select floor(min(double_col)) from t1', + 'select floor(min(smallint_col)) from super', + 'select floor(min(smallint_col)) from t1', + 'select floor(min(tinyint_col)) from super', + 'select floor(min(tinyint_col)) from t1', + 'select floor(min(uint_col)) from super', + 'select floor(min(uint_col)) from t1', + 'select floor(min(ubigint_col)) from super', + 'select floor(min(ubigint_col)) from t1', + 'select floor(min(usmallint_col)) from super', + 'select floor(min(usmallint_col)) from t1', + 'select floor(min(utinyint_col)) from super', + 'select floor(min(utinyint_col)) from t1', + 
'select floor(max(int_col)) from super', + 'select floor(max(int_col)) from t1', + 'select floor(max(bigint_col)) from super', + 'select floor(max(bigint_col)) from t1', + 'select floor(max(float_col)) from super', + 'select floor(max(float_col)) from t1', + 'select floor(max(double_col)) from super', + 'select floor(max(double_col)) from t1', + 'select floor(max(smallint_col)) from super', + 'select floor(max(smallint_col)) from t1', + 'select floor(max(tinyint_col)) from super', + 'select floor(max(tinyint_col)) from t1', + 'select floor(max(uint_col)) from super', + 'select floor(max(uint_col)) from t1', + 'select floor(max(ubigint_col)) from super', + 'select floor(max(ubigint_col)) from t1', + 'select floor(max(usmallint_col)) from super', + 'select floor(max(usmallint_col)) from t1', + 'select floor(max(utinyint_col)) from super', + 'select floor(max(utinyint_col)) from t1', + 'select floor(first(int_col)) from super', + 'select floor(first(int_col)) from t1', + 'select floor(first(bigint_col)) from super', + 'select floor(first(bigint_col)) from t1', + 'select floor(first(float_col)) from super', + 'select floor(first(float_col)) from t1', + 'select floor(first(double_col)) from super', + 'select floor(first(double_col)) from t1', + 'select floor(first(smallint_col)) from super', + 'select floor(first(smallint_col)) from t1', + 'select floor(first(tinyint_col)) from super', + 'select floor(first(tinyint_col)) from t1', + 'select floor(first(uint_col)) from super', + 'select floor(first(uint_col)) from t1', + 'select floor(first(ubigint_col)) from super', + 'select floor(first(ubigint_col)) from t1', + 'select floor(first(usmallint_col)) from super', + 'select floor(first(usmallint_col)) from t1', + 'select floor(first(utinyint_col)) from super', + 'select floor(first(utinyint_col)) from t1', + 'select floor(last(int_col)) from super', + 'select floor(last(int_col)) from t1', + 'select floor(last(bigint_col)) from super', + 'select floor(last(bigint_col)) 
from t1', + 'select floor(last(float_col)) from super', + 'select floor(last(float_col)) from t1', + 'select floor(last(double_col)) from super', + 'select floor(last(double_col)) from t1', + 'select floor(last(smallint_col)) from super', + 'select floor(last(smallint_col)) from t1', + 'select floor(last(tinyint_col)) from super', + 'select floor(last(tinyint_col)) from t1', + 'select floor(last(uint_col)) from super', + 'select floor(last(uint_col)) from t1', + 'select floor(last(ubigint_col)) from super', + 'select floor(last(ubigint_col)) from t1', + 'select floor(last(usmallint_col)) from super', + 'select floor(last(usmallint_col)) from t1', + 'select floor(last(utinyint_col)) from super', + 'select floor(last(utinyint_col)) from t1', + 'select floor(percentile(int_col, 1)) from t1', + 'select floor(percentile(bigint_col, 1)) from t1', + 'select floor(percentile(float_col, 1)) from t1', + 'select floor(percentile(double_col, 1)) from t1', + 'select floor(percentile(smallint_col, 1)) from t1', + 'select floor(percentile(tinyint_col, 1)) from t1', + 'select floor(percentile(uint_col, 1)) from t1', + 'select floor(percentile(ubigint_col, 1)) from t1', + 'select floor(percentile(usmallint_col, 1)) from t1', + 'select floor(percentile(utinyint_col, 1)) from t1', + 'select floor(apercentile(int_col, 1)) from super', + 'select floor(apercentile(int_col, 1)) from t1', + 'select floor(apercentile(bigint_col, 1)) from super', + 'select floor(apercentile(bigint_col, 1)) from t1', + 'select floor(apercentile(float_col, 1)) from super', + 'select floor(apercentile(float_col, 1)) from t1', + 'select floor(apercentile(double_col, 1)) from super', + 'select floor(apercentile(double_col, 1)) from t1', + 'select floor(apercentile(smallint_col, 1)) from super', + 'select floor(apercentile(smallint_col, 1)) from t1', + 'select floor(apercentile(tinyint_col, 1)) from super', + 'select floor(apercentile(tinyint_col, 1)) from t1', + 'select floor(apercentile(uint_col, 1)) from 
super', + 'select floor(apercentile(uint_col, 1)) from t1', + 'select floor(apercentile(ubigint_col, 1)) from super', + 'select floor(apercentile(ubigint_col, 1)) from t1', + 'select floor(apercentile(usmallint_col, 1)) from super', + 'select floor(apercentile(usmallint_col, 1)) from t1', + 'select floor(apercentile(utinyint_col, 1)) from super', + 'select floor(apercentile(utinyint_col, 1)) from t1', + 'select floor(last_row(int_col)) from super', + 'select floor(last_row(int_col)) from t1', + 'select floor(last_row(bigint_col)) from super', + 'select floor(last_row(bigint_col)) from t1', + 'select floor(last_row(float_col)) from super', + 'select floor(last_row(float_col)) from t1', + 'select floor(last_row(double_col)) from super', + 'select floor(last_row(double_col)) from t1', + 'select floor(last_row(smallint_col)) from super', + 'select floor(last_row(smallint_col)) from t1', + 'select floor(last_row(tinyint_col)) from super', + 'select floor(last_row(tinyint_col)) from t1', + 'select floor(last_row(uint_col)) from super', + 'select floor(last_row(uint_col)) from t1', + 'select floor(last_row(ubigint_col)) from super', + 'select floor(last_row(ubigint_col)) from t1', + 'select floor(last_row(usmallint_col)) from super', + 'select floor(last_row(usmallint_col)) from t1', + 'select floor(last_row(utinyint_col)) from super', + 'select floor(last_row(utinyint_col)) from t1', + 'select floor(interp(int_col)) from t1', + 'select floor(interp(bigint_col)) from t1', + 'select floor(interp(float_col)) from t1', + 'select floor(interp(double_col)) from t1', + 'select floor(interp(smallint_col)) from t1', + 'select floor(interp(tinyint_col)) from t1', + 'select floor(interp(uint_col)) from t1', + 'select floor(interp(ubigint_col)) from t1', + 'select floor(interp(usmallint_col)) from t1', + 'select floor(interp(utinyint_col)) from t1', + 'select floor(spread(ts)) from super', + 'select floor(spread(ts)) from t1', + 'select floor(spread(timestamp_col)) from super', + 
'select floor(spread(timestamp_col)) from t1', + 'select floor(spread(int_col)) from super', + 'select floor(spread(int_col)) from t1', + 'select floor(spread(bigint_col)) from super', + 'select floor(spread(bigint_col)) from t1', + 'select floor(spread(float_col)) from super', + 'select floor(spread(float_col)) from t1', + 'select floor(spread(double_col)) from super', + 'select floor(spread(double_col)) from t1', + 'select floor(spread(smallint_col)) from super', + 'select floor(spread(smallint_col)) from t1', + 'select floor(spread(tinyint_col)) from super', + 'select floor(spread(tinyint_col)) from t1', + 'select floor(spread(uint_col)) from super', + 'select floor(spread(uint_col)) from t1', + 'select floor(spread(ubigint_col)) from super', + 'select floor(spread(ubigint_col)) from t1', + 'select floor(spread(usmallint_col)) from super', + 'select floor(spread(usmallint_col)) from t1', + 'select floor(spread(utinyint_col)) from super', + 'select floor(spread(utinyint_col)) from t1', + 'select floor(int_col + int_col) from super', + 'select floor(int_col + int_col) from t1', + 'select floor(bigint_col + bigint_col) from super', + 'select floor(bigint_col + bigint_col) from t1', + 'select floor(float_col + float_col) from super', + 'select floor(float_col + float_col) from t1', + 'select floor(double_col + double_col) from super', + 'select floor(double_col + double_col) from t1', + 'select floor(smallint_col + smallint_col) from super', + 'select floor(smallint_col + smallint_col) from t1', + 'select floor(tinyint_col + tinyint_col) from super', + 'select floor(tinyint_col + tinyint_col) from t1', + 'select floor(uint_col + uint_col) from super', + 'select floor(uint_col + uint_col) from t1', + 'select floor(ubigint_col + ubigint_col) from super', + 'select floor(ubigint_col + ubigint_col) from t1', + 'select floor(usmallint_col + usmallint_col) from super', + 'select floor(usmallint_col + usmallint_col) from t1', + 'select floor(utinyint_col + utinyint_col) 
from super', + 'select floor(utinyint_col + utinyint_col) from t1', + 'select floor(int_col - int_col) from super', + 'select floor(int_col - int_col) from t1', + 'select floor(bigint_col - bigint_col) from super', + 'select floor(bigint_col - bigint_col) from t1', + 'select floor(float_col - float_col) from super', + 'select floor(float_col - float_col) from t1', + 'select floor(double_col - double_col) from super', + 'select floor(double_col - double_col) from t1', + 'select floor(smallint_col - smallint_col) from super', + 'select floor(smallint_col - smallint_col) from t1', + 'select floor(tinyint_col - tinyint_col) from super', + 'select floor(tinyint_col - tinyint_col) from t1', + 'select floor(uint_col - uint_col) from super', + 'select floor(uint_col - uint_col) from t1', + 'select floor(ubigint_col - ubigint_col) from super', + 'select floor(ubigint_col - ubigint_col) from t1', + 'select floor(usmallint_col - usmallint_col) from super', + 'select floor(usmallint_col - usmallint_col) from t1', + 'select floor(utinyint_col - utinyint_col) from super', + 'select floor(utinyint_col - utinyint_col) from t1', + 'select floor(int_col * int_col) from super', + 'select floor(int_col * int_col) from t1', + 'select floor(bigint_col * bigint_col) from super', + 'select floor(bigint_col * bigint_col) from t1', + 'select floor(float_col * float_col) from super', + 'select floor(float_col * float_col) from t1', + 'select floor(double_col * double_col) from super', + 'select floor(double_col * double_col) from t1', + 'select floor(smallint_col * smallint_col) from super', + 'select floor(smallint_col * smallint_col) from t1', + 'select floor(tinyint_col * tinyint_col) from super', + 'select floor(tinyint_col * tinyint_col) from t1', + 'select floor(uint_col * uint_col) from super', + 'select floor(uint_col * uint_col) from t1', + 'select floor(ubigint_col * ubigint_col) from super', + 'select floor(ubigint_col * ubigint_col) from t1', + 'select floor(usmallint_col * 
usmallint_col) from super', + 'select floor(usmallint_col * usmallint_col) from t1', + 'select floor(utinyint_col * utinyint_col) from super', + 'select floor(utinyint_col * utinyint_col) from t1', + 'select floor(int_col / int_col) from super', + 'select floor(int_col / int_col) from t1', + 'select floor(bigint_col / bigint_col) from super', + 'select floor(bigint_col / bigint_col) from t1', + 'select floor(float_col / float_col) from super', + 'select floor(float_col / float_col) from t1', + 'select floor(double_col / double_col) from super', + 'select floor(double_col / double_col) from t1', + 'select floor(smallint_col / smallint_col) from super', + 'select floor(smallint_col / smallint_col) from t1', + 'select floor(tinyint_col / tinyint_col) from super', + 'select floor(tinyint_col / tinyint_col) from t1', + 'select floor(uint_col / uint_col) from super', + 'select floor(uint_col / uint_col) from t1', + 'select floor(ubigint_col / ubigint_col) from super', + 'select floor(ubigint_col / ubigint_col) from t1', + 'select floor(usmallint_col / usmallint_col) from super', + 'select floor(usmallint_col / usmallint_col) from t1', + 'select floor(utinyint_col / utinyint_col) from super', + 'select floor(utinyint_col / utinyint_col) from t1', + 'select int_col, floor(int_col), int_col from super', + 'select int_col, floor(int_col), int_col from t1', + 'select bigint_col, floor(bigint_col), bigint_col from super', + 'select bigint_col, floor(bigint_col), bigint_col from t1', + 'select float_col, floor(float_col), float_col from super', + 'select float_col, floor(float_col), float_col from t1', + 'select double_col, floor(double_col), double_col from super', + 'select double_col, floor(double_col), double_col from t1', + 'select smallint_col, floor(smallint_col), smallint_col from super', + 'select smallint_col, floor(smallint_col), smallint_col from t1', + 'select tinyint_col, floor(tinyint_col), tinyint_col from super', + 'select tinyint_col, floor(tinyint_col), 
tinyint_col from t1', + 'select uint_col, floor(uint_col), uint_col from super', + 'select uint_col, floor(uint_col), uint_col from t1', + 'select ubigint_col, floor(ubigint_col), ubigint_col from super', + 'select ubigint_col, floor(ubigint_col), ubigint_col from t1', + 'select usmallint_col, floor(usmallint_col), usmallint_col from super', + 'select usmallint_col, floor(usmallint_col), usmallint_col from t1', + 'select utinyint_col, floor(utinyint_col), utinyint_col from super', + 'select utinyint_col, floor(utinyint_col), utinyint_col from t1', + 'select 1, floor(int_col), 1 from super', + 'select 1, floor(int_col), 1 from t1', + 'select 1, floor(bigint_col), 1 from super', + 'select 1, floor(bigint_col), 1 from t1', + 'select 1, floor(float_col), 1 from super', + 'select 1, floor(float_col), 1 from t1', + 'select 1, floor(double_col), 1 from super', + 'select 1, floor(double_col), 1 from t1', + 'select 1, floor(smallint_col), 1 from super', + 'select 1, floor(smallint_col), 1 from t1', + 'select 1, floor(tinyint_col), 1 from super', + 'select 1, floor(tinyint_col), 1 from t1', + 'select 1, floor(uint_col), 1 from super', + 'select 1, floor(uint_col), 1 from t1', + 'select 1, floor(ubigint_col), 1 from super', + 'select 1, floor(ubigint_col), 1 from t1', + 'select 1, floor(usmallint_col), 1 from super', + 'select 1, floor(usmallint_col), 1 from t1', + 'select 1, floor(utinyint_col), 1 from super', + 'select 1, floor(utinyint_col), 1 from t1', + 'select floor(int_col) as anyName from super', + 'select floor(int_col) as anyName from t1', + 'select floor(bigint_col) as anyName from super', + 'select floor(bigint_col) as anyName from t1', + 'select floor(float_col) as anyName from super', + 'select floor(float_col) as anyName from t1', + 'select floor(double_col) as anyName from super', + 'select floor(double_col) as anyName from t1', + 'select floor(smallint_col) as anyName from super', + 'select floor(smallint_col) as anyName from t1', + 'select floor(tinyint_col) 
as anyName from super', + 'select floor(tinyint_col) as anyName from t1', + 'select floor(uint_col) as anyName from super', + 'select floor(uint_col) as anyName from t1', + 'select floor(ubigint_col) as anyName from super', + 'select floor(ubigint_col) as anyName from t1', + 'select floor(usmallint_col) as anyName from super', + 'select floor(usmallint_col) as anyName from t1', + 'select floor(utinyint_col) as anyName from super', + 'select floor(utinyint_col) as anyName from t1' + ] + + + shouldPass2 = ['select floor(super.int_col) from super', + 'select floor(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.bigint_col) from super', + 'select floor(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.float_col) from super', + 'select floor(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.double_col) from super', + 'select floor(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.smallint_col) from super', + 'select floor(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.tinyint_col) from super', + 'select floor(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.uint_col) from super', + 'select floor(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.ubigint_col) from super', + 'select floor(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(super.usmallint_col) from super', + 'select floor(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select 
floor(super.utinyint_col) from super', + 'select floor(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select floor(t1.int_col) from t1', + 'select floor(t1.bigint_col) from t1', + 'select floor(t1.float_col) from t1', + 'select floor(t1.double_col) from t1', + 'select floor(t1.smallint_col) from t1', + 'select floor(t1.tinyint_col) from t1', + 'select floor(t1.uint_col) from t1', + 'select floor(t1.ubigint_col) from t1', + 'select floor(t1.usmallint_col) from t1', + 'select floor(t1.utinyint_col) from t1'] + for s in range(len(select_command)): for f in range(len(from_command)): sql = "select " + select_command[s] + from_command[f] - if (select_command[s] == "floor(int_col)"\ - or select_command[s] == "floor(bigint_col)"\ - or select_command[s] == "floor(smallint_col)" \ - or select_command[s] == "floor(float_col)"\ - or select_command[s] == "floor(double_col)"\ - or select_command[s] == "floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col)"\ - or select_command[s] == "1, floor(int_col), 1"\ - or select_command[s] == "1, floor(bigint_col), 1"\ - or select_command[s] == "1, floor(float_col), 1"\ - or select_command[s] == "1, floor(double_col), 1"\ - or select_command[s] == "1, floor(smallint_col), 1"\ - or select_command[s] == "1, floor(tinyint_col), 1"\ - or select_command[s] == "1, floor(uint_col), 1"\ - or select_command[s] == "1, floor(ubigint_col), 1"\ - or select_command[s] == "1, floor(usmallint_col), 1"\ - or select_command[s] == "1, floor(utinyint_col), 1"\ - or select_command[s] == "int_col, floor(int_col), int_col"\ - or select_command[s] == "bigint_col, floor(bigint_col), bigint_col"\ - or select_command[s] == "float_col, floor(float_col), float_col"\ - or select_command[s] == "double_col, floor(double_col), double_col"\ - or 
select_command[s] == "smallint_col, floor(smallint_col), smallint_col"\ - or select_command[s] == "tinyint_col, floor(tinyint_col), tinyint_col"\ - or select_command[s] == "uint_col, floor(uint_col), uint_col"\ - or select_command[s] == "ubigint_col, floor(ubigint_col), ubigint_col"\ - or select_command[s] == "usmallint_col, floor(usmallint_col), usmallint_col"\ - or select_command[s] == "utinyint_col, floor(utinyint_col), utinyint_col"\ - or select_command[s] == "floor(int_col) as anyName"\ - or select_command[s] == "floor(bigint_col) as anyName"\ - or select_command[s] == "floor(float_col) as anyName"\ - or select_command[s] == "floor(double_col) as anyName"\ - or select_command[s] == "floor(smallint_col) as anyName"\ - or select_command[s] == "floor(tinyint_col) as anyName"\ - or select_command[s] == "floor(uint_col) as anyName"\ - or select_command[s] == "floor(ubigint_col) as anyName"\ - or select_command[s] == "floor(usmallint_col) as anyName"\ - or select_command[s] == "floor(utinyint_col) as anyName"\ - or select_command[s] == "floor(int_col) + floor(int_col)"\ - or select_command[s] == "floor(bigint_col) + floor(bigint_col)"\ - or select_command[s] == "floor(float_col) + floor(float_col)"\ - or select_command[s] == "floor(double_col) + floor(double_col)"\ - or select_command[s] == "floor(smallint_col) + floor(smallint_col)"\ - or select_command[s] == "floor(tinyint_col) + floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col) + floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col) + floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col) + floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col) + floor(utinyint_col)"\ - or select_command[s] == "floor(int_col) + floor(int_col)"\ - or select_command[s] == "floor(bigint_col) + floor(bigint_col)"\ - or select_command[s] == "floor(float_col) + floor(float_col)"\ - or select_command[s] == "floor(double_col) + floor(double_col)"\ - or select_command[s] == 
"floor(smallint_col) + floor(smallint_col)"\ - or select_command[s] == "floor(tinyint_col) + floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col) + floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col) + floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col) + floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col) + cei(utinyint_col)"\ - or select_command[s] == "floor(int_col) - floor(int_col)"\ - or select_command[s] == "floor(bigint_col) - floor(bigint_col)"\ - or select_command[s] == "floor(float_col) - floor(float_col)"\ - or select_command[s] == "floor(double_col) - floor(double_col)"\ - or select_command[s] == "floor(smallint_col) - floor(smallint_col)"\ - or select_command[s] == "floor(tinyint_col) - floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col) - floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col) - floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col) - floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col) - floor(utinyint_col)"\ - or select_command[s] == "floor(int_col) * floor(int_col)"\ - or select_command[s] == "floor(bigint_col) * floor(bigint_col)"\ - or select_command[s] == "floor(float_col) * floor(float_col)"\ - or select_command[s] == "floor(double_col) * floor(double_col)"\ - or select_command[s] == "floor(smallint_col) * floor(smallint_col)"\ - or select_command[s] == "floor(tinyint_col) * floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col) * floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col) * floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col) * floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col) * floor(utinyint_col)"\ - or select_command[s] == "floor(int_col) / floor(int_col)"\ - or select_command[s] == "floor(bigint_col) / floor(bigint_col)"\ - or select_command[s] == "floor(float_col) / floor(float_col)"\ - or select_command[s] == "floor(double_col) / floor(double_col)"\ 
- or select_command[s] == "floor(smallint_col) / floor(smallint_col)"\ - or select_command[s] == "floor(tinyint_col) / floor(tinyint_col)"\ - or select_command[s] == "floor(uint_col) / floor(uint_col)"\ - or select_command[s] == "floor(ubigint_col) / floor(ubigint_col)"\ - or select_command[s] == "floor(usmallint_col) / floor(usmallint_col)"\ - or select_command[s] == "floor(utinyint_col) / floor(utinyint_col)"): + if sql in shouldPass: tdSql.query(sql) else: tdSql.error(sql) @@ -1475,40 +1943,10 @@ class TDTestCase: sql = "select " + simple_select_command[ sim] + advance_from_command[fr] + filter_command[ filter] + fill_command[fill] - if sql == "select floor(t1.int_col) from t1"\ - or sql == "select floor(super.int_col) from super"\ - or sql == "select floor(t1.bigint_col) from t1"\ - or sql == "select floor(super.bigint_col) from super"\ - or sql == "select floor(t1.smallint_col) from t1"\ - or sql == "select floor(super.smallint_col) from super"\ - or sql == "select floor(t1.tinyint_col) from t1"\ - or sql == "select floor(super.tinyint_col) from super"\ - or sql == "select floor(t1.float_col) from t1"\ - or sql == "select floor(super.float_col) from super"\ - or sql == "select floor(t1.double_col) from t1"\ - or sql == "select floor(super.double_col) from super"\ - or sql == "select floor(t1.uint_col) from t1"\ - or sql == "select floor(super.uint_col) from super"\ - or sql == "select floor(t1.ubigint_col) from t1"\ - or sql == "select floor(super.ubigint_col) from super"\ - or sql == "select floor(t1.usmallint_col) from t1"\ - or sql == "select floor(super.usmallint_col) from super"\ - or sql == "select floor(t1.utinyint_col) from t1"\ - or sql == "select floor(super.utinyint_col) from super"\ - or sql == "select floor(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select 
floor(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select floor(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + if sql in shouldPass2: tdSql.query(sql) else: - tdSql.error(sql) - + tdSql.error(sql) def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index b3cf42fd275c07745109cf37eb9e1712b67ba7a9..e45983427d64a801fc866058a6468414d62b81c5 100644 --- a/tests/pytest/functions/function_interp.py +++ b/tests/pytest/functions/function_interp.py @@ -8360,6 +8360,45 @@ class TDTestCase: sql += "where ts >= '%s' AND ts <= '%s' range('%s' , '%s') EVERY(1s) FILL(NULL);" % (self.ts , self.ts + 10000 , self.ts + 150000 , self.ts + 200000) datacheck = tdSql.error(sql) + # TD-10736 Exception use case for coredump + + tdSql.execute("create database db_except;") + tdSql.execute("use db_except;") + tdSql.execute("create table tb (ts timestamp, c1 int);") + tdSql.execute("insert into tb values ('2021-10-01 08:00:00.000' ,1);") + tdSql.execute("insert into tb values ('2021-10-01 
08:00:01.000' ,2);") + tdSql.execute("insert into tb values ('2021-10-01 08:00:02.000' ,3);") + tdSql.execute("insert into tb values ('2021-10-01 08:00:03.000' ,4);") + + tdSql.execute("create stable stb (ts timestamp, c1 int) tags (id int);") + tdSql.execute("insert into sub_1 using stb tags (1) values ('2021-10-01 08:00:00.000' ,1);") + tdSql.execute("insert into sub_1 using stb tags (1) values ('2021-10-01 08:00:01.000' ,2);") + + tdSql.execute("insert into sub_2 using stb tags (1) values ('2021-10-01 08:00:00.000' ,3);") + tdSql.execute("insert into sub_2 using stb tags (1) values ('2021-10-01 08:00:01.000' ,4);") + + tdSql.execute("insert into sub_3 using stb tags (1) values ('2021-10-01 08:00:01.000' ,1);") + tdSql.execute("insert into sub_3 using stb tags (1) values ('2021-10-01 08:00:02.000' ,2);") + tdSql.execute("insert into sub_3 using stb tags (1) values ('2021-10-01 08:00:03.000' ,3);") + tdSql.execute("insert into sub_3 using stb tags (1) values ('2021-10-01 08:00:04.000' ,4);") + + tdSql.query("select interp(c1) from tb where ts = '2021-10-01 08:00:00.000' every(1s); ") + tdSql.checkData(0,1,1) + tdSql.query("select interp(c1) from tb where ts = '2021-10-01 08:00:98.000' every(1s); ") + tdSql.checkRows(0) + + tdSql.query("select interp(c1) from sub_1 where ts = '2021-10-01 08:00:00.000' every(1s); ") + tdSql.checkData(0,1,1) + tdSql.query("select interp(c1) from sub_1 where ts = '2021-10-01 08:00:98.000' every(1s); ") + tdSql.checkRows(0) + + tdSql.error("select interp(c1) from stb where ts = '2021-10-01 08:00:00.000' every(1s); ") + tdSql.error("select interp(c1) from stb where ts = '2021-10-01 08:00:98.000' every(1s); ") + tdSql.query("select interp(c1) from stb where ts = '2021-10-01 08:00:00.000' every(1s) group by tbname; ") + tdSql.checkRows(2) + tdSql.query("select interp(c1) from stb where ts = '2021-10-01 08:00:98.000' every(1s) group by tbname; ") + tdSql.checkRows(0) + # Nested Query + where + range + FILL(NULL) + EVERY( s)(3) # sql = 
"select * from (select %s from stable_1 where ts BETWEEN '%s' AND '%s' range('%s' , '%s') EVERY(1s) FILL(NULL) group by tbname) z1," % (interp_select , self.ts , self.ts + 10000 , self.ts - 10000 , self.ts + 100000) # sql += "(select %s from stable_2 where ts BETWEEN '%s' AND '%s' range('%s' , '%s') EVERY(1s) FILL(NULL) group by tbname) z2 where z1.ts=z2.ts ;" % (interp_select , self.ts , self.ts + 10000 , self.ts - 10000 , self.ts + 100000) diff --git a/tests/pytest/functions/function_mavg.py b/tests/pytest/functions/function_mavg.py new file mode 100644 index 0000000000000000000000000000000000000000..0760b203b809ae1a1a05b061ed6f2fbc8659b740 --- /dev/null +++ b/tests/pytest/functions/function_mavg.py @@ -0,0 +1,676 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + mavg function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "mavg(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param 
m_comm: string, comma between col and k , required parameters; + :param k: int/float,the width of the sliding window, [1,100], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, result column another name,or add other funtion; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :return: mavg query statement,default: select mavg(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ", "") + + if 
any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + # if all(["," in col , len(col.split(",")) != 2]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if ("," in col): + # if (not col.split(",")[0].strip()) ^ (not col.split(",")[1].strip()): + # col = col.strip().split(",")[0] if not col.split(",")[1].strip() else col.strip().split(",")[1] + # else: + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # pass + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + colname = col if "." 
not in col else col.split(".")[1] + col_index = collist.index(colname) + if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any( [func != "mavg(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if "order by tbname" in condition.lower(): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias, not alias.isalnum()]): + # actually, column alias also 
support "_", but in this case,forbidden that。 + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("mavg\([a-z0-9 .,]*\)", f"count({col})", self.mavg_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, 
r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for j in range(len(pre_mavg)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_mavg[j]) + pre_row += len(pre_mavg) + return + elif "union" in condition: + union_sql_0 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_mavg_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_mavg_1 = tdSql.queryResult + + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, 
r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 1, union_mavg_0[i][1]) + else: + tdSql.checkData(i, 1, union_mavg_1[i-row_union_0][1]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + tdSql.checkData(i, 1, pre_mavg[i]) + + pass + + def mavg_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkmavg() + case2 = {"col": "c2"} + self.checkmavg(**case2) + case3 = {"col": "c5"} + self.checkmavg(**case3) + case4 = {"col": "c7"} + self.checkmavg(**case4) + case5 = {"col": "c8"} + self.checkmavg(**case5) + case6 = {"col": "c9"} + self.checkmavg(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checkmavg(**case7) + case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"} + self.checkmavg(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkmavg(**case9) + case10 = {"alias": ", _c0"} + self.checkmavg(**case10) + # case11 = {"alias": ", st1"} + # self.checkmavg(**case11) + # case12 = {"alias": ", c1"} + # self.checkmavg(**case12) + + # case13~15: with single 
condition + case13 = {"condition": "where c1 <= 10"} + self.checkmavg(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkmavg(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkmavg(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkmavg(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkmavg(**case17) + # # case18~19: with group by + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checkmavg(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + self.checkmavg(**case20) + #case21 = { + # "table_expr": "stb1", + # "condition": "group by tbname order by tbname" + #} + #self.checkmavg(**case21) + + # case22: with union + case22 = { + "condition": "union all select mavg( c1 , 1 ) from t2" + } + self.checkmavg(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkmavg(**case23) + + # case24: value k range[1, 100], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checkmavg(**case24) + case25 = {"k": 2.999} + self.checkmavg(**case25) + case26 = {"k": 1000} + self.checkmavg(**case26) + + pass + + def mavg_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checkmavg(**err1) # no col + err2 = {"sel": ""} + self.checkmavg(**err2) # no select + err3 = {"func": "mavg", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checkmavg(**err3) # no mavg condition: select mavg from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checkmavg(**err4) # no mavg condition: select mavg() from + err5 = {"func": "mavg", "r_comm": ""} + self.checkmavg(**err5) # no brackets: select mavg col, k from + err6 = {"fr": ""} + self.checkmavg(**err6) # no from + err7 = {"k": ""} + self.checkmavg(**err7) # no k + err8 = {"table_expr": ""} + 
self.checkmavg(**err8) # no table_expr + + err9 = {"col": "st1"} + self.checkmavg(**err9) # col: tag + err10 = {"col": 1} + self.checkmavg(**err10) # col: value + err11 = {"col": "NULL"} + self.checkmavg(**err11) # col: NULL + err12 = {"col": "%_"} + self.checkmavg(**err12) # col: %_ + err13 = {"col": "c3"} + self.checkmavg(**err13) # col: timestamp col + err14 = {"col": "_c0"} + self.checkmavg(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + self.checkmavg(**err15) # expr col + err16 = {"col": "c4"} + self.checkmavg(**err16) # binary col + err17 = {"col": "c10"} + self.checkmavg(**err17) # nchar col + err18 = {"col": "c6"} + self.checkmavg(**err18) # bool col + err19 = {"col": "'c1'"} + self.checkmavg(**err19) # col: string + err20 = {"col": None} + self.checkmavg(**err20) # col: None + err21 = {"col": "''"} + self.checkmavg(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checkmavg(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checkmavg(**err23) # tbname + err24 = {"col": "stb1"} + self.checkmavg(**err24) # stbname + err25 = {"col": "db"} + self.checkmavg(**err25) # datbasename + err26 = {"col": "True"} + self.checkmavg(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checkmavg(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checkmavg(**err28) # col: all col + err29 = {"func": "mavg[", "r_comm": "]"} + self.checkmavg(**err29) # form: mavg[col, k] + err30 = {"func": "mavg{", "r_comm": "}"} + self.checkmavg(**err30) # form: mavg{col, k} + err31 = {"col": "[c1]"} + self.checkmavg(**err31) # form: mavg([col], k) + err32 = {"col": "c1, c2"} + self.checkmavg(**err32) # form: mavg(col, col2, k) + err33 = {"col": "c1, 2"} + self.checkmavg(**err33) # form: mavg(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checkmavg(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checkmavg(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checkmavg(**err36) # mix with select 
function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checkmavg(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checkmavg(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checkmavg(**err39) # mix with calculation function 2 + err40 = {"alias": "+ 2"} + self.checkmavg(**err40) # mix with arithmetic 1 + err41 = {"alias": "+ avg(c1)"} + self.checkmavg(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checkmavg(**err42) # mix with other col + err43 = {"table_expr": "stb1"} + self.checkmavg(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checkmavg(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checkmavg(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + self.checkmavg(**err46) # group by normal col + err47 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + # self.checkmavg(**err47) # with slimit + err48 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # self.checkmavg(**err48) # with soffset + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checkmavg(**err49) # k: timestamp + err50 = {"k": False} + self.checkmavg(**err50) # k: False + err51 = {"k": "%"} + self.checkmavg(**err51) # k: special char + err52 = {"k": ""} + self.checkmavg(**err52) # k: "" + err53 = {"k": None} + self.checkmavg(**err53) # k: None + err54 = {"k": "NULL"} + self.checkmavg(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checkmavg(**err55) # k: string + err56 = {"k": "c1"} + self.checkmavg(**err56) # k: sring,col name + err57 = {"col": "c1, 1, c2"} + self.checkmavg(**err57) # form: mavg(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checkmavg(**err58) # form: mavg(col newname, k) + err59 = {"k": "'1'"} + # 
self.checkmavg(**err59) # formL mavg(colm, "1") + err60 = {"k": "-1-(-2)"} + # self.checkmavg(**err60) # formL mavg(colm, -1-2) + err61 = {"k": 1001} + self.checkmavg(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checkmavg(**err62) # k: negative number + err63 = {"k": 0} + self.checkmavg(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checkmavg(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checkmavg(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checkmavg(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checkmavg(**err67) # k: left out of [1, 1000] + err68 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" # order by tbname not supported + } + self.checkmavg(**err68) + + pass + + def mavg_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def mavg_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 
bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def mavg_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.mavg_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.mavg_test_table(tbnum) + 
self.mavg_test_data(tbnum, per_table_rows, nowtime) + self.mavg_current_query() + self.mavg_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.mavg_current_query() + self.mavg_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.mavg_current_query() + self.mavg_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.mavg_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_round.py b/tests/pytest/functions/function_round.py index 93cace49ad8d16c6491584ed530b3dff07ef6fe4..8785821452dfe0ed65bc784f2c5d6c20f0fa484b 100644 --- a/tests/pytest/functions/function_round.py +++ b/tests/pytest/functions/function_round.py @@ -1294,21 +1294,21 @@ class TDTestCase: double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, 
utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ - uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned, timestamp_tag timestamp)" ) tdSql.execute( - "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % (self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1338,11 +1338,11 @@ class TDTestCase: self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) tdSql.execute( - "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d, %s)" % (self.randomBigint(), self.randomDouble(), self.randomDouble(), self.randomNchar(), self.randomSmallint(), 
self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), - self.randomUSmallint(), self.randomUTinyint())) + self.randomUSmallint(), self.randomUTinyint(), 'now')) tdSql.execute( "insert into t2 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" % (self.randomInt(), self.randomBigint(), self.randomDouble(), @@ -1371,100 +1371,567 @@ class TDTestCase: self.randomTinyint(), self.randomNchar(), self.randomUInt(), self.randomUBigint(), self.randomUSmallint(), self.randomUTinyint())) + + shouldPass = ['select round(int_col) from super', + 'select round(int_col) from t1', + 'select round(bigint_col) from super', + 'select round(bigint_col) from t1', + 'select round(float_col) from super', + 'select round(float_col) from t1', + 'select round(double_col) from super', + 'select round(double_col) from t1', + 'select round(smallint_col) from super', + 'select round(smallint_col) from t1', + 'select round(tinyint_col) from super', + 'select round(tinyint_col) from t1', + 'select round(uint_col) from super', + 'select round(uint_col) from t1', + 'select round(ubigint_col) from super', + 'select round(ubigint_col) from t1', + 'select round(usmallint_col) from super', + 'select round(usmallint_col) from t1', + 'select round(utinyint_col) from super', + 'select round(utinyint_col) from t1', + 'select round(int_col) - round(int_col) from super', + 'select round(int_col) - round(int_col) from t1', + 'select round(bigint_col) - round(bigint_col) from super', + 'select round(bigint_col) - round(bigint_col) from t1', + 'select round(float_col) - round(float_col) from super', + 'select round(float_col) - round(float_col) from t1', + 'select round(double_col) - round(double_col) from super', + 'select round(double_col) - round(double_col) from t1', + 'select round(smallint_col) - round(smallint_col) from super', + 'select round(smallint_col) - round(smallint_col) from t1', + 'select round(tinyint_col) - 
round(tinyint_col) from super', + 'select round(tinyint_col) - round(tinyint_col) from t1', + 'select round(uint_col) - round(uint_col) from super', + 'select round(uint_col) - round(uint_col) from t1', + 'select round(ubigint_col) - round(ubigint_col) from super', + 'select round(ubigint_col) - round(ubigint_col) from t1', + 'select round(usmallint_col) - round(usmallint_col) from super', + 'select round(usmallint_col) - round(usmallint_col) from t1', + 'select round(utinyint_col) - round(utinyint_col) from super', + 'select round(utinyint_col) - round(utinyint_col) from t1', + 'select round(int_col) / round(int_col) from super', + 'select round(int_col) / round(int_col) from t1', + 'select round(bigint_col) / round(bigint_col) from super', + 'select round(bigint_col) / round(bigint_col) from t1', + 'select round(float_col) / round(float_col) from super', + 'select round(float_col) / round(float_col) from t1', + 'select round(double_col) / round(double_col) from super', + 'select round(double_col) / round(double_col) from t1', + 'select round(smallint_col) / round(smallint_col) from super', + 'select round(smallint_col) / round(smallint_col) from t1', + 'select round(tinyint_col) / round(tinyint_col) from super', + 'select round(tinyint_col) / round(tinyint_col) from t1', + 'select round(uint_col) / round(uint_col) from super', + 'select round(uint_col) / round(uint_col) from t1', + 'select round(ubigint_col) / round(ubigint_col) from super', + 'select round(ubigint_col) / round(ubigint_col) from t1', + 'select round(usmallint_col) / round(usmallint_col) from super', + 'select round(usmallint_col) / round(usmallint_col) from t1', + 'select round(utinyint_col) / round(utinyint_col) from super', + 'select round(utinyint_col) / round(utinyint_col) from t1', + 'select round(int_col) * round(int_col) from super', + 'select round(int_col) * round(int_col) from t1', + 'select round(bigint_col) * round(bigint_col) from super', + 'select round(bigint_col) * 
round(bigint_col) from t1', + 'select round(float_col) * round(float_col) from super', + 'select round(float_col) * round(float_col) from t1', + 'select round(double_col) * round(double_col) from super', + 'select round(double_col) * round(double_col) from t1', + 'select round(smallint_col) * round(smallint_col) from super', + 'select round(smallint_col) * round(smallint_col) from t1', + 'select round(tinyint_col) * round(tinyint_col) from super', + 'select round(tinyint_col) * round(tinyint_col) from t1', + 'select round(uint_col) * round(uint_col) from super', + 'select round(uint_col) * round(uint_col) from t1', + 'select round(ubigint_col) * round(ubigint_col) from super', + 'select round(ubigint_col) * round(ubigint_col) from t1', + 'select round(usmallint_col) * round(usmallint_col) from super', + 'select round(usmallint_col) * round(usmallint_col) from t1', + 'select round(utinyint_col) * round(utinyint_col) from super', + 'select round(utinyint_col) * round(utinyint_col) from t1', + 'select round(count(ts)) from super', + 'select round(count(ts)) from t1', + 'select round(count(timestamp_col)) from super', + 'select round(count(timestamp_col)) from t1', + 'select round(count(int_col)) from super', + 'select round(count(int_col)) from t1', + 'select round(count(bigint_col)) from super', + 'select round(count(bigint_col)) from t1', + 'select round(count(float_col)) from super', + 'select round(count(float_col)) from t1', + 'select round(count(double_col)) from super', + 'select round(count(double_col)) from t1', + 'select round(count(binary_col)) from super', + 'select round(count(binary_col)) from t1', + 'select round(count(smallint_col)) from super', + 'select round(count(smallint_col)) from t1', + 'select round(count(tinyint_col)) from super', + 'select round(count(tinyint_col)) from t1', + 'select round(count(bool_col)) from super', + 'select round(count(bool_col)) from t1', + 'select round(count(nchar_col)) from super', + 'select round(count(nchar_col)) 
from t1', + 'select round(count(uint_col)) from super', + 'select round(count(uint_col)) from t1', + 'select round(count(ubigint_col)) from super', + 'select round(count(ubigint_col)) from t1', + 'select round(count(usmallint_col)) from super', + 'select round(count(usmallint_col)) from t1', + 'select round(count(utinyint_col)) from super', + 'select round(count(utinyint_col)) from t1', + 'select round(count(timestamp_tag)) from super', + 'select round(count(timestamp_tag)) from t1', + 'select round(count(int_tag)) from super', + 'select round(count(int_tag)) from t1', + 'select round(count(bigint_tag)) from super', + 'select round(count(bigint_tag)) from t1', + 'select round(count(float_tag)) from super', + 'select round(count(float_tag)) from t1', + 'select round(count(double_tag)) from super', + 'select round(count(double_tag)) from t1', + 'select round(count(binary_tag)) from super', + 'select round(count(binary_tag)) from t1', + 'select round(count(smallint_tag)) from super', + 'select round(count(smallint_tag)) from t1', + 'select round(count(tinyint_tag)) from super', + 'select round(count(tinyint_tag)) from t1', + 'select round(count(bool_tag)) from super', + 'select round(count(bool_tag)) from t1', + 'select round(count(nchar_tag)) from super', + 'select round(count(nchar_tag)) from t1', + 'select round(count(uint_tag)) from super', + 'select round(count(uint_tag)) from t1', + 'select round(count(ubigint_tag)) from super', + 'select round(count(ubigint_tag)) from t1', + 'select round(count(usmallint_tag)) from super', + 'select round(count(usmallint_tag)) from t1', + 'select round(count(utinyint_tag)) from super', + 'select round(count(utinyint_tag)) from t1', + 'select round(avg(int_col)) from super', + 'select round(avg(int_col)) from t1', + 'select round(avg(bigint_col)) from super', + 'select round(avg(bigint_col)) from t1', + 'select round(avg(float_col)) from super', + 'select round(avg(float_col)) from t1', + 'select round(avg(double_col)) from 
super', + 'select round(avg(double_col)) from t1', + 'select round(avg(smallint_col)) from super', + 'select round(avg(smallint_col)) from t1', + 'select round(avg(tinyint_col)) from super', + 'select round(avg(tinyint_col)) from t1', + 'select round(avg(uint_col)) from super', + 'select round(avg(uint_col)) from t1', + 'select round(avg(ubigint_col)) from super', + 'select round(avg(ubigint_col)) from t1', + 'select round(avg(usmallint_col)) from super', + 'select round(avg(usmallint_col)) from t1', + 'select round(avg(utinyint_col)) from super', + 'select round(avg(utinyint_col)) from t1', + 'select round(twa(int_col)) from t1', + 'select round(twa(bigint_col)) from t1', + 'select round(twa(float_col)) from t1', + 'select round(twa(double_col)) from t1', + 'select round(twa(smallint_col)) from t1', + 'select round(twa(tinyint_col)) from t1', + 'select round(twa(uint_col)) from t1', + 'select round(twa(ubigint_col)) from t1', + 'select round(twa(usmallint_col)) from t1', + 'select round(twa(utinyint_col)) from t1', + 'select round(sum(int_col)) from super', + 'select round(sum(int_col)) from t1', + 'select round(sum(bigint_col)) from super', + 'select round(sum(bigint_col)) from t1', + 'select round(sum(float_col)) from super', + 'select round(sum(float_col)) from t1', + 'select round(sum(double_col)) from super', + 'select round(sum(double_col)) from t1', + 'select round(sum(smallint_col)) from super', + 'select round(sum(smallint_col)) from t1', + 'select round(sum(tinyint_col)) from super', + 'select round(sum(tinyint_col)) from t1', + 'select round(sum(uint_col)) from super', + 'select round(sum(uint_col)) from t1', + 'select round(sum(ubigint_col)) from super', + 'select round(sum(ubigint_col)) from t1', + 'select round(sum(usmallint_col)) from super', + 'select round(sum(usmallint_col)) from t1', + 'select round(sum(utinyint_col)) from super', + 'select round(sum(utinyint_col)) from t1', + 'select round(stddev(int_col)) from super', + 'select 
round(stddev(int_col)) from t1', + 'select round(stddev(bigint_col)) from super', + 'select round(stddev(bigint_col)) from t1', + 'select round(stddev(float_col)) from super', + 'select round(stddev(float_col)) from t1', + 'select round(stddev(double_col)) from super', + 'select round(stddev(double_col)) from t1', + 'select round(stddev(smallint_col)) from super', + 'select round(stddev(smallint_col)) from t1', + 'select round(stddev(tinyint_col)) from super', + 'select round(stddev(tinyint_col)) from t1', + 'select round(stddev(uint_col)) from super', + 'select round(stddev(uint_col)) from t1', + 'select round(stddev(ubigint_col)) from super', + 'select round(stddev(ubigint_col)) from t1', + 'select round(stddev(usmallint_col)) from super', + 'select round(stddev(usmallint_col)) from t1', + 'select round(stddev(utinyint_col)) from super', + 'select round(stddev(utinyint_col)) from t1', + 'select round(irate(int_col)) from t1', + 'select round(irate(bigint_col)) from t1', + 'select round(irate(float_col)) from t1', + 'select round(irate(double_col)) from t1', + 'select round(irate(smallint_col)) from t1', + 'select round(irate(tinyint_col)) from t1', + 'select round(irate(uint_col)) from t1', + 'select round(irate(ubigint_col)) from t1', + 'select round(irate(usmallint_col)) from t1', + 'select round(irate(utinyint_col)) from t1', + 'select round(min(int_col)) from super', + 'select round(min(int_col)) from t1', + 'select round(min(bigint_col)) from super', + 'select round(min(bigint_col)) from t1', + 'select round(min(float_col)) from super', + 'select round(min(float_col)) from t1', + 'select round(min(double_col)) from super', + 'select round(min(double_col)) from t1', + 'select round(min(smallint_col)) from super', + 'select round(min(smallint_col)) from t1', + 'select round(min(tinyint_col)) from super', + 'select round(min(tinyint_col)) from t1', + 'select round(min(uint_col)) from super', + 'select round(min(uint_col)) from t1', + 'select 
round(min(ubigint_col)) from super', + 'select round(min(ubigint_col)) from t1', + 'select round(min(usmallint_col)) from super', + 'select round(min(usmallint_col)) from t1', + 'select round(min(utinyint_col)) from super', + 'select round(min(utinyint_col)) from t1', + 'select round(max(int_col)) from super', + 'select round(max(int_col)) from t1', + 'select round(max(bigint_col)) from super', + 'select round(max(bigint_col)) from t1', + 'select round(max(float_col)) from super', + 'select round(max(float_col)) from t1', + 'select round(max(double_col)) from super', + 'select round(max(double_col)) from t1', + 'select round(max(smallint_col)) from super', + 'select round(max(smallint_col)) from t1', + 'select round(max(tinyint_col)) from super', + 'select round(max(tinyint_col)) from t1', + 'select round(max(uint_col)) from super', + 'select round(max(uint_col)) from t1', + 'select round(max(ubigint_col)) from super', + 'select round(max(ubigint_col)) from t1', + 'select round(max(usmallint_col)) from super', + 'select round(max(usmallint_col)) from t1', + 'select round(max(utinyint_col)) from super', + 'select round(max(utinyint_col)) from t1', + 'select round(first(int_col)) from super', + 'select round(first(int_col)) from t1', + 'select round(first(bigint_col)) from super', + 'select round(first(bigint_col)) from t1', + 'select round(first(float_col)) from super', + 'select round(first(float_col)) from t1', + 'select round(first(double_col)) from super', + 'select round(first(double_col)) from t1', + 'select round(first(smallint_col)) from super', + 'select round(first(smallint_col)) from t1', + 'select round(first(tinyint_col)) from super', + 'select round(first(tinyint_col)) from t1', + 'select round(first(uint_col)) from super', + 'select round(first(uint_col)) from t1', + 'select round(first(ubigint_col)) from super', + 'select round(first(ubigint_col)) from t1', + 'select round(first(usmallint_col)) from super', + 'select round(first(usmallint_col)) from 
t1', + 'select round(first(utinyint_col)) from super', + 'select round(first(utinyint_col)) from t1', + 'select round(last(int_col)) from super', + 'select round(last(int_col)) from t1', + 'select round(last(bigint_col)) from super', + 'select round(last(bigint_col)) from t1', + 'select round(last(float_col)) from super', + 'select round(last(float_col)) from t1', + 'select round(last(double_col)) from super', + 'select round(last(double_col)) from t1', + 'select round(last(smallint_col)) from super', + 'select round(last(smallint_col)) from t1', + 'select round(last(tinyint_col)) from super', + 'select round(last(tinyint_col)) from t1', + 'select round(last(uint_col)) from super', + 'select round(last(uint_col)) from t1', + 'select round(last(ubigint_col)) from super', + 'select round(last(ubigint_col)) from t1', + 'select round(last(usmallint_col)) from super', + 'select round(last(usmallint_col)) from t1', + 'select round(last(utinyint_col)) from super', + 'select round(last(utinyint_col)) from t1', + 'select round(percentile(int_col, 1)) from t1', + 'select round(percentile(bigint_col, 1)) from t1', + 'select round(percentile(float_col, 1)) from t1', + 'select round(percentile(double_col, 1)) from t1', + 'select round(percentile(smallint_col, 1)) from t1', + 'select round(percentile(tinyint_col, 1)) from t1', + 'select round(percentile(uint_col, 1)) from t1', + 'select round(percentile(ubigint_col, 1)) from t1', + 'select round(percentile(usmallint_col, 1)) from t1', + 'select round(percentile(utinyint_col, 1)) from t1', + 'select round(apercentile(int_col, 1)) from super', + 'select round(apercentile(int_col, 1)) from t1', + 'select round(apercentile(bigint_col, 1)) from super', + 'select round(apercentile(bigint_col, 1)) from t1', + 'select round(apercentile(float_col, 1)) from super', + 'select round(apercentile(float_col, 1)) from t1', + 'select round(apercentile(double_col, 1)) from super', + 'select round(apercentile(double_col, 1)) from t1', + 'select 
round(apercentile(smallint_col, 1)) from super', + 'select round(apercentile(smallint_col, 1)) from t1', + 'select round(apercentile(tinyint_col, 1)) from super', + 'select round(apercentile(tinyint_col, 1)) from t1', + 'select round(apercentile(uint_col, 1)) from super', + 'select round(apercentile(uint_col, 1)) from t1', + 'select round(apercentile(ubigint_col, 1)) from super', + 'select round(apercentile(ubigint_col, 1)) from t1', + 'select round(apercentile(usmallint_col, 1)) from super', + 'select round(apercentile(usmallint_col, 1)) from t1', + 'select round(apercentile(utinyint_col, 1)) from super', + 'select round(apercentile(utinyint_col, 1)) from t1', + 'select round(last_row(int_col)) from super', + 'select round(last_row(int_col)) from t1', + 'select round(last_row(bigint_col)) from super', + 'select round(last_row(bigint_col)) from t1', + 'select round(last_row(float_col)) from super', + 'select round(last_row(float_col)) from t1', + 'select round(last_row(double_col)) from super', + 'select round(last_row(double_col)) from t1', + 'select round(last_row(smallint_col)) from super', + 'select round(last_row(smallint_col)) from t1', + 'select round(last_row(tinyint_col)) from super', + 'select round(last_row(tinyint_col)) from t1', + 'select round(last_row(uint_col)) from super', + 'select round(last_row(uint_col)) from t1', + 'select round(last_row(ubigint_col)) from super', + 'select round(last_row(ubigint_col)) from t1', + 'select round(last_row(usmallint_col)) from super', + 'select round(last_row(usmallint_col)) from t1', + 'select round(last_row(utinyint_col)) from super', + 'select round(last_row(utinyint_col)) from t1', + 'select round(interp(int_col)) from t1', + 'select round(interp(bigint_col)) from t1', + 'select round(interp(float_col)) from t1', + 'select round(interp(double_col)) from t1', + 'select round(interp(smallint_col)) from t1', + 'select round(interp(tinyint_col)) from t1', + 'select round(interp(uint_col)) from t1', + 'select 
round(interp(ubigint_col)) from t1', + 'select round(interp(usmallint_col)) from t1', + 'select round(interp(utinyint_col)) from t1', + 'select round(spread(ts)) from super', + 'select round(spread(ts)) from t1', + 'select round(spread(timestamp_col)) from super', + 'select round(spread(timestamp_col)) from t1', + 'select round(spread(int_col)) from super', + 'select round(spread(int_col)) from t1', + 'select round(spread(bigint_col)) from super', + 'select round(spread(bigint_col)) from t1', + 'select round(spread(float_col)) from super', + 'select round(spread(float_col)) from t1', + 'select round(spread(double_col)) from super', + 'select round(spread(double_col)) from t1', + 'select round(spread(smallint_col)) from super', + 'select round(spread(smallint_col)) from t1', + 'select round(spread(tinyint_col)) from super', + 'select round(spread(tinyint_col)) from t1', + 'select round(spread(uint_col)) from super', + 'select round(spread(uint_col)) from t1', + 'select round(spread(ubigint_col)) from super', + 'select round(spread(ubigint_col)) from t1', + 'select round(spread(usmallint_col)) from super', + 'select round(spread(usmallint_col)) from t1', + 'select round(spread(utinyint_col)) from super', + 'select round(spread(utinyint_col)) from t1', + 'select round(int_col + int_col) from super', + 'select round(int_col + int_col) from t1', + 'select round(bigint_col + bigint_col) from super', + 'select round(bigint_col + bigint_col) from t1', + 'select round(float_col + float_col) from super', + 'select round(float_col + float_col) from t1', + 'select round(double_col + double_col) from super', + 'select round(double_col + double_col) from t1', + 'select round(smallint_col + smallint_col) from super', + 'select round(smallint_col + smallint_col) from t1', + 'select round(tinyint_col + tinyint_col) from super', + 'select round(tinyint_col + tinyint_col) from t1', + 'select round(uint_col + uint_col) from super', + 'select round(uint_col + uint_col) from t1', + 
'select round(ubigint_col + ubigint_col) from super', + 'select round(ubigint_col + ubigint_col) from t1', + 'select round(usmallint_col + usmallint_col) from super', + 'select round(usmallint_col + usmallint_col) from t1', + 'select round(utinyint_col + utinyint_col) from super', + 'select round(utinyint_col + utinyint_col) from t1', + 'select round(int_col - int_col) from super', + 'select round(int_col - int_col) from t1', + 'select round(bigint_col - bigint_col) from super', + 'select round(bigint_col - bigint_col) from t1', + 'select round(float_col - float_col) from super', + 'select round(float_col - float_col) from t1', + 'select round(double_col - double_col) from super', + 'select round(double_col - double_col) from t1', + 'select round(smallint_col - smallint_col) from super', + 'select round(smallint_col - smallint_col) from t1', + 'select round(tinyint_col - tinyint_col) from super', + 'select round(tinyint_col - tinyint_col) from t1', + 'select round(uint_col - uint_col) from super', + 'select round(uint_col - uint_col) from t1', + 'select round(ubigint_col - ubigint_col) from super', + 'select round(ubigint_col - ubigint_col) from t1', + 'select round(usmallint_col - usmallint_col) from super', + 'select round(usmallint_col - usmallint_col) from t1', + 'select round(utinyint_col - utinyint_col) from super', + 'select round(utinyint_col - utinyint_col) from t1', + 'select round(int_col * int_col) from super', + 'select round(int_col * int_col) from t1', + 'select round(bigint_col * bigint_col) from super', + 'select round(bigint_col * bigint_col) from t1', + 'select round(float_col * float_col) from super', + 'select round(float_col * float_col) from t1', + 'select round(double_col * double_col) from super', + 'select round(double_col * double_col) from t1', + 'select round(smallint_col * smallint_col) from super', + 'select round(smallint_col * smallint_col) from t1', + 'select round(tinyint_col * tinyint_col) from super', + 'select round(tinyint_col 
* tinyint_col) from t1', + 'select round(uint_col * uint_col) from super', + 'select round(uint_col * uint_col) from t1', + 'select round(ubigint_col * ubigint_col) from super', + 'select round(ubigint_col * ubigint_col) from t1', + 'select round(usmallint_col * usmallint_col) from super', + 'select round(usmallint_col * usmallint_col) from t1', + 'select round(utinyint_col * utinyint_col) from super', + 'select round(utinyint_col * utinyint_col) from t1', + 'select round(int_col / int_col) from super', + 'select round(int_col / int_col) from t1', + 'select round(bigint_col / bigint_col) from super', + 'select round(bigint_col / bigint_col) from t1', + 'select round(float_col / float_col) from super', + 'select round(float_col / float_col) from t1', + 'select round(double_col / double_col) from super', + 'select round(double_col / double_col) from t1', + 'select round(smallint_col / smallint_col) from super', + 'select round(smallint_col / smallint_col) from t1', + 'select round(tinyint_col / tinyint_col) from super', + 'select round(tinyint_col / tinyint_col) from t1', + 'select round(uint_col / uint_col) from super', + 'select round(uint_col / uint_col) from t1', + 'select round(ubigint_col / ubigint_col) from super', + 'select round(ubigint_col / ubigint_col) from t1', + 'select round(usmallint_col / usmallint_col) from super', + 'select round(usmallint_col / usmallint_col) from t1', + 'select round(utinyint_col / utinyint_col) from super', + 'select round(utinyint_col / utinyint_col) from t1', + 'select int_col, round(int_col), int_col from super', + 'select int_col, round(int_col), int_col from t1', + 'select bigint_col, round(bigint_col), bigint_col from super', + 'select bigint_col, round(bigint_col), bigint_col from t1', + 'select float_col, round(float_col), float_col from super', + 'select float_col, round(float_col), float_col from t1', + 'select double_col, round(double_col), double_col from super', + 'select double_col, round(double_col), double_col 
from t1', + 'select smallint_col, round(smallint_col), smallint_col from super', + 'select smallint_col, round(smallint_col), smallint_col from t1', + 'select tinyint_col, round(tinyint_col), tinyint_col from super', + 'select tinyint_col, round(tinyint_col), tinyint_col from t1', + 'select uint_col, round(uint_col), uint_col from super', + 'select uint_col, round(uint_col), uint_col from t1', + 'select ubigint_col, round(ubigint_col), ubigint_col from super', + 'select ubigint_col, round(ubigint_col), ubigint_col from t1', + 'select usmallint_col, round(usmallint_col), usmallint_col from super', + 'select usmallint_col, round(usmallint_col), usmallint_col from t1', + 'select utinyint_col, round(utinyint_col), utinyint_col from super', + 'select utinyint_col, round(utinyint_col), utinyint_col from t1', + 'select 1, round(int_col), 1 from super', + 'select 1, round(int_col), 1 from t1', + 'select 1, round(bigint_col), 1 from super', + 'select 1, round(bigint_col), 1 from t1', + 'select 1, round(float_col), 1 from super', + 'select 1, round(float_col), 1 from t1', + 'select 1, round(double_col), 1 from super', + 'select 1, round(double_col), 1 from t1', + 'select 1, round(smallint_col), 1 from super', + 'select 1, round(smallint_col), 1 from t1', + 'select 1, round(tinyint_col), 1 from super', + 'select 1, round(tinyint_col), 1 from t1', + 'select 1, round(uint_col), 1 from super', + 'select 1, round(uint_col), 1 from t1', + 'select 1, round(ubigint_col), 1 from super', + 'select 1, round(ubigint_col), 1 from t1', + 'select 1, round(usmallint_col), 1 from super', + 'select 1, round(usmallint_col), 1 from t1', + 'select 1, round(utinyint_col), 1 from super', + 'select 1, round(utinyint_col), 1 from t1', + 'select round(int_col) as anyName from super', + 'select round(int_col) as anyName from t1', + 'select round(bigint_col) as anyName from super', + 'select round(bigint_col) as anyName from t1', + 'select round(float_col) as anyName from super', + 'select 
round(float_col) as anyName from t1', + 'select round(double_col) as anyName from super', + 'select round(double_col) as anyName from t1', + 'select round(smallint_col) as anyName from super', + 'select round(smallint_col) as anyName from t1', + 'select round(tinyint_col) as anyName from super', + 'select round(tinyint_col) as anyName from t1', + 'select round(uint_col) as anyName from super', + 'select round(uint_col) as anyName from t1', + 'select round(ubigint_col) as anyName from super', + 'select round(ubigint_col) as anyName from t1', + 'select round(usmallint_col) as anyName from super', + 'select round(usmallint_col) as anyName from t1', + 'select round(utinyint_col) as anyName from super', + 'select round(utinyint_col) as anyName from t1'] + + shouldPass2 = ['select round(super.int_col) from super', + 'select round(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.bigint_col) from super', + 'select round(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.float_col) from super', + 'select round(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.double_col) from super', + 'select round(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.smallint_col) from super', + 'select round(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.tinyint_col) from super', + 'select round(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.uint_col) from super', + 'select round(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.ubigint_col) from super', + 'select 
round(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.usmallint_col) from super', + 'select round(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(super.utinyint_col) from super', + 'select round(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag', + 'select round(t1.int_col) from t1', + 'select round(t1.bigint_col) from t1', + 'select round(t1.float_col) from t1', + 'select round(t1.double_col) from t1', + 'select round(t1.smallint_col) from t1', + 'select round(t1.tinyint_col) from t1', + 'select round(t1.uint_col) from t1', + 'select round(t1.ubigint_col) from t1', + 'select round(t1.usmallint_col) from t1', + 'select round(t1.utinyint_col) from t1'] + + for s in range(len(select_command)): for f in range(len(from_command)): sql = "select " + select_command[s] + from_command[f] - if (select_command[s] == "round(int_col)"\ - or select_command[s] == "round(bigint_col)"\ - or select_command[s] == "round(smallint_col)" \ - or select_command[s] == "round(float_col)"\ - or select_command[s] == "round(double_col)"\ - or select_command[s] == "round(tinyint_col)"\ - or select_command[s] == "round(uint_col)"\ - or select_command[s] == "round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col)"\ - or select_command[s] == "1, round(int_col), 1"\ - or select_command[s] == "1, round(bigint_col), 1"\ - or select_command[s] == "1, round(float_col), 1"\ - or select_command[s] == "1, round(double_col), 1"\ - or select_command[s] == "1, round(smallint_col), 1"\ - or select_command[s] == "1, round(tinyint_col), 1"\ - or select_command[s] == "1, round(uint_col), 1"\ - or select_command[s] == "1, round(ubigint_col), 1"\ - or select_command[s] == "1, round(usmallint_col), 1"\ - or select_command[s] == "1, round(utinyint_col), 1"\ - 
or select_command[s] == "int_col, round(int_col), int_col"\ - or select_command[s] == "bigint_col, round(bigint_col), bigint_col"\ - or select_command[s] == "float_col, round(float_col), float_col"\ - or select_command[s] == "double_col, round(double_col), double_col"\ - or select_command[s] == "smallint_col, round(smallint_col), smallint_col"\ - or select_command[s] == "tinyint_col, round(tinyint_col), tinyint_col"\ - or select_command[s] == "uint_col, round(uint_col), uint_col"\ - or select_command[s] == "ubigint_col, round(ubigint_col), ubigint_col"\ - or select_command[s] == "usmallint_col, round(usmallint_col), usmallint_col"\ - or select_command[s] == "utinyint_col, round(utinyint_col), utinyint_col"\ - or select_command[s] == "round(int_col) as anyName"\ - or select_command[s] == "round(bigint_col) as anyName"\ - or select_command[s] == "round(float_col) as anyName"\ - or select_command[s] == "round(double_col) as anyName"\ - or select_command[s] == "round(smallint_col) as anyName"\ - or select_command[s] == "round(tinyint_col) as anyName"\ - or select_command[s] == "round(uint_col) as anyName"\ - or select_command[s] == "round(ubigint_col) as anyName"\ - or select_command[s] == "round(usmallint_col) as anyName"\ - or select_command[s] == "round(utinyint_col) as anyName"\ - or select_command[s] == "round(int_col) + round(int_col)"\ - or select_command[s] == "round(bigint_col) + round(bigint_col)"\ - or select_command[s] == "round(float_col) + round(float_col)"\ - or select_command[s] == "round(double_col) + round(double_col)"\ - or select_command[s] == "round(smallint_col) + round(smallint_col)"\ - or select_command[s] == "round(tinyint_col) + round(tinyint_col)"\ - or select_command[s] == "round(uint_col) + round(uint_col)"\ - or select_command[s] == "round(ubigint_col) + round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col) + round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col) + round(utinyint_col)"\ - or select_command[s] 
== "round(int_col) + round(int_col)"\ - or select_command[s] == "round(bigint_col) + round(bigint_col)"\ - or select_command[s] == "round(float_col) + round(float_col)"\ - or select_command[s] == "round(double_col) + round(double_col)"\ - or select_command[s] == "round(smallint_col) + round(smallint_col)"\ - or select_command[s] == "round(tinyint_col) + round(tinyint_col)"\ - or select_command[s] == "round(uint_col) + round(uint_col)"\ - or select_command[s] == "round(ubigint_col) + round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col) + round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col) + cei(utinyint_col)"\ - or select_command[s] == "round(int_col) - round(int_col)"\ - or select_command[s] == "round(bigint_col) - round(bigint_col)"\ - or select_command[s] == "round(float_col) - round(float_col)"\ - or select_command[s] == "round(double_col) - round(double_col)"\ - or select_command[s] == "round(smallint_col) - round(smallint_col)"\ - or select_command[s] == "round(tinyint_col) - round(tinyint_col)"\ - or select_command[s] == "round(uint_col) - round(uint_col)"\ - or select_command[s] == "round(ubigint_col) - round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col) - round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col) - round(utinyint_col)"\ - or select_command[s] == "round(int_col) * round(int_col)"\ - or select_command[s] == "round(bigint_col) * round(bigint_col)"\ - or select_command[s] == "round(float_col) * round(float_col)"\ - or select_command[s] == "round(double_col) * round(double_col)"\ - or select_command[s] == "round(smallint_col) * round(smallint_col)"\ - or select_command[s] == "round(tinyint_col) * round(tinyint_col)"\ - or select_command[s] == "round(uint_col) * round(uint_col)"\ - or select_command[s] == "round(ubigint_col) * round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col) * round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col) * 
round(utinyint_col)"\ - or select_command[s] == "round(int_col) / round(int_col)"\ - or select_command[s] == "round(bigint_col) / round(bigint_col)"\ - or select_command[s] == "round(float_col) / round(float_col)"\ - or select_command[s] == "round(double_col) / round(double_col)"\ - or select_command[s] == "round(smallint_col) / round(smallint_col)"\ - or select_command[s] == "round(tinyint_col) / round(tinyint_col)"\ - or select_command[s] == "round(uint_col) / round(uint_col)"\ - or select_command[s] == "round(ubigint_col) / round(ubigint_col)"\ - or select_command[s] == "round(usmallint_col) / round(usmallint_col)"\ - or select_command[s] == "round(utinyint_col) / round(utinyint_col)"): + if sql in shouldPass: tdSql.query(sql) else: tdSql.error(sql) @@ -1475,40 +1942,10 @@ class TDTestCase: sql = "select " + simple_select_command[ sim] + advance_from_command[fr] + filter_command[ filter] + fill_command[fill] - if sql == "select round(t1.int_col) from t1"\ - or sql == "select round(super.int_col) from super"\ - or sql == "select round(t1.bigint_col) from t1"\ - or sql == "select round(super.bigint_col) from super"\ - or sql == "select round(t1.smallint_col) from t1"\ - or sql == "select round(super.smallint_col) from super"\ - or sql == "select round(t1.tinyint_col) from t1"\ - or sql == "select round(super.tinyint_col) from super"\ - or sql == "select round(t1.float_col) from t1"\ - or sql == "select round(super.float_col) from super"\ - or sql == "select round(t1.double_col) from t1"\ - or sql == "select round(super.double_col) from super"\ - or sql == "select round(t1.uint_col) from t1"\ - or sql == "select round(super.uint_col) from super"\ - or sql == "select round(t1.ubigint_col) from t1"\ - or sql == "select round(super.ubigint_col) from super"\ - or sql == "select round(t1.usmallint_col) from t1"\ - or sql == "select round(super.usmallint_col) from super"\ - or sql == "select round(t1.utinyint_col) from t1"\ - or sql == "select round(super.utinyint_col) 
from super"\ - or sql == "select round(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ - or sql == "select round(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + if sql in shouldPass2: tdSql.query(sql) else: - tdSql.error(sql) - + tdSql.error(sql) def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py index 3ff2b82bf6b326ed4d07a5a51027c9e266c2fd72..b9eadeb3443127c927b29fbb16bda4c12378e71a 100644 --- a/tests/pytest/functions/function_stddev.py +++ b/tests/pytest/functions/function_stddev.py @@ -123,8 +123,33 @@ class TDTestCase: tdSql.execute("insert into t1 values(now, 1, 'abc');") tdLog.info("select stddev(k) from t1 where b <> 'abc' interval(1s);") tdSql.query("select stddev(k) from t1 where b <> 'abc' interval(1s);") - - + + 
tdSql.execute("create table stdtable(ts timestamp, col1 int) tags(loc nchar(64))") + tdSql.execute("create table std1 using stdtable tags('beijing')") + tdSql.execute("create table std2 using stdtable tags('shanghai')") + tdSql.execute("create table std3 using stdtable tags('河南')") + tdSql.execute("insert into std1 values(now + 1s, 1)") + tdSql.execute("insert into std1 values(now + 2s, 2);") + tdSql.execute("insert into std2 values(now + 3s, 1);") + tdSql.execute("insert into std2 values(now + 4s, 2);") + tdSql.execute("insert into std3 values(now + 5s, 4);") + tdSql.execute("insert into std3 values(now + 6s, 8);") + tdSql.query("select stddev(col1) from stdtable group by loc;") + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, 0.5) + tdSql.checkData(2, 0, 0.5) + + tdSql.execute("create table stdtableint(ts timestamp, col1 int) tags(num int)") + tdSql.execute("create table stdint1 using stdtableint tags(1)") + tdSql.execute("create table stdint2 using stdtableint tags(2)") + tdSql.execute("insert into stdint1 values(now + 1s, 1)") + tdSql.execute("insert into stdint1 values(now + 2s, 2);") + tdSql.execute("insert into stdint2 values(now + 3s, 1);") + tdSql.execute("insert into stdint2 values(now + 4s, 2);") + tdSql.query("select stddev(col1) from stdtableint group by num") + tdSql.checkData(0, 0, 0.5) + tdSql.checkData(1, 0, 0.5) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/functions/variable_httpDbNameMandatory.py b/tests/pytest/functions/variable_httpDbNameMandatory.py new file mode 100644 index 0000000000000000000000000000000000000000..3be620ad1e1631126697d93e388df82be3e9d57c --- /dev/null +++ b/tests/pytest/functions/variable_httpDbNameMandatory.py @@ -0,0 +1,146 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from requests.auth import HTTPBasicAuth +import requests +import json + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def rest_query(self,sql,db=''): + host = '127.0.0.1' + user = 'root' + password = 'taosdata' + port =6041 + if db == '': + url = "http://{}:{}/rest/sql".format(host, port ) + else: + url = "http://{}:{}/rest/sql/{}".format(host, port, db ) + try: + r = requests.post(url, + data = 'use db' , + auth = HTTPBasicAuth('root', 'taosdata')) + r = requests.post(url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + except: + print("REST API Failure (TODO: more info here)") 
+ raise + rj = dict(r.json()['data']) + return rj + + def TS834(self): + tdLog.printNoPrefix("==========TS-782==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "httpDbNameMandatory") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 0) + + rj = self.rest_query("show variables") + if 'httpDbNameMandatory' not in rj: + tdLog.info('has no httpDbNameMandatory shown') + tdLog.exit(1) + if rj['httpDbNameMandatory'] != '0': + tdLog.info('httpDbNameMandatory data:%s == expect:0'%rj['httpDbNameMandatory']) + tdLog.exit(1) + tdLog.info("httpDbNameMandatory by restful query data:%s == expect:0" % (rj['httpDbNameMandatory'])) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdLog.info("restart taosd ") + tdDnodes.stop(index) + cmd = f"echo 'httpDbNameMandatory 1' >> {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 1) + + rj = self.rest_query("show variables", 'db') + if 'httpDbNameMandatory' not in rj: + tdLog.info('has no httpDbNameMandatory shown') + tdLog.exit(1) + if rj['httpDbNameMandatory'] != '1': + tdLog.info('httpDbNameMandatory data:%s == expect:0'%rj['httpDbNameMandatory']) + tdLog.exit(1) + tdLog.info("httpDbNameMandatory by restful query data:%s == expect:1" % (rj['httpDbNameMandatory'])) + + def run(self): + + #TS-834 https://jira.taosdata.com:18080/browse/TS-834 + self.TS834() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/import_merge/import_update_0.py b/tests/pytest/import_merge/import_update_0.py index 
66e0d7d14420251a227e5f0c2bacec219273d032..269f2e88bbb25c64ef512de9f21cea31196b5c75 100644 --- a/tests/pytest/import_merge/import_update_0.py +++ b/tests/pytest/import_merge/import_update_0.py @@ -1055,7 +1055,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -N -d taosdemo -t 100 -n 100 -l 1000 -y" % binPath) + os.system("%staosBenchmark -N -d taosdemo -t 100 -n 100 -l 1000 -y" % binPath) tdSql.execute('''insert into table_2 values( %d , NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);''' %(self.ts - 200)) sql = '''select * from table_2 where ts = %d ;''' %(self.ts - 200) diff --git a/tests/pytest/import_merge/import_update_1.py b/tests/pytest/import_merge/import_update_1.py index 7edfd610c2e6eac6588ae78f81c939118845973d..ec204f0e38b7ef48c74105cd3174a379bb2f6df8 100644 --- a/tests/pytest/import_merge/import_update_1.py +++ b/tests/pytest/import_merge/import_update_1.py @@ -1055,7 +1055,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -N -d taosdemo -t 100 -n 100 -l 1000 -y" % binPath) + os.system("%staosBenchmark -N -d taosdemo -t 100 -n 100 -l 1000 -y" % binPath) tdSql.execute('''insert into table_2 values( %d , NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);''' %(self.ts - 200)) sql = '''select * from table_2 where ts = %d ;''' %(self.ts - 200) diff --git a/tests/pytest/import_merge/import_update_2.py b/tests/pytest/import_merge/import_update_2.py index a0efe31ab25f68a898a124e0be22c369fedabf7f..5baeec674056ee98b18158bfff9e573808e31652 100644 --- a/tests/pytest/import_merge/import_update_2.py +++ b/tests/pytest/import_merge/import_update_2.py @@ -1181,7 +1181,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -N -d taosdemo -t 100 -n 100 -l 1000 -y" % binPath) + os.system("%staosBenchmark -N -d 
taosdemo -t 100 -n 100 -l 1000 -y" % binPath) tdLog.info("========== stable ==========") tdSql.execute('''insert into table_2 values( %d , NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);''' %(self.ts - 200)) diff --git a/tests/pytest/insert/insertJSONPayload.py b/tests/pytest/insert/insertJSONPayload.py index a2e773328a7a346f17d8c256cce79d7beb9628e4..b151aa946c4f6c6a808df70e921adb1f6f11f5df 100644 --- a/tests/pytest/insert/insertJSONPayload.py +++ b/tests/pytest/insert/insertJSONPayload.py @@ -129,7 +129,7 @@ class TDTestCase: print("schemaless_insert result {}".format(code)) tdSql.query("describe stb0_3") - tdSql.checkData(1, 1, "BINARY") + tdSql.checkData(1, 1, "NCHAR") payload = [''' { @@ -835,7 +835,7 @@ class TDTestCase: code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value) print("schemaless_insert result {}".format(code)) - tdSql.query("describe `stable`") + tdSql.query("describe `STABLE`") tdSql.checkRows(9) #tdSql.query("select * from `key`") diff --git a/tests/pytest/insert/insertTelnetLines.py b/tests/pytest/insert/insertTelnetLines.py index 149e62c362ab802fbbc4f2d939b3bf149cbf7e1b..774027ffed1199de643957970e3cd8122a25905c 100644 --- a/tests/pytest/insert/insertTelnetLines.py +++ b/tests/pytest/insert/insertTelnetLines.py @@ -333,7 +333,7 @@ class TDTestCase: tdSql.query('describe `!@#$.%^&*()`') tdSql.checkRows(9) - tdSql.query('describe `stable`') + tdSql.query('describe `STABLE`') tdSql.checkRows(9) #tdSql.query('select * from `123`') diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py index acc43d80e719065706aaee95c5cdbaf5235ae04b..d95df3a8491f73f7279e583afd446a7182adf823 100644 --- a/tests/pytest/insert/line_insert.py +++ b/tests/pytest/insert/line_insert.py @@ -86,6 +86,67 @@ class TDTestCase: #tdSql.query('select tbname, * from childtable') #tdSql.checkRows(1) + ###Test when tag is omitted + lines3 = [ "sti 
c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "sti c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000" + ] + + code = self._conn.schemaless_insert(lines3, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from sti') + tdSql.checkRows(2) + + tdSql.query('select tbname from sti') + tdSql.checkRows(1) + + lines4 = [ "stp c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "stp c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000" + ] + code = self._conn.schemaless_insert([ lines4[0] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines4[1] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from stp') + tdSql.checkRows(2) + + tdSql.query('select tbname from stp') + tdSql.checkRows(1) + + lines5 = [ "stq c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "stq,t1=abc c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000", + "stq,t2=abc c1=3i64,c3=L\"passitagin\",c4=5f64,c5=5f64,c6=true 1626006833640000000" + ] + code = self._conn.schemaless_insert([ lines5[0] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines5[1] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines5[2] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from stq') + tdSql.checkRows(3) + + tdSql.query('select tbname from stq') + tdSql.checkRows(3) + + lines6 = [ "str 
c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "str,t1=abc c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000", + "str,t2=abc c1=3i64,c3=L\"passitagin\",c4=5f64,c5=5f64,c6=true 1626006833640000000" + ] + code = self._conn.schemaless_insert(lines6, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from str') + tdSql.checkRows(3) + + tdSql.query('select tbname from str') + tdSql.checkRows(3) + ###Special Character and keyss self._conn.schemaless_insert([ "1234,id=3456,abc=4i64,def=3i64 123=3i64,int=2i64,bool=false,into=5f64,column=7u64,!@#$.%^&*()=false 1626006933641", @@ -112,7 +173,7 @@ class TDTestCase: tdSql.query('describe `!@#$.%^&*()`') tdSql.checkRows(9) - tdSql.query('describe `stable`') + tdSql.query('describe `STABLE`') tdSql.checkRows(9) #tdSql.query('select * from `3456`') diff --git a/tests/pytest/insert/openTsdbTelnetLinesInsert.py b/tests/pytest/insert/openTsdbTelnetLinesInsert.py index de27ff7a08cb46a7d7219edaa186edad6f662716..c6a84c7def8301fa6ecd1752f9238731ce922338 100644 --- a/tests/pytest/insert/openTsdbTelnetLinesInsert.py +++ b/tests/pytest/insert/openTsdbTelnetLinesInsert.py @@ -28,6 +28,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self._conn = conn + self.smlChildTableName_value = tdSql.getVariable("smlChildTableName")[0].upper() def createDb(self, name="test", db_update_tag=0): if db_update_tag == 0: @@ -159,14 +160,27 @@ class TDTestCase: td_col_type_list = [] for elm in stb_tag_list: - if "id=" in elm.lower(): - tb_name = elm.split('=')[1] + if self.smlChildTableName_value == "ID": + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0].lower()) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + 
td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) else: - tag_name_list.append(elm.split("=")[0].lower()) - tag_value_list.append(elm.split("=")[1]) - tb_name = "" - td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) - td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + if "id" == elm.split("=")[0].lower(): + tag_name_list.insert(0, elm.split("=")[0]) + tag_value_list.insert(0, elm.split("=")[1]) + td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) col_name_list.append('value') col_value_list.append(stb_col_value) @@ -201,7 +215,7 @@ class TDTestCase: if tb_name == "": tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' if t0 == "": - t0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + t0 = "t" if value == "": value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) if id_upper_tag is not None: @@ -232,7 +246,7 @@ class TDTestCase: if c_blank_tag is not None: sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' if t_blank_tag is not None: - sql_seq = f'{stb_name} {ts} {value} {id}={tb_name}' + sql_seq = f'{stb_name} {ts} {value}' if chinese_tag is not None: sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"' if multi_field_tag is not None: @@ -260,11 +274,10 @@ class TDTestCase: def genLongSql(self, tag_count): stb_name = tdCom.getLongName(7, mode="letters") - tb_name = f'{stb_name}_1' tag_str = self.genMulTagColStr("tag", tag_count) col_str = 
self.genMulTagColStr("col") ts = "1626006833641" - long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + f'id={tb_name}' + ' ' + tag_str + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str return long_sql, stb_name def getNoIdTbName(self, stb_name): @@ -451,11 +464,11 @@ class TDTestCase: def stbTbNameCheckCase(self): """ test illegal id name - mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?" + mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') tdCom.cleanTb() - rstr = list("~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?") + rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") for i in rstr: input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"") self.resCmp(input_sql, f'`{stb_name}`') @@ -524,16 +537,25 @@ class TDTestCase: self.resCmp(input_sql, stb_name) tdSql.query(f'select * from {stb_name}') tdSql.checkRows(1) - for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]: + if self.smlChildTableName_value == "ID": + for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd' + else: + input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) raise Exception("should not reach here") except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) - - 
input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd' - stb_name = "Abcdffgg" + input_sql = 'Abcdffgg 1626006833640 False T1=127i8' + stb_name = f'`{input_sql.split(" ")[0]}`' self.resCmp(input_sql, stb_name) + tdSql.execute('drop table `Abcdffgg`') def tagNameLengthCheckCase(self): """ @@ -766,6 +788,7 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) ##### stb exist ##### + @tdCom.smlPass def noIdStbExistCheckCase(self): """ case no id when stb exist @@ -790,6 +813,7 @@ class TDTestCase: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) self.resCmp(input_sql, stb_name) + @tdCom.smlPass def tagColBinaryNcharLengthCheckCase(self): """ check length increase @@ -802,6 +826,7 @@ class TDTestCase: input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + @tdCom.smlPass def tagColAddDupIDCheckCase(self): """ check tag count add, stb and tb duplicate @@ -833,6 +858,7 @@ class TDTestCase: tdSql.checkData(0, 12, None) self.createDb() + @tdCom.smlPass def tagColAddCheckCase(self): """ check tag count add @@ -880,8 +906,7 @@ class TDTestCase: tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') tdCom.cleanTb() stb_name = tdCom.getLongName(7, "letters") - tb_name = f'{stb_name}_1' - input_sql = f'{stb_name} 1626006833640 f id={tb_name} t2={tdCom.getLongName(1, "letters")}' + input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) # * legal nchar could not be larger than 16374/4 @@ -1062,15 +1087,24 @@ class TDTestCase: def tbnameTagsColsNameCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') tdCom.cleanTb() - input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 
"t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' - self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) - query_sql = 'select * from `rfa$sta`' - query_res = tdSql.query(query_sql, True) - tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) - col_tag_res = tdSql.getColNameList(query_sql) - tdSql.checkEqual(col_tag_res, ['ts', 'value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) - tdSql.execute('drop table `rfa$sta`') - + if self.smlChildTableName_value == "ID": + input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', 'value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) + tdSql.execute('drop table `rFa$sta`') + else: + input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], 
TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', 'value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1']) + tdSql.execute('drop table `rFa$sta`') def genSqlList(self, count=5, stb_name="", tb_name=""): """ stb --> supertable @@ -1153,11 +1187,12 @@ class TDTestCase: s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) - expected_tb_name = self.getNoIdTbName(stb_name)[0] - tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) tdSql.query(f"select * from {stb_name};") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) def sStbStbDdataAtInsertMultiThreadCheckCase(self): """ @@ -1171,11 +1206,12 @@ class TDTestCase: s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) - expected_tb_name = self.getNoIdTbName(stb_name)[0] - tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) 
tdSql.query(f"select * from {stb_name};") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) def sStbStbDdataMtInsertMultiThreadCheckCase(self): """ @@ -1189,11 +1225,12 @@ class TDTestCase: s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) - expected_tb_name = self.getNoIdTbName(stb_name)[0] - tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) tdSql.query(f"select * from {stb_name};") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) def sStbDtbDdataInsertMultiThreadCheckCase(self): """ @@ -1254,7 +1291,7 @@ class TDTestCase: (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) @@ -1270,11 +1307,11 @@ class TDTestCase: s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) tdSql.query(f"select * from {stb_name} where t8 is not NULL") - tdSql.checkRows(6) + tdSql.checkRows(6) if self.smlChildTableName_value == "ID" 
else tdSql.checkRows(1) def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): """ @@ -1292,12 +1329,12 @@ class TDTestCase: (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(1) + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) for t in ["t10", "t11"]: tdSql.query(f"select * from {stb_name} where {t} is not NULL;") - tdSql.checkRows(0) + tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5) def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): """ @@ -1386,7 +1423,7 @@ class TDTestCase: self.sStbDtbDdataMtInsertMultiThreadCheckCase() self.sStbDtbDdataAtInsertMultiThreadCheckCase() self.sStbStbDdataDtsInsertMultiThreadCheckCase() - self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() self.sStbDtbDdataDtsInsertMultiThreadCheckCase() self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() @@ -1395,6 +1432,7 @@ class TDTestCase: print("running {}".format(__file__)) self.createDb() try: + # self.blankTagInsertCheckCase() self.runAll() except Exception as err: print(''.join(traceback.format_exception(None, err, err.__traceback__))) diff --git a/tests/pytest/insert/openTsdbTelnetTaosadapterInsert.py b/tests/pytest/insert/openTsdbTelnetTaosadapterInsert.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8af6c566c60517f9a2e2a72c1aab1e01be5b82 --- /dev/null +++ b/tests/pytest/insert/openTsdbTelnetTaosadapterInsert.py @@ -0,0 +1,1390 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, 
Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import SchemalessError +import time +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +from util.types import TDSmlProtocolType, TDSmlTimestampType +import threading +import requests + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self.smlChildTableName_value = tdSql.getVariable("smlChildTableName")[0].upper() + + def timeTrans(self, time_value): + if int(time_value) == 0: + ts = time.time() + else: + if len(time_value) == 13: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000 + elif len(time_value) == 10: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def getTdTypeValue(self, value, vtype="col"): + if vtype == "col": + if value.lower().endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.lower().endswith("i16"): + td_type = "SMALLINT" + td_tag_value = 
''.join(list(value)[:-3]) + elif value.lower().endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.lower().endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + if "e" in value.lower(): + td_tag_value = str(float(td_tag_value)) + elif value.lower().startswith('l"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = "BINARY" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value.lower() == "true": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value.lower() == "false": + td_type = "BOOL" + td_tag_value = "False" + elif value.isdigit(): + td_type = "DOUBLE" + td_tag_value = str(float(value)) + else: + td_type = "DOUBLE" + if "e" in value.lower(): + td_tag_value = str(float(value)) + else: + td_tag_value = value + elif vtype == "tag": + td_type = "NCHAR" + td_tag_value = str(value) + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "BINARY": + type_num_list.append(8) + elif tp.upper() 
== "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql): + input_sql_split_list = input_sql.split(" ") + stb_name = input_sql_split_list[0] + stb_tag_list = input_sql_split_list[3:] + stb_col_value = input_sql_split_list[2] + ts_value = self.timeTrans(input_sql_split_list[1]) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + for elm in stb_tag_list: + if self.smlChildTableName_value == "ID": + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + if "id" == elm.split("=")[0].lower(): + tag_name_list.insert(0, elm.split("=")[0]) + tag_value_list.insert(0, elm.split("=")[1]) + td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + + col_name_list.append('value') + col_value_list.append(stb_col_value) + + td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1]) + td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + 
final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", + t8="L\"ncharTagValue\"", ts="1626006833641", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None, point_trans_tag=None): + if stb_name == "": + stb_name = tdCom.getLongName(len=6, mode="letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + t0 = "t" + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if id_upper_tag is not None: + id = "ID" + else: + id = "id" + if id_mixul_tag is not None: + id = random.choice(["iD", "Id"]) + else: + id = "id" + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}' + if id_change_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_double_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} 
t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}' + if t_mul_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_multi_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value}' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}' + if point_trans_tag is not None: + sql_seq = f'.point.trans.test {ts} {value} t0={t0}' + return sql_seq, stb_name + + def genMulTagColStr(self, genType, count=1): + """ + genType must be tag/col + """ + tag_str = "" + col_str = "" + if genType == "tag": + for i in range(0, count): + if i < (count-1): + tag_str += f't{i}=f ' + else: + tag_str += f't{i}=f' + return tag_str + if genType == "col": + col_str = "t" + return col_str + + def genLongSql(self, tag_count): + stb_name = tdCom.getLongName(7, mode="letters") + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col") + ts = "1626006833641" + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str + return long_sql, stb_name + + def getNoIdTbName(self, stb_name): + query_sql = f"select tbname from {stb_name}" + tb_name = self.resHandle(query_sql, True)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag): + tdSql.execute('reset query cache') + row_info 
= tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + sub_list = [] + for row_mem in row_info: + for i in row_mem: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, none_check_tag=None, precision=None): + expect_list = self.inputHandle(input_sql) + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True) + if ts == 0: + res_ts = tdCom.dateToTs(res_row_list[0][0]) + current_time = time.time() + if current_time - res_ts < 60: + tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:]) + else: + print("timeout") + tdSql.checkEqual(res_row_list[0], expect_list[0]) + else: + if none_check_tag is not None: + none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"] + none_index_list.reverse() + for j in none_index_list: + res_row_list[0].pop(j) + expect_list[0].pop(j) + tdSql.checkEqual(res_row_list[0], expect_list[0]) + tdSql.checkEqual(res_field_list_without_ts, expect_list[1]) + for i in range(len(res_type_list)): + tdSql.checkEqual(res_type_list[i], expect_list[2][i]) + tdSql.checkEqual(res.status_code, 200) + + def initCheckCase(self): + """ + normal tags and cols, one for every elm + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + + def boolTypeCheckCase(self): + """ + check all normal type + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + full_type_list = ["f", "F", "false", "False", "t", "T", "true", 
"True"] + for t_type in full_type_list: + input_sql, stb_name = self.genFullTypeSql(t0=t_type) + self.resCmp(input_sql, stb_name) + + def symbolsCheckCase(self): + """ + check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? + """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = f'L{binary_symbols}' + input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols) + input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols) + self.resCmp(input_sql1, stb_name1) + self.resCmp(input_sql2, stb_name2) + + def tsCheckCase(self): + """ + test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(ts=1626006834) + self.resCmp(input_sql, stb_name) + + tdCom.restApiPost("drop database if exists test_ts") + tdCom.restApiPost("create database if not exists test_ts precision 'ms'") + tdSql.execute("use test_ts") + input_sql = 'test_ms 1626006833640 t t0=t\ntest_ms 1626006833641 f t0=t' + tdCom.schemalessApiPost(input_sql, url_type="telnet", dbname="test_ts") + res = tdSql.query('select * from test_ms', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000") + tdCom.createDb(api_type="restful") + + def openTstbTelnetTsCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = 
f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts=0) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]: + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid combination of client/service time", res.text) + + + def idSeqCheckCase(self): + """ + check id.index in tags + eg: t0=**,id=**,t1=** + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True) + self.resCmp(input_sql, stb_name) + + @tdCom.smlPass + def idLetterCheckCase(self): + """ + check id param + eg: id and ID + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True) + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True) + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True) + 
self.resCmp(input_sql, stb_name) + + def noIdCheckCase(self): + """ + id not exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + query_sql = f"select tbname from {stb_name}" + res_row_list = self.resHandle(query_sql, True)[0] + if len(res_row_list[0][0]) > 0: + tdSql.checkColNameList(res_row_list, res_row_list) + else: + tdSql.checkColNameList(res_row_list, "please check noIdCheckCase") + + def maxColTagCheckCase(self): + """ + max tag count is 128 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + for input_sql in [self.genLongSql(128)[0]]: + tdCom.cleanTb(type="restful") + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + for input_sql in [self.genLongSql(129)[0]]: + tdCom.cleanTb(type="restful") + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Table does not exist", res.text) + + def stbTbNameCheckCase(self): + """ + test illegal id name + mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" 
+ """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") + for i in rstr: + input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"") + self.resCmp(input_sql, f'`{stb_name}`') + tdCom.restApiPost(f'drop table if exists `{stb_name}`') + + def idStartWithNumCheckCase(self): + """ + id is start with num + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb") + self.resCmp(input_sql, stb_name) + + def nowTsCheckCase(self): + """ + check now unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(ts="now")[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid combination of client/service time", res.text) + + def dateFormatTsCheckCase(self): + """ + check date format ts unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid combination of client/service time", res.text) + + def illegalTsCheckCase(self): + """ + check ts format like 16260068336390us19 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid combination of client/service time", res.text) + + def tbnameCheckCase(self): + """ + check length 192 + check upper tbname + chech upper tag + length of 
stb_name tb_name <= 192 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + stb_name_192 = tdCom.getLongName(len=192, mode="letters") + tb_name_192 = tdCom.getLongName(len=192, mode="letters") + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192) + self.resCmp(input_sql, stb_name) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + if self.smlChildTableName_value == "ID": + for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]: + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Table name too long", res.text) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd' + else: + input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Table name too long", res.text) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8' + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name) + tdCom.restApiPost('drop table `Abcdffgg`') + + def tagNameLengthCheckCase(self): + """ + check tag name limit <= 62 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tag_name = tdCom.getLongName(61, "letters") + tag_name = f'T{tag_name}' + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f' + self.resCmp(input_sql, stb_name) + input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + 
tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid column length", res.text) + + def tagValueLengthCheckCase(self): + """ + check full type tag value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + # nchar + # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid operation", res.text) + + def colValueLengthCheckCase(self): + """ + check full type col value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + # i8 + for value in ["-127i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb(type="restful") + for value in ["-128i8", "128i8"]: + input_sql = self.genFullTypeSql(value=value)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + # i16 + tdCom.cleanTb(type="restful") + for value in ["-32767i16"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb(type="restful") + for value in ["-32768i16", "32768i16"]: + input_sql = self.genFullTypeSql(value=value)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # i32 + tdCom.cleanTb(type="restful") + for value in ["-2147483647i32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + 
tdCom.cleanTb(type="restful") + for value in ["-2147483648i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(value=value)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # i64 + tdCom.cleanTb(type="restful") + for value in ["-9223372036854775807i64"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb(type="restful") + for value in ["-9223372036854775808i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(value=value)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # f32 + tdCom.cleanTb(type="restful") + for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # * limit set to 4028234664*(10**38) + tdCom.cleanTb(type="restful") + for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(value=value)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # f64 + tdCom.cleanTb(type="restful") + for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # * limit set to 1.797693134862316*(10**308) + tdCom.cleanTb(type="restful") + for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + input_sql = self.genFullTypeSql(value=value)[0] + res = 
tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # # binary + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + + tdCom.cleanTb(type="restful") + input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Table does not exist", res.text) + + # nchar + # * legal nchar could not be larger than 16374/4 + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + + tdCom.cleanTb(type="restful") + input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Table does not exist", res.text) + + def tagColIllegalValueCheckCase(self): + + """ + test illegal tag col value + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + # bool + for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: + input_sql1, stb_name = self.genFullTypeSql(t0=i) + self.resCmp(input_sql1, stb_name) + input_sql2, stb_name = self.genFullTypeSql(value=i) + self.resCmp(input_sql2, stb_name) + + # i8 i16 i32 i64 f32 f64 + for input_sql in [ + self.genFullTypeSql(value="1s2i8")[0], + self.genFullTypeSql(value="1s2i16")[0], + self.genFullTypeSql(value="1s2i32")[0], + self.genFullTypeSql(value="1s2i64")[0], + self.genFullTypeSql(value="11.1s45f32")[0], + 
self.genFullTypeSql(value="11.1s45f64")[0], + ]: + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + # check accepted binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+{}|[]、「」:;'): + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"' + self.resCmp(input_sql1, input_sql1.split(" ")[0]) + self.resCmp(input_sql2, input_sql2.split(" ")[0]) + + def blankCheckCase(self): + ''' + check blank case + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t', + f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', + f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"', + f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" '] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + + def duplicateIdTagColInsertCheckCase(self): + """ + check duplicate Id Tag Col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] + res = tdCom.schemalessApiPost(sql=input_sql_id, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("duplicated tag names", res.text) + + input_sql = self.genFullTypeSql()[0] + input_sql_tag = input_sql.replace("t5", "t6") + res = tdCom.schemalessApiPost(sql=input_sql_tag, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("duplicated tag names", res.text) + + ##### stb exist ##### + @tdCom.smlPass + def 
noIdStbExistCheckCase(self): + """ + case no id when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") + self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def duplicateInsertExistCheckCase(self): + """ + check duplicate insert when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + self.resCmp(input_sql, stb_name) + + @tdCom.smlPass + def tagColBinaryNcharLengthCheckCase(self): + """ + check length increase + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + tb_name = tdCom.getLongName(5, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + + @tdCom.smlPass + def tagColAddDupIDCheckCase(self): + """ + check tag count add, stb and tb duplicate + * tag: alter table ... 
+ * col: when update==0 and ts is same, unchange + * so this case tag&&value will be added, + * col is added without value when update==0 + * col is added with value when update==1 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + for db_update_tag in [0, 1]: + if db_update_tag == 1 : + tdCom.createDb("test_update", db_update_tag=db_update_tag, api_type="restful") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True) + if db_update_tag == 1 : + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + else: + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 1, True) + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + tdCom.createDb(api_type="restful") + + @tdCom.smlPass + def tagColAddCheckCase(self): + """ + check tag count add + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") + self.resCmp(input_sql, stb_name) + tb_name_1 = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"') + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], ['None', 'None']) + self.resCmp(input_sql, stb_name, 
condition=f'where tbname like "{tb_name}"', none_check_tag=True) + + def tagMd5Check(self): + """ + condition: stb not change + insert two table, keep tag unchange, change col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name1 = self.getNoIdTbName(stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True) + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self): + """ + check nchar length limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + + # * legal nchar could not be larger than 16374/4 + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}' + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + print(res) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}' + res = tdCom.schemalessApiPost(sql=input_sql, 
url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid operation", res.text) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def batchInsertCheckCase(self): + """ + test batch insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(8, "letters") + tdCom.restApiPost(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + + lines = f'st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"\n\ +st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64\n\ +{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"\n\ +stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64\n\ +st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"\n\ +{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"\n\ +{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"\n\ +st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64\n\ +st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64' + + res = tdCom.schemalessApiPost(sql=lines, url_type="telnet") + tdSql.checkEqual(res.status_code, 200) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) + + def multiInsertCheckCase(self, count): + """ + test multi insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + long_sql = '' + stb_name = tdCom.getLongName(8, "letters") + tdCom.restApiPost(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))') + for i in range(count): + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + long_sql += f'{input_sql}\n' + res = tdCom.schemalessApiPost(sql=long_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 200) + tdSql.query('show tables') + 
tdSql.checkRows(count) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(8, "letters") + lines = f"st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"\n\ + {stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\"" + res = tdCom.schemalessApiPost(sql=lines, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid combination of client/service time", res.text) + + def multiColsInsertCheckCase(self): + """ + test multi cols insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(c_multi_tag=True)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Syntax error in Line", res.text) + + def blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Invalid value in client", res.text) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + print(input_sql) + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Syntax error in Line", res.text) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + 
self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + res = tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + tdSql.checkEqual(res.status_code, 500) + tdSql.checkIn("Syntax error in Line", res.text) + + def spellCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 
t5=22.123456789F64', + f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64'] + for input_sql in input_sql_list: + stb_name = input_sql.split(' ')[0] + self.resCmp(input_sql, stb_name) + + def pointTransCheckCase(self): + """ + metric value "." trans to "_" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genFullTypeSql(point_trans_tag=True)[0] + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name) + tdCom.restApiPost("drop table `.point.trans.test`") + + def defaultTypeCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + + def tbnameTagsColsNameCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + 
tdCom.cleanTb(type="restful") + if self.smlChildTableName_value == "ID": + input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', 'value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) + else: + input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', 'value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1']) + tdCom.restApiPost('drop table `rFa$sta`') + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + 
d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, 
"letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list + + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=tdCom.schemalessApiPost,args=(insert_sql[0], "telnet")) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def stbInsertMultiThreadCheckCase(self): + """ + thread input different stb + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql = self.genSqlList()[0] + self.multiThreadRun(self.genMultiThreadSeq(input_sql)) + tdSql.query(f"show tables;") + tdSql.checkRows(5) + + def sStbStbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function 
is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, add columes and tags, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, minus columes and tags, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, 
"letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] + print(s_stb_s_tb_m_tag_list) + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + + def sStbDtbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 
t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 
t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + 
s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + 
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb(type="restful") + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def test(self): + try: + input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64' + tdCom.schemalessApiPost(sql=input_sql, url_type="telnet") + except SchemalessError as err: + print(err.errno) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + self.symbolsCheckCase() + self.tsCheckCase() + self.openTstbTelnetTsCheckCase() + self.idSeqCheckCase() + self.idLetterCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + self.stbTbNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + 
self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tbnameCheckCase() + self.tagNameLengthCheckCase() + self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.blankCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + self.tagColNcharMaxLengthCheckCase() + self.batchInsertCheckCase() + self.multiInsertCheckCase(10) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.spellCheckCase() + self.pointTransCheckCase() + self.defaultTypeCheckCase() + self.tbnameTagsColsNameCheckCase() + # # MultiThreads + self.stbInsertMultiThreadCheckCase() + self.sStbStbDdataInsertMultiThreadCheckCase() + self.sStbStbDdataAtInsertMultiThreadCheckCase() + self.sStbStbDdataMtInsertMultiThreadCheckCase() + self.sStbDtbDdataInsertMultiThreadCheckCase() + self.sStbDtbDdataMtInsertMultiThreadCheckCase() + self.sStbDtbDdataAtInsertMultiThreadCheckCase() + self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() + self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() + + def run(self): + print("running {}".format(__file__)) + tdCom.createDb(api_type="restful") + try: + self.runAll() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/timestamp.py b/tests/pytest/insert/timestamp.py 
new file mode 100644 index 0000000000000000000000000000000000000000..4c9cf36f40e31a792b550a557937d25d968f4fe2 --- /dev/null +++ b/tests/pytest/insert/timestamp.py @@ -0,0 +1,81 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.ts = 1607281690000 + + def run(self): + tdSql.prepare() + + # TS-806 + tdLog.info("test case for TS-806") + + # Case 1 + tdSql.execute("create table t1(ts timestamp, c1 int)") + tdSql.execute("insert into t1(c1, ts) values(1, %d)" % self.ts) + tdSql.query("select * from t1") + tdSql.checkRows(1) + + # Case 2 + tdSql.execute("insert into t1(c1, ts) values(2, %d)(3, %d)" % (self.ts + 1000, self.ts + 2000)) + tdSql.query("select * from t1") + tdSql.checkRows(3) + + # Case 3 + tdSql.execute("create table t2(ts timestamp, c1 timestamp)") + tdSql.execute(" insert into t2(c1, ts) values(%d, %d)" % (self.ts, self.ts + 5000)) + tdSql.query("select * from t2") + tdSql.checkRows(1) + + tdSql.execute(" insert into t2(c1, ts) values(%d, %d)(%d, %d)" % (self.ts, self.ts + 6000, self.ts + 3000, self.ts + 8000)) + tdSql.query("select * from t2") + tdSql.checkRows(3) + + # Case 4 + tdSql.execute("create table stb(ts timestamp, c1 int, c2 binary(20)) tags(tstag timestamp, t1 int)") + tdSql.execute("insert into tb1(c2, ts, c1) using stb(t1, tstag) tags(1, 
now) values('test', %d, 1)" % self.ts) + tdSql.query("select * from stb") + tdSql.checkRows(1) + + # Case 5 + tdSql.execute("insert into tb1(c2, ts, c1) using stb(t1, tstag) tags(1, now) values('test', now, 1) tb2(c1, ts) using stb tags(now + 2m, 1000) values(1, now - 1h)") + tdSql.query("select * from stb") + tdSql.checkRows(3) + + tdSql.execute(" insert into tb1(c2, ts, c1) using stb(t1, tstag) tags(1, now) values('test', now + 10s, 1) tb2(c1, ts) using stb(tstag) tags(now + 2m) values(1, now - 3h)(2, now - 2h)") + tdSql.query("select * from stb") + tdSql.checkRows(6) + + # Case 6 + tdSql.execute("create table stb2 (ts timestamp, c1 timestamp, c2 timestamp) tags(t1 timestamp, t2 timestamp)") + tdSql.execute(" insert into tb4(c1, c2, ts) using stb2(t2, t1) tags(now, now + 1h) values(now + 1s, now + 2s, now + 3s)(now -1s, now - 2s, now - 3s) tb5(c2, ts, c1) using stb2(t2) tags(now + 1h) values(now, now, now)") + tdSql.query("select * from stb2") + tdSql.checkRows(3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/manualTest/TD-5114/continueCreateDn.py b/tests/pytest/manualTest/TD-5114/continueCreateDn.py index 9494ee5f3685d3ddaeb1848a58878d63fa7a54b6..e1d6f5193a3c3045f669815d8b98516d64def6e1 100644 --- a/tests/pytest/manualTest/TD-5114/continueCreateDn.py +++ b/tests/pytest/manualTest/TD-5114/continueCreateDn.py @@ -44,7 +44,7 @@ class TwoClients: # insert data with c connector for i in range(10): - os.system("taosdemo -f manualTest/TD-5114/insertDataDb3Replica2.json -y ") + os.system("%staosBenchmark -f manualTest/TD-5114/insertDataDb3Replica2.json -y ") # # check data correct tdSql.execute("show databases") tdSql.execute("use db3") diff --git a/tests/pytest/manualTest/manual_alter_block.py b/tests/pytest/manualTest/manual_alter_block.py index 
ccd98b1421400a765d85a35cf3a0b13b15f35f8e..14663b79cb9f604c251682263ab1189bc9e9c2b9 100644 --- a/tests/pytest/manualTest/manual_alter_block.py +++ b/tests/pytest/manualTest/manual_alter_block.py @@ -55,7 +55,7 @@ class TDTestCase: tdSql.checkData(0,9,3) #run taosdemo to occupy all cache, need to manually check memory consumption - os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) input("please check memory usage for taosd. After checking, press enter") #alter cache block to 8, then check alter @@ -64,7 +64,7 @@ class TDTestCase: tdSql.checkData(0,9,8) #run taosdemo to occupy all cache, need to manually check memory consumption - os.system("%staosdemo -f tools/taosdemoAllTest/manual_block2.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/manual_block2.json" % binPath) input("please check memory usage for taosd. After checking, press enter") ##expected result the peak memory consumption should increase by around 80MB = 5 blocks of cache diff --git a/tests/pytest/manualTest/manual_alter_comp.py b/tests/pytest/manualTest/manual_alter_comp.py index 6c3e0fc29606caae32b981c662daaacbd31b15be..978279c29a64ee4f413a5eef79fb7a48e359fc3b 100644 --- a/tests/pytest/manualTest/manual_alter_comp.py +++ b/tests/pytest/manualTest/manual_alter_comp.py @@ -74,7 +74,7 @@ class TDTestCase: tdSql.query('show databases') tdSql.execute('alter database db blocks 3') # minimize the data in cache tdSql.checkData(0,14,2) - os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data/vnode") print('comp = 2') input("please check disk usage for taosd. 
After checking, press enter") @@ -91,7 +91,7 @@ class TDTestCase: tdSql.execute('alter database db comp 0') tdSql.query('show databases') tdSql.checkData(0,14,0) - os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data") print('comp = 0') input("please check disk usage for taosd. After checking, press enter") @@ -108,7 +108,7 @@ class TDTestCase: tdSql.execute('alter database db comp 1') tdSql.query('show databases') tdSql.checkData(0,14,1) - os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data") print('comp = 1') input("please check disk usage for taosd. After checking, press enter") diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py index e4480df6b6753df88e3526930ac4bee087264d35..2f17e0bd3ada7ece5f8544033192758fd4747b52 100644 --- a/tests/pytest/perfbenchmark/bug3433.py +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -211,7 +211,7 @@ class TDTestCase: tdLog.info(f"taosd found in {buildPath}") binPath = buildPath + "/build/bin/" - create_table_cmd = f"{binPath}taosdemo -f {filepath} > /dev/null 2>&1" + create_table_cmd = f"{binPath}taosBenchmark -f {filepath} > /dev/null 2>&1" _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") def droptmpfile(self): diff --git a/tests/pytest/perfbenchmark/bug3589.py b/tests/pytest/perfbenchmark/bug3589.py index c54ef8595d0bea1c4984b3f90e282f09659576c3..e01bb998d8b59fd06c65288c520b08d932ae8447 100644 --- a/tests/pytest/perfbenchmark/bug3589.py +++ b/tests/pytest/perfbenchmark/bug3589.py @@ -89,7 +89,7 @@ class TDTestCase: tdLog.info(f"taosd found in {buildPath}") binPath = 
buildPath + "/debug/build/bin/" - query_table_cmd = f"yes | {binPath}taosdemo -f {filepath}" + query_table_cmd = f"yes | {binPath}taosBenchmark -f {filepath}" _ = subprocess.check_output(query_table_cmd, shell=True).decode("utf-8") def checkqueryresult(self, expectrows): diff --git a/tests/pytest/query/nestedQuery/nestedQuery.py b/tests/pytest/query/nestedQuery/nestedQuery.py index 9260aced9a1d79649802917daad8f36ec9cf249e..89751bb7b808002b42e09d4a6bee2ef16e7ac775 100755 --- a/tests/pytest/query/nestedQuery/nestedQuery.py +++ b/tests/pytest/query/nestedQuery/nestedQuery.py @@ -2220,7 +2220,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # regualr-table - os.system("%staosdemo -N -d regular -t 2 -n 1000 -l 4095 -y" % binPath) + os.system("%staosBenchmark -N -d regular -t 2 -n 1000 -l 4095 -y" % binPath) tdSql.execute("use regular") tdSql.query("select * from d0;") tdSql.checkCols(4096) @@ -2281,7 +2281,7 @@ class TDTestCase: tdSql.checkRows(1000) #stable - os.system("%staosdemo -d super -t 2 -n 1000 -l 4093 -y" % binPath) + os.system("%staosBenchmark -d super -t 2 -n 1000 -l 4093 -y" % binPath) tdSql.execute("use super") tdSql.query("select * from meters;") tdSql.checkCols(4096) diff --git a/tests/pytest/query/nestedQuery/nestedQueryJson.py b/tests/pytest/query/nestedQuery/nestedQueryJson.py index 36a231a9165a15cb46cbc0c1d37152f90e54b03e..fa12713f3603e80329416ce2edf9f518d7f91cc3 100644 --- a/tests/pytest/query/nestedQuery/nestedQueryJson.py +++ b/tests/pytest/query/nestedQuery/nestedQueryJson.py @@ -49,7 +49,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # insert: create one or mutiple tables per sql and insert multiple rows per sql - os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath) + os.system("%staosBenchmark -f query/nestedQuery/insertData.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1000) diff --git a/tests/pytest/query/queryCnameDisplay.py 
b/tests/pytest/query/queryCnameDisplay.py index 66a7f85120fe13293996d1bd3153b6fe9b1d6a72..186b3bfe1d1d06c4210c950fff097cb37a73d5df 100644 --- a/tests/pytest/query/queryCnameDisplay.py +++ b/tests/pytest/query/queryCnameDisplay.py @@ -79,16 +79,22 @@ class TDTestCase: tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, false, "bb");') # select as cname with cname_list - sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' - sql_seq_no_as = sql_seq.replace(' as ', ' ') - res = tdSql.getColNameList(sql_seq) - res_no_as = tdSql.getColNameList(sql_seq_no_as) + sql_seq1 = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from super_table_cname_check' + sql_seq2 = f'select count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' + sql_seq_no_as1 = sql_seq1.replace(' as ', ' ') + sql_seq_no_as2 = sql_seq2.replace(' as ', ' ') + res1 = tdSql.getColNameList(sql_seq1) + res2 
= tdSql.getColNameList(sql_seq2) + res_no_as1 = tdSql.getColNameList(sql_seq_no_as1) + res_no_as2 = tdSql.getColNameList(sql_seq_no_as2) # cname[1] > 64, it is expected to be equal to 64 cname_list_1_expected = cname_list[1][:-1] cname_list[1] = cname_list_1_expected - checkColNameList = tdSql.checkColNameList(res, cname_list) - checkColNameList = tdSql.checkColNameList(res_no_as, cname_list) + tdSql.checkColNameList(res1, cname_list[:10]) + tdSql.checkColNameList(res2, cname_list[10:]) + tdSql.checkColNameList(res_no_as1, cname_list[:10]) + tdSql.checkColNameList(res_no_as2, cname_list[10:]) def run(self): tdSql.prepare() diff --git a/tests/pytest/query/queryLike.py b/tests/pytest/query/queryLike.py index 2bcd5906a8eaa505e1702cefce7f8b2594f53f43..b3916ed84db1d558e4b95f62c2def19deee75944 100644 --- a/tests/pytest/query/queryLike.py +++ b/tests/pytest/query/queryLike.py @@ -92,13 +92,13 @@ class TDTestCase: tdSql.query("select * from st where tbname like 'tb_\_'") tdSql.checkRows(1) tdSql.query("select * from st where tbname like 'tb___'") - tdSql.checkRows(4) - tdSql.query("select * from st where tbname like 'tb_\__'") tdSql.checkRows(3) + tdSql.query("select * from st where tbname like 'tb_\__'") + tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb_\_\_'") tdSql.checkRows(1) tdSql.query("select * from st where tbname like 'tb\__\_'") - tdSql.checkRows(1) + tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb\__\__'") tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb\__\_\_'") @@ -116,9 +116,9 @@ class TDTestCase: tdSql.query("select * from st where name like 'tbname\__';") tdSql.checkRows(3) tdSql.query("select * from st where name like 'tbname___';") - tdSql.checkRows(4) - tdSql.query("select * from st where name like 'tbname_\__';") tdSql.checkRows(3) + tdSql.query("select * from st where name like 'tbname_\__';") + tdSql.checkRows(2) tdSql.query("select * from st where name like 'tbname_\_\_';") 
tdSql.checkRows(1) tdSql.query("select * from st where name like 'tbname\_\__';") @@ -132,7 +132,8 @@ class TDTestCase: tdSql.query("select * from st where name like 'tbname\_\__\_';") tdSql.checkRows(2) tdSql.query("select name from st where name like 'tbname\_\_\__';") - tdSql.checkData(0,0 "tbname____") + tdSql.checkRows(1) + tdSql.checkData(0,0, "tbname____") # check escape about tags tdSql.query("select * from st where tagg like 'tag\_';") @@ -142,9 +143,9 @@ class TDTestCase: tdSql.query("select * from st where tagg like 'tag\__';") tdSql.checkRows(3) tdSql.query("select * from st where tagg like 'tag___';") - tdSql.checkRows(4) - tdSql.query("select * from st where tagg like 'tag_\__';") tdSql.checkRows(3) + tdSql.query("select * from st where tagg like 'tag_\__';") + tdSql.checkRows(2) tdSql.query("select * from st where tagg like 'tag_\_\_';") tdSql.checkRows(1) tdSql.query("select * from st where tagg like 'tag\_\__';") @@ -158,7 +159,7 @@ class TDTestCase: tdSql.query("select * from st where tagg like 'tag\_\__\_';") tdSql.checkRows(2) tdSql.query("select * from st where tagg like 'tag\_\__\_';") - tdSql.checkData(0,0 "tag__a_") + tdSql.checkData(0,0, "tag__a_") os.system("rm -rf ./*.py.sql") diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py index 17022bdc41057bcb67e1530a2cb6d399bada20ff..a50b9cbf8afd7052a78e1f6ef85b8c464e816e71 100644 --- a/tests/pytest/query/querySort.py +++ b/tests/pytest/query/querySort.py @@ -97,7 +97,7 @@ class TDTestCase: self.checkColumnSorted(0, "desc") print("======= step 2: verify order for special column =========") - + tdSql.query("select tbcol1 from st order by ts desc") tdSql.query("select tbcol6 from st order by ts desc") @@ -122,6 +122,63 @@ class TDTestCase: (i, i)) self.checkColumnSorted(1, "desc") + # order by rules: https://jira.taosdata.com:18090/pages/viewpage.action?pageId=123455481 + tdSql.error("select tbcol1 from st order by 123") + tdSql.error("select tbcol1 from st order by 
tbname") + tdSql.error("select tbcol1 from st order by tagcol1") + tdSql.error("select tbcol1 from st order by ''") + tdSql.error("select top(tbcol1, 12) from st1 order by tbcol1,ts") + tdSql.error("select top(tbcol1, 12) from st order by tbcol1,ts,tbcol2") + tdSql.error("select top(tbcol1, 12) from st order by ts, tbcol1") + tdSql.error("select top(tbcol1, 2) from st1 group by tbcol1 order by tbcol2") + + fun_list = ['avg','count','twa','sum','stddev','leastsquares','min', + 'max','first','last','top','bottom','percentile','apercentile', + 'last_row','diff','spread','distinct'] + key = ['tbol','tagcol'] + for i in range(1,15): + for k in key: + for j in fun_list: + if j == 'leastsquares': + pick_func=j+'('+ k + str(i) +',1,1)' + elif j == 'top' or j == 'bottom' : continue + elif j == 'percentile' or j == 'apercentile': + pick_func=j+'('+ k + str(i) +',1)' + else: + pick_func=j+'('+ k + str(i) +')' + sql = 'select %s from st group by %s order by %s' % (pick_func , k+str(i), k+str(i)) + tdSql.error(sql) + sql = 'select %s from st6 group by %s order by %s ' % (pick_func , k+str(i), k+str(i)) + tdSql.error(sql) + + tdSql.query("select top(tbcol1, 2) from st1 group by tbcol2 order by tbcol2") + tdSql.query("select top(tbcol1, 12) from st order by tbcol1, ts") + + tdSql.query("select avg(tbcol1) from st group by tbname order by tbname") + tdSql.checkData(1, 0, 5.5) + tdSql.checkData(5, 1, "st6") + + tdSql.query("select top(tbcol1, 2) from st group by tbname order by tbname") + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 2, "st2") + + tdSql.query("select top(tbcol1, 12) from st order by tbcol1") + tdSql.checkData(1, 1, 9) + + tdSql.error("select top(tbcol1, 12) from st1 order by tbcol1,ts") + tdSql.error("select top(tbcol1, 12),tbname from st order by tbcol1,tbname") + + tdSql.query("select top(tbcol1, 12) from st group by tbname order by tbname desc") + tdSql.checkData(1, 2, "st10") + tdSql.checkData(10, 2, "st9") + + tdSql.query("select top(tbcol1, 2) from st group 
by tbname order by tbname desc,ts") + tdSql.checkData(1, 2, "st10") + tdSql.checkData(10, 2, "st5") + tdSql.checkData(0, 0, "2018-09-17 09:00:00.109") + tdSql.checkData(1, 0, "2018-09-17 09:00:00.110") + tdSql.checkData(2, 0, "2018-09-17 09:00:00.099") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py index 9b580a24acd145e2e90ae6ee513e946d72820f2c..8bfd71ef2788fdcf1fc0f2876b52934633638068 100644 --- a/tests/pytest/query/select_last_crash.py +++ b/tests/pytest/query/select_last_crash.py @@ -16,6 +16,8 @@ import taos from util.log import * from util.cases import * from util.sql import * +from util.dnodes import * +import random class TDTestCase: @@ -41,6 +43,32 @@ class TDTestCase: tdSql.query("select last(*) from st") tdSql.checkRows(1) + + # TS-717 + tdLog.info("case for TS-717") + cachelast_values = [0, 1, 2, 3] + + for value in cachelast_values: + tdLog.info("case for cachelast value: %d" % value) + tdSql.execute("drop database if exists db") + tdLog.sleep(1) + tdSql.execute("create database db cachelast %d" % value) + tdSql.execute("use db") + tdSql.execute("create table stb(ts timestamp, c1 int, c2 binary(20), c3 binary(5)) tags(t1 int)") + + sql = "insert into t1 using stb tags(1) (ts, c1, c2) values" + for i in range(self.rowNum): + sql += "(%d, %d, 'test')" % (self.ts + i, random.randint(1,100)) + tdSql.execute(sql) + + tdSql.query("select * from stb") + tdSql.checkRows(self.rowNum) + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select * from stb") + tdSql.checkRows(self.rowNum) def stop(self): tdSql.close() diff --git a/tests/pytest/query/unionAllTest.py b/tests/pytest/query/unionAllTest.py index d1e1bf4d3e11191be2875c464150793936acd065..eaef5d2d573ba3c0ab08ed7ede991d25913c2454 100644 --- a/tests/pytest/query/unionAllTest.py +++ b/tests/pytest/query/unionAllTest.py @@ -103,10 +103,58 @@ class TDTestCase: select 
count(*) as count, loc from st where ts between 1600000000000 and 1600000000010 group by loc''') tdSql.checkRows(6) + # https://jira.taosdata.com:18080/browse/TS-715 + tdLog.info("test case for TS-715") + sql = "" + + tdSql.execute("create table st2(ts timestamp, c1 int, c2 int, c3 int) tags(loc nchar(20))") + + for i in range(101): + if i == 0: + sql = "select last(*) from sub0 " + else: + sql += f"union all select last(*) from sub{i} " + + tdSql.execute("create table sub%d using st2 tags('nchar%d')" % (i, i)) + tdSql.execute("insert into sub%d values(%d, %d, %d, %d)(%d, %d, %d, %d)" % (i, self.ts + i, i, i, i,self.ts + i + 101, i + 101, i + 101, i + 101)) + + tdSql.error(sql) + + # TS-795 + tdLog.info("test case for TS-795") + + functions = ["*", "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares", "min", "max", "first", "last", "top", "bottom", "percentile", "apercentile", "last_row"] + + for func in functions: + expr = func + if func == "top" or func == "bottom": + expr += "(c1, 1)" + elif func == "percentile" or func == "apercentile": + expr += "(c1, 0.5)" + elif func == "leastsquares": + expr = func + "(c1, 1, 1)" + elif func == "*": + expr = func + else: + expr += "(c1)" + + for i in range(100): + if i == 0: + sql = f"select {expr} from sub0 " + else: + sql += f"union all select {expr} from sub{i} " + + tdSql.query(sql) + if func == "*": + tdSql.checkRows(200) + else: + tdSql.checkRows(100) + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stable/json_tag.rawsql b/tests/pytest/stable/json_tag.rawsql new file mode 100644 index 0000000000000000000000000000000000000000..222a3b784fa1a81d10e2197e2224ec5f9d209b94 --- /dev/null +++ b/tests/pytest/stable/json_tag.rawsql @@ -0,0 +1,163 @@ +create database db_json_tag_test; +drop table if 
exists db_json_tag_test.jsons1; +drop table if exists db_json_tag_test.jsons2; +drop table if exists db_json_tag_test.jsons3; +drop table if exists db_json_tag_test.jsons1_1; +drop table if exists db_json_tag_test.jsons1_2; +drop table if exists db_json_tag_test.jsons1_3; +drop table if exists db_json_tag_test.jsons1_4; +drop table if exists db_json_tag_test.jsons1_5; +drop table if exists db_json_tag_test.jsons1_6; +drop table if exists db_json_tag_test.jsons1_7; +drop table if exists db_json_tag_test.jsons1_8; +drop table if exists db_json_tag_test.jsons1_9; +drop table if exists db_json_tag_test.jsons1_10; +drop table if exists db_json_tag_test.jsons1_11; +drop table if exists db_json_tag_test.jsons1_12; +drop table if exists db_json_tag_test.jsons1_13; +drop table if exists db_json_tag_test.jsons1_20; +drop table if exists db_json_tag_test.jsons1_21; +drop table if exists db_json_tag_test.jsons1_22; +create table if not exists db_json_tag_test.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50)) tags(jtag json); +CREATE TABLE if not exists db_json_tag_test.jsons1_1 using db_json_tag_test.jsons1 tags('{"loc":"fff","id":5}'); +insert into db_json_tag_test.jsons1_2 using db_json_tag_test.jsons1 tags('{"num":5,"location":"beijing"}') values (now, 2, true, 'json2'); +insert into db_json_tag_test.jsons1_1 values(now, 1, false, 'json1'); +insert into db_json_tag_test.jsons1_3 using db_json_tag_test.jsons1 tags('{"num":34,"location":"beijing","level":"l1"}') values (now, 3, false 'json3'); +insert into db_json_tag_test.jsons1_4 using db_json_tag_test.jsons1 tags('{"class":55,"location":"shanghai","name":"name4"}') values (now, 4, true, 'json4'); + +ALTER TABLE db_json_tag_test.jsons1_1 SET TAG jtag='{"sex":"femail","age":35, "isKey":true}'; +select jtag from db_json_tag_test.jsons1_1; + +select * from db_json_tag_test.jsons1; + +select jtag->'location' from db_json_tag_test.jsons1_2; + +select jtag->'location' from db_json_tag_test.jsons1; + +select jtag 
from db_json_tag_test.jsons1_1; + +# test json string value +select * from db_json_tag_test.jsons1 where jtag->'location'='beijing'; + +select * from db_json_tag_test.jsons1 where jtag->'location'!='beijing'; + +select jtag->'num' from db_json_tag_test.jsons1 where jtag->'level'='l1'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'class'>5 and jtag->'class'<9; tdSql.checkRows(0) + +select *,tbname from db_json_tag_test.jsons1 where jtag->'class'>5 and jtag->'class'<92; + +select * from db_json_tag_test.jsons1 where jtag?'sex' or jtag?'num'; + +select * from db_json_tag_test.jsons1 where jtag?'sex' or jtag?'numww'; + +select * from db_json_tag_test.jsons1 where jtag?'sex' and jtag?'num'; + +select jtag->'sex' from db_json_tag_test.jsons1 where jtag?'sex' or jtag?'num'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'location'='beijing'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'num'=5 or jtag?'sex'; + +select * from db_json_tag_test.jsons1 where tbname = 'jsons1_1'; + +select * from db_json_tag_test.jsons1 where tbname = 'jsons1_1' or jtag?'num'; + +select * from db_json_tag_test.jsons1 where tbname = 'jsons1_1' and jtag?'num'; + +select * from db_json_tag_test.jsons1 where tbname = 'jsons1_1' or jtag->'num'=5; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'location' like 'bei%'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'location' like 'bei%' and jtag->'location'='beijin'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'location' like 'bei%' or jtag->'location'='beijin'; + +select *,tbname from db_json_tag_test.jsons1 where jtag->'location' like 'bei%' and jtag->'num'=34; + +select *,tbname from db_json_tag_test.jsons1 where (jtag->'location' like 'bei%' or jtag->'num'=34) and jtag->'class'=55; + +select * from db_json_tag_test.jsons1 where jtag->'location' in ('beijing'); + +select * from db_json_tag_test.jsons1 where jtag->'num' in (5,34); + +select * from 
db_json_tag_test.jsons1 where jtag->'location' in ('shanghai') and jtag->'class'=55; + +select * from db_json_tag_test.jsons1 where jtag->'location' match 'jin$'; + +select * from db_json_tag_test.jsons1 where jtag->'location' match 'jin'; + +select * from db_json_tag_test.jsons1 where datastr match 'json' and jtag->'location' match 'jin'; + +CREATE TABLE if not exists db_json_tag_test.jsons1_5 using db_json_tag_test.jsons1 tags('\t'); +CREATE TABLE if not exists db_json_tag_test.jsons1_6 using db_json_tag_test.jsons1 tags(''); + +select jtag from db_json_tag_test.jsons1_6; + +CREATE TABLE if not exists db_json_tag_test.jsons1_7 using db_json_tag_test.jsons1 tags('{}'); +select jtag from db_json_tag_test.jsons1_7; + +CREATE TABLE if not exists db_json_tag_test.jsons1_8 using db_json_tag_test.jsons1 tags('null'); +select jtag from db_json_tag_test.jsons1_8; + +CREATE TABLE if not exists db_json_tag_test.jsons1_9 using db_json_tag_test.jsons1 tags('{"":4, "time":null}'); +select jtag from db_json_tag_test.jsons1_9; + +CREATE TABLE if not exists db_json_tag_test.jsons1_10 using db_json_tag_test.jsons1 tags('{"k1":"","k1":"v1","k2":true,"k3":false,"k4":55}'); +select jtag from db_json_tag_test.jsons1_10; + +select jtag->'k2' from db_json_tag_test.jsons1_10; + +select jtag from db_json_tag_test.jsons1 where jtag->'k1'=''; + +select jtag from db_json_tag_test.jsons1 where jtag->'k2'=true; + +select jtag from db_json_tag_test.jsons1 where jtag is null; + +select jtag from db_json_tag_test.jsons1 where jtag is not null; + +select * from db_json_tag_test.jsons1 where jtag->'location' is not null; + +select tbname,jtag from db_json_tag_test.jsons1 where jtag->'location' is null; + +select * from db_json_tag_test.jsons1 where jtag->'num' is not null; + +select * from db_json_tag_test.jsons1 where jtag->'location'='null'; + +select distinct jtag from db_json_tag_test.jsons1; + +select distinct jtag->'location' from db_json_tag_test.jsons1; + +CREATE TABLE if not exists 
db_json_tag_test.jsons1_11 using db_json_tag_test.jsons1 tags('{"k1":"中国","k5":"是是是"}'); + +select tbname,jtag from db_json_tag_test.jsons1 where jtag->'k1' match '中'; + +select tbname,jtag from db_json_tag_test.jsons1 where jtag->'k1'='中国'; + +INSERT INTO db_json_tag_test.jsons1_12 using db_json_tag_test.jsons1 tags('{"tbname":"tt","databool":true,"dataStr":"是是是"}') values(now, 4, false, "你就会;; + +select *,tbname,jtag from db_json_tag_test.jsons1 where jtag->'dataStr' match '是'; + +select tbname,jtag->'tbname' from db_json_tag_test.jsons1 where jtag->'tbname'='tt'; + +select *,tbname,jtag from db_json_tag_test.jsons1 where dataBool=true; + +CREATE TABLE if not exists db_json_tag_test.jsons1_13 using db_json_tag_test.jsons1 tags('{"1loc":"fff",";id":5}'); + +create table if not exists db_json_tag_test.jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50)) tags(jtag json); +create table if not exists db_json_tag_test.jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50)) tags(jtag json); +CREATE TABLE if not exists db_json_tag_test.jsons2_1 using db_json_tag_test.jsons2 tags('{"loc":"fff","id":5}'); +insert into db_json_tag_test.jsons3_1 using db_json_tag_test.jsons3 tags('{"loc":"fff","num":5,"location":"beijing"}') values ('2020-04-18 15:00:00.000', 2, true, 'json2'); +insert into db_json_tag_test.jsons2_1 values('2020-04-18 15:00:00.000', 1, false, 'json1'); +select 'sss',33,a.jtag->'loc' from db_json_tag_test.jsons2 a,db_json_tag_test.jsons3 b where a.ts=b.ts and a.jtag->'loc'=b.jtag->'loc'; + +select avg(dataint),count(*) from db_json_tag_test.jsons1 group by jtag->'location' order by jtag->'location' desc; +INSERT INTO db_json_tag_test.jsons1_20 using db_json_tag_test.jsons1 tags('{"tagint":1}') values(now, 1, false, "你就会;; +INSERT INTO db_json_tag_test.jsons1_21 using db_json_tag_test.jsons1 tags('{"tagint":11}') values(now, 11, false, "你就会;; +INSERT INTO db_json_tag_test.jsons1_22 using db_json_tag_test.jsons1 tags('{"tagint":2}') 
values(now, 2, false, "你就会;; +select avg(dataint),count(*) from db_json_tag_test.jsons1 group by jtag->'tagint' order by jtag->'tagint' desc; +select avg(dataint),count(*) from db_json_tag_test.jsons1 group by jtag->'tagint' order by jtag->'tagint'; +insert into db_json_tag_test.jsons1_9 values('2020-04-17 15:20:00.000', 5, false, 'json19'); +select * from db_json_tag_test.jsons1; +select * from db_json_tag_test.jsons1 where jtag->'time' is null; +select * from db_json_tag_test.jsons1 where jtag->'time'=null; \ No newline at end of file diff --git a/tests/pytest/stable/json_tag2.rawsql b/tests/pytest/stable/json_tag2.rawsql new file mode 100644 index 0000000000000000000000000000000000000000..6b1420b60d66930d53f3cf2c1fca1724820ab1fe --- /dev/null +++ b/tests/pytest/stable/json_tag2.rawsql @@ -0,0 +1,137 @@ +drop database db; +create database db; +use db; +create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json) +insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json') +insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss') +insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe') +insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd') +insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe') +insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','') +insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws') + +CREATE TABLE if not exists 
jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90}') + +insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw') +CREATE TABLE if not exists jsons1_10 using jsons1 tags('') +CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ') +CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}') +CREATE TABLE if not exists jsons1_13 using jsons1 tags('null') + +ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}' + + +select dataint from jsons1 + +select * from jsons1 +select jtag from jsons1 +select jtag from jsons1 where jtag is null +select jtag from jsons1 where jtag is not null +select jtag from jsons1_8 +select jtag from jsons1_1 +select jtag from jsons1_9 +select jtag->'tag1' from jsons1_1 +select jtag->'tag2' from jsons1_6 +select jtag->'tag2' from jsons1_1 +select jtag->'tag3' from jsons1_1 +select jtag->'tag1' from jsons1_4 +select jtag->'tag1' from jsons1_5 +select jtag->'tag10' from jsons1_4 + +#select jtag->'tag1' from jsons1 +#select * from jsons1 where jtag->'tag2'='beijing' +#select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' +select * from jsons1 where jtag->'tag1'='beijing' +select * from jsons1 where jtag->'tag1'='收到货' +select * from jsons1 where jtag->'tag2'>'beijing' +select * from jsons1 where jtag->'tag2'>='beijing' +select * from jsons1 where jtag->'tag2'<'beijing' +select * from jsons1 where jtag->'tag2'<='beijing' +select * from jsons1 where jtag->'tag2'!='beijing' +select * from jsons1 where jtag->'tag2'='' + +select * from jsons1 where jtag->'tag1'=5 +select * from jsons1 where jtag->'tag1'=10 +select * from jsons1 where jtag->'tag1'<54 +select * from jsons1 where jtag->'tag1'<=11 +select * from jsons1 where jtag->'tag1'>4 +select * from jsons1 where jtag->'tag1'>=5 +select * from jsons1 where jtag->'tag1'!=5 +select * from jsons1 where jtag->'tag1'!=55 + +select * from jsons1 where 
jtag->'tag1'=1.232 +select * from jsons1 where jtag->'tag1'<1.232 +select * from jsons1 where jtag->'tag1'<=1.232 +select * from jsons1 where jtag->'tag1'>1.23 +select * from jsons1 where jtag->'tag1'>=1.232 +select * from jsons1 where jtag->'tag1'!=1.232 +select * from jsons1 where jtag->'tag1'!=3.232 + +select * from jsons1 where jtag->'tag1'=true +select * from jsons1 where jtag->'tag1'=false +select * from jsons1 where jtag->'tag1'!=false +select * from jsons1 where jtag->'tag1'=null +select * from jsons1 where jtag is null +select * from jsons1 where jtag is not null + +select * from jsons1 where jtag->'tag_no_exist'=3 +select * from jsons1 where jtag->'tag1' is null +select * from jsons1 where jtag->'tag4' is null +select * from jsons1 where jtag->'tag3' is not null + +select * from jsons1 where jtag conatins 'tag1' +select * from jsons1 where jtag conatins 'tag3' +select * from jsons1 where jtag conatins 'tag_no_exist' + +select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing' +select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing' +select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai' +select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai' +select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35 +select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35 +select * from jsons1 where jtag->'tag1' is not null and jtag conatins 'tag3' +select * from jsons1 where jtag->'tag1'='femail' and jtag conatins 'tag3' +select * from jsons1 where tbname = 'jsons1_1' +select * from jsons1 where tbname = 'jsons1_1' and jtag conatins 'tag3' +select * from jsons1 where tbname = 'jsons1_1' and jtag conatins 'tag3' and dataint=3 +select * from jsons1 where tbname = 'jsons1_1' and jtag conatins 'tag3' and dataint=23 + +select *,tbname from jsons1 where jtag->'tag2' like 'bei%' +select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null + +select * from jsons1 where 
jtag->'tag1' match 'ma' +select * from jsons1 where jtag->'tag1' match 'ma$' +select * from jsons1 where jtag->'tag2' match 'jing$' +select * from jsons1 where jtag->'tag1' match '收到' + +insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws') +select distinct jtag->'tag1' from jsons1 +select distinct jtag from jsons1 + +INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\") +select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js' +select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14' + +create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json) +insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2') +insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss') + +create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json) +insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3') +insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss') +select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1' + + +select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc + +select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc + +select stddev(dataint) from jsons1 group by jtag->'tag1' + +select top(dataint,100) from jsons1 group by jtag->'tag1' + +select * from (select jtag, dataint from jsons1) + +select jtag->'tag1' from (select 
jtag->'tag1', dataint from jsons1) + +select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts) diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index 9b4eb8725c96f95196f251c55b0b773cd68e9ed5..95f5d233855bbb743eaf0390d6c145258fd8f84d 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -153,10 +153,14 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkData(0, 2, 's1') tdSql.checkData(1, 2, 's0') + tdSql.execute('kill stream %s ;' % tdSql.queryResult[0][0]) + time.sleep(5) + tdSql.query("show streams") + tdSql.checkRows(1) def stop(self): - tdSql.close() + #tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/table/create_db_from_normal_db.py b/tests/pytest/table/create_db_from_normal_db.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5182c3b16ca31b2bbf966df294e2c4e4c12ff3 --- /dev/null +++ b/tests/pytest/table/create_db_from_normal_db.py @@ -0,0 +1,45 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("test case for TS-783") + tdSql.execute("drop table if exists db.state1;") + tdSql.execute("create table db.state1 (ts timestamp, c1 int);") + tdSql.error("create table db.test1 using db.state1 tags('tt');") + + tdSql.execute("drop table if exists db.state2;") + tdSql.execute("create table db.state2 (ts timestamp, c1 int) tags (t binary(20));") + tdSql.query("create table db.test2 using db.state2 tags('tt');") + tdSql.error("create table db.test22 using db.test2 tags('tt');") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/alter_tag.py b/tests/pytest/tag_lite/alter_tag.py index 9e5abb6c134840ecb4ab52c7d3a6ab623885e12b..c4d738e4cf0d9bd63f7813e9d267080f9f045fac 100644 --- a/tests/pytest/tag_lite/alter_tag.py +++ b/tests/pytest/tag_lite/alter_tag.py @@ -30,7 +30,7 @@ class TDTestCase: print("==============step1") tdSql.execute( - "CREATE TABLE IF NOT EXISTS ampere (ts TIMESTAMP(8),ampere DOUBLE(8)) TAGS (device_name BINARY(50),build_id BINARY(50),project_id BINARY(50),alias BINARY(50))") + "CREATE TABLE IF NOT EXISTS ampere (ts TIMESTAMP,ampere DOUBLE) TAGS (device_name BINARY(50),build_id BINARY(50),project_id BINARY(50),alias BINARY(50))") tdSql.execute("insert into d1001 using ampere tags('test', '2', '2', '2') VALUES 
(now, 123)") tdSql.execute("ALTER TABLE ampere ADD TAG variable_id BINARY(50)") diff --git a/tests/pytest/tag_lite/json_tag_extra.py b/tests/pytest/tag_lite/json_tag_extra.py deleted file mode 100644 index 40ee69d46b770a33a8255783f675d5071513bc28..0000000000000000000000000000000000000000 --- a/tests/pytest/tag_lite/json_tag_extra.py +++ /dev/null @@ -1,375 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, db_test.stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -import time -import random - -class TDTestCase: - - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tdSql.prepare() - - print("==============step1 tag format =======") - tdLog.info("create database two stables and ") - tdSql.execute("create database db_json_tag_test") - tdSql.execute("use db_json_tag_test") - # test tag format - tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(128))") - tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(64),jtag1 json(100))") - tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(64),dataBool bool)") - - tdSql.execute("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":\"fff\",\"id\":5}')") - tdSql.execute("use db_json_tag_test") - - - # two stables: jsons1 jsons2 ,test tag's value and key 
- tdSql.execute("insert into jsons1_1(ts,dataInt) using jsons1 tags('{\"loc+\":\"fff\",\"id\":5}') values (now,12)") - - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{oc:\"fff\",\"id\":5}')") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":fff,\"id\":5}')") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('3333')") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":}')") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":bool)") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags(true)") - tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('[{\"num\":5}]')") - - # test object and key max length. max key length is 256, max object length is 4096 include abcd. - tdSql.execute("create table if not exists jsons4(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(128))") - - char1= ''.join(['abcd']*64) - char2=''.join(char1) - char3= ''.join(['abcd']*1022) - print(len(char3)) # 4088 - tdSql.execute("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s\":5}')" % char1) # len(key)=256 - tdSql.error("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s1\":5}')" % char2) # len(key)=257 - tdSql.execute("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"T\":\"%s\"}')" % char3) # len(object)=4096 - tdSql.error("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"TS\":\"%s\"}')" % char3) # len(object)=4097 - - tdSql.execute("insert into jsons1_1 values(now, 1, 'json1')") - tdSql.execute("insert into jsons1_1 values(now+1s, 1, 'json1')") - tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"num\":5,\"location\":\"beijing\"}') values (now, 1, 'json2')") - tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"num\":34,\"location\":\"beijing\",\"level\":\"l1\"}') values (now, 1, 'json3')") - tdSql.execute("insert into jsons1_4 using jsons1 
tags('{\"class\":55,\"location\":\"beijing\",\"name\":\"name4\"}') values (now, 1, 'json4')") - - # test : json'vaule is null and - tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt2 int, dataStr2 nchar(50)) tags(jtag2 json(300))") - tdSql.execute("CREATE TABLE if not exists jsons2_1 using jsons2 tags('{}')") - tdSql.query("select jtag2 from jsons2_1") - tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons2_2 using jsons2 tags('')") - tdSql.query("select jtag2 from jsons2_2") - tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons2_3 using jsons2 tags('null')") - tdSql.query("select jtag2 from jsons2_3") - tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons2_4 using jsons2 tags('\t')") - tdSql.query("select jtag2 from jsons2_4") - tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons2_5 using jsons2 tags(' ')") - tdSql.query("select jtag2 from jsons2_5") - tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons2_6 using jsons2 tags('{\"nv\":null,\"tea\":true,\"\":false,\"\":123,\"tea\":false}')") - tdSql.query("select jtag2 from jsons2_6") - tdSql.checkData(0, 0, "{\"tea\":true}") - tdSql.error("CREATE TABLE if not exists jsons2_7 using jsons2 tags('{\"nv\":null,\"tea\":123,\"\":false,\"\":123,\"tea\":false}')") - tdSql.execute("CREATE TABLE if not exists jsons2_7 using jsons2 tags('{\"test7\":\"\"}')") - tdSql.query("select jtag2 from jsons2_7") - tdSql.checkData(0, 0, "{\"test7\":\"\"}") - - print("==============step2 alter json table==") - tdLog.info("alter stable add tag") - tdSql.error("ALTER STABLE jsons2 add tag jtag3 nchar(20)") - tdSql.error("ALTER STABLE jsons2 drop tag jtag2") - tdSql.execute("ALTER STABLE jsons2 change tag jtag2 jtag3") - tdSql.query("select jtag3 from jsons2_6") - tdSql.checkData(0, 0, "{\"tea\":true}") - tdSql.error("ALTER TABLE jsons2_6 SET TAG jtag3='{\"tea-=[].;!@#$%^&*()/\":}'") - 
tdSql.execute("ALTER TABLE jsons2_6 SET TAG jtag3='{\"tea-=[].;!@#$%^&*()/\":false}'") - tdSql.query("select jtag3 from jsons2_6") - tdSql.checkData(0, 0, "{\"tea-=[].;!@#$%^&*()/\":false}") - tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"sex\":\"femail\",\"age\":35}'") - tdSql.query("select jtag from jsons1_1") - tdSql.checkData(0, 0, "{\"sex\":\"femail\",\"age\":35}") - - - - print("==============step3") - tdLog.info("select table") - - tdSql.query("select jtag from jsons1_1") - tdSql.checkData(0, 0, "{\"sex\":\"femail\",\"age\":35}") - - tdSql.query("select jtag from jsons1 where jtag->'name'='name4'") - tdSql.checkData(0, 0, "{\"class\":55,\"location\":\"beijing\",\"name\":\"name4\"}") - - - tdSql.query("select * from jsons1") - tdSql.checkRows(6) - - tdSql.query("select * from jsons1_1") - tdSql.checkRows(3) - - tdSql.query("select * from jsons1 where jtag->'location'='beijing'") - tdSql.checkRows(3) - - tdSql.query("select jtag->'location' from jsons1_2") - tdSql.checkData(0, 0, "beijing") - - - tdSql.query("select jtag->'num' from jsons1 where jtag->'level'='l1'") - tdSql.checkData(0, 0, 34) - - tdSql.query("select jtag->'location' from jsons1") - tdSql.checkRows(4) - - tdSql.query("select jtag from jsons1_1") - tdSql.checkRows(1) - - tdSql.query("select * from jsons1 where jtag?'sex' or jtag?'num'") - tdSql.checkRows(5) - - tdSql.query("select * from jsons1 where jtag?'sex' and jtag?'num'") - tdSql.checkRows(0) - - tdSql.query("select jtag->'sex' from jsons1 where jtag?'sex' or jtag?'num'") - tdSql.checkData(0, 0, "femail") - tdSql.checkRows(3) - - tdSql.query("select *,tbname from jsons1 where jtag->'location'='beijing'") - tdSql.checkRows(3) - - tdSql.query("select *,tbname from jsons1 where jtag->'num'=5 or jtag?'sex'") - tdSql.checkRows(4) - - # test with tbname - tdSql.query("select * from jsons1 where tbname = 'jsons1_1'") - tdSql.checkRows(3) - - tdSql.query("select * from jsons1 where tbname = 'jsons1_1' or jtag?'num'") - tdSql.checkRows(5) - 
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag?'num'") - tdSql.checkRows(0) - - tdSql.query("select * from jsons1 where tbname = 'jsons1_1' or jtag->'num'=5") - tdSql.checkRows(4) - - # test where condition like - tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%'") - tdSql.checkRows(3) - - tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' and jtag->'location'='beijin'") - tdSql.checkRows(0) - - tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' or jtag->'location'='beijin'") - tdSql.checkRows(3) - - tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' and jtag->'num'=34") - tdSql.checkRows(1) - - tdSql.query("select *,tbname from jsons1 where (jtag->'location' like 'shanghai%' or jtag->'num'=34) and jtag->'class'=55") - tdSql.checkRows(0) - - tdSql.error("select * from jsons1 where jtag->'num' like '5%'") - - # test where condition in - tdSql.query("select * from jsons1 where jtag->'location' in ('beijing')") - tdSql.checkRows(3) - - tdSql.query("select * from jsons1 where jtag->'num' in (5,34)") - tdSql.checkRows(2) - - tdSql.error("select * from jsons1 where jtag->'num' in ('5',34)") - - tdSql.query("select * from jsons1 where jtag->'location' in ('beijing') and jtag->'class'=55") - tdSql.checkRows(1) - - # test where condition match - tdSql.query("select * from jsons1 where jtag->'location' match 'jin$'") - tdSql.checkRows(0) - - tdSql.query("select * from jsons1 where jtag->'location' match 'jin'") - tdSql.checkRows(3) - - tdSql.query("select * from jsons1 where datastr match 'json' and jtag->'location' match 'jin'") - tdSql.checkRows(3) - - tdSql.error("select * from jsons1 where jtag->'num' match '5'") - - # test json string parse - tdSql.error("CREATE TABLE if not exists jsons1_5 using jsons1 tags('efwewf')") - tdSql.execute("CREATE TABLE if not exists jsons1_5 using jsons1 tags('\t')") - tdSql.execute("CREATE TABLE if not exists 
jsons1_6 using jsons1 tags('')") - - tdSql.query("select jtag from jsons1_6") - tdSql.checkData(0, 0, None) - - tdSql.execute("CREATE TABLE if not exists jsons1_7 using jsons1 tags('{}')") - tdSql.query("select jtag from jsons1_7") - tdSql.checkData(0, 0, None) - - tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('null')") - tdSql.query("select jtag from jsons1_8") - tdSql.checkData(0, 0, None) - - tdSql.execute("CREATE TABLE if not exists jsons1_9 using jsons1 tags('{\"\":4, \"time\":null}')") - tdSql.query("select jtag from jsons1_9") - tdSql.checkData(0, 0, None) - - tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('{\"k1\":\"\",\"k1\":\"v1\",\"k2\":true,\"k3\":false,\"k4\":55}')") - tdSql.query("select jtag from jsons1_10") - tdSql.checkData(0, 0, "{\"k1\":\"\",\"k2\":true,\"k3\":false,\"k4\":55}") - - tdSql.query("select jtag->'k2' from jsons1_10") - tdSql.checkData(0, 0, "true") - - tdSql.query("select jtag from jsons1 where jtag->'k1'=''") - tdSql.checkRows(1) - - tdSql.query("select jtag from jsons1 where jtag->'k2'=true") - tdSql.checkRows(1) - - tdSql.query("select jtag from jsons1 where jtag is null") - tdSql.checkRows(5) - - tdSql.query("select jtag from jsons1 where jtag is not null") - tdSql.checkRows(5) - - tdSql.query("select * from jsons1 where jtag->'location' is not null") - tdSql.checkRows(3) - - tdSql.query("select tbname,jtag from jsons1 where jtag->'location' is null") - tdSql.checkRows(7) - - tdSql.query("select * from jsons1 where jtag->'num' is not null") - tdSql.checkRows(2) - - tdSql.query("select * from jsons1 where jtag->'location'='null'") - tdSql.checkRows(0) - - tdSql.error("select * from jsons1 where jtag->'num'='null'") - - # test distinct - tdSql.query("select distinct jtag from jsons1") - tdSql.checkRows(6) - - tdSql.query("select distinct jtag->'location' from jsons1") - tdSql.checkRows(2) - - # test chinese - tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 
tags('{\"k1\":\"中国\",\"k5\":\"是是是\"}')") - - tdSql.query("select tbname,jtag from jsons1 where jtag->'k1' match '中'") - tdSql.checkRows(1) - - tdSql.query("select tbname,jtag from jsons1 where jtag->'k1'='中国'") - tdSql.checkRows(1) - - #test dumplicate key with normal colomn - tdSql.execute("INSERT INTO jsons1_12 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"dataStr\":\"是是是\"}') values(now, 4, \"你就会\")") - - tdSql.query("select *,tbname,jtag from jsons1 where jtag->'dataStr' match '是'") - tdSql.checkRows(1) - - tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt'") - tdSql.checkRows(1) - - # tdSql.query("select * from jsons1 where jtag->'num' is not null or jtag?'class' and jtag?'databool'") - # tdSql.checkRows(0) - - # tdSql.query("select * from jsons1 where jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%'") - - # tdSql.query("select * from jsons1 where datastr like '你就会' and ( jtag->'num' is not null or jtag?'class' and jtag?'databool' )") - - - tdSql.error("select * from jsons1 where datastr like '你就会' or jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%' ") - - # tdSql.query("select * from jsons1 where datastr like '你就会' and (jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%' )") - # tdSql.checkRows(0) - - tdSql.error("select *,tbname,jtag from jsons1 where dataBool=true") - - # test error - tdSql.error("CREATE TABLE if not exists jsons1_13 using jsons1 tags(3333)") - tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('{\"1loc\":\"fff\",\";id\":5}')") - tdSql.error("CREATE TABLE if not exists jsons1_13 using jsons1 tags('{\"。loc\":\"fff\",\"fsd\":5}')") - tdSql.error("CREATE TABLE if not exists 
jsons1_13 using jsons1 tags('{\"试试\":\"fff\",\";id\":5}')") - tdSql.error("insert into jsons1_13 using jsons1 tags(3)") - - # test query normal column - tdSql.execute("create stable if not exists jsons3(ts timestamp, dataInt3 int(100), dataBool3 bool, dataStr3 nchar(50)) tags(jtag3 json)") - tdSql.execute("create table jsons3_2 using jsons3 tags('{\"t\":true,\"t123\":123,\"\":\"true\"}')") - - tdSql.execute("create table jsons3_3 using jsons3 tags('{\"t\":true,\"t123\":456,\"k1\":true}')") - tdSql.execute("insert into jsons3_3 values(now, 4, true, 'test')") - - tdSql.execute("insert into jsons3_4 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":false,\"s\":null}') values(now, 5, true, 'test')") - tdSql.query("select * from jsons3 where jtag3->'k1'=true") - tdSql.checkRows(1) - tdSql.error("select jtag3->k1 from jsons3 ") - tdSql.error("select jtag3 from jsons3 where jtag3->'k1'") - tdSql.error("select jtag3 from jsons3 where jtag3?'k1'=true") - tdSql.error("select jtag3?'k1' from jsons3;") - tdSql.error("select jtag3?'k1'=true from jsons3;") - tdSql.error("select jtag3->'k1'=true from jsons3;") - tdSql.error("insert into jsons3_5 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":1,\"s\":null}') values(now, 5, true, 'test')") - tdSql.execute("insert into jsons3_5 using jsons3 tags('{\"t\":true,\"t123\":012,\"k1\":null,\"s\":null}') values(now, 5, true, 'test')") - tdSql.execute("insert into jsons3_6 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":false,\"s\":null}') values(now, 5, true, 'test')") - # tdSql.execute("select distinct jtag3 from jsons3 where jtag3->'t123'=12 or jtag3?'k1'") - # tdSql.checkRows(3) - - - tdSql.execute("INSERT INTO jsons1_14 using jsons1 tags('{\"tbname\":\"tt\",\"location\":\"tianjing\",\"dataStr\":\"是是是\"}') values(now,5, \"你就会\")") - - # tdSql.execute("select ts,dataint3,jtag->tbname from jsons1 where dataint>=1 and jtag->'location' in ('tianjing','123') and jtag?'tbname'") - # tdSql.checkRows(1) - # tdSql.checkData(0, 2, 'tt') - 
- # query normal column and tag column - tdSql.query("select jtag3->'',dataint3 from jsons3") - tdSql.checkRows(4) - - # query child table - - tdSql.error("select * from jsons3_2 where jtag3->'k1'=true;") - - # tdSql.checkData(0, 0, None) - # tdSql.checkRows(3) - - - - # # test drop tables and databases - # tdSql.execute("drop table jsons1_1") - # tdSql.execute("drop stable jsons1") - # tdSql.execute("drop stable jsons3") - # tdSql.execute("drop stable jsons2") - # tdSql.execute("drop database db_json_tag_test") - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py index b248eda6b0ce42d8cb681a74d5c807d0275e6e14..36221e4b7ff21b82ccf72451cfea8472952b622d 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py @@ -51,7 +51,7 @@ class TDTestCase: # insert: create one or mutiple tables per sql and insert multiple rows per sql # check the params of taosdemo about time_step is nano - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath) tdSql.execute("use testdb1") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -68,7 +68,7 @@ class TDTestCase: tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") # check the params of taosdemo about time_step is us - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath) tdSql.execute("use testdb2") 
tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -85,7 +85,7 @@ class TDTestCase: tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000") # check the params of taosdemo about time_step is ms - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath) tdSql.execute("use testdb3") tdSql.query("show stables") tdSql.checkData(0, 4, 100) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py index f069bb8f7030dbd8d4eec8c9c741d246f261671b..34acbb2c0112b56cee6a637b9e1fbd5ddb42ddf7 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -53,7 +53,7 @@ class TDTestCase: # check stable stb0 os.system( - "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % + "%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) tdSql.execute("use nsdb") tdSql.query("show stables") @@ -88,7 +88,7 @@ class TDTestCase: # check stable stb0 os.system( - "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % + "%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath) tdSql.execute("use nsdb2") @@ -108,7 +108,7 @@ class TDTestCase: # cols os.system( - "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % + "%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) tdSql.execute("use nsdbcsv") tdSql.query("show stables") @@ -130,7 +130,7 @@ class TDTestCase: # taosdemo test insert with command and parameter , detals show # taosdemo --help os.system( - "%staosdemo -u 
root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % + "%staosBenchmark -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) @@ -152,7 +152,7 @@ class TDTestCase: sleep(10) - os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) + os.system("%staosBenchmark -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py index 6c3e4d6c8a0d72a4b468cca22b2b1a6a25659db5..137cbe724310260254591c874e7bc0362f1e7f2f 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py @@ -1,4 +1,4 @@ -################################################################### +''################################################################### # Copyright (c) 2016 by TAOS Technologies, Inc. # All rights reserved. 
# @@ -49,7 +49,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # query: query test for nanoSecond with where and max min groupby order - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) tdSql.execute("use nsdb") @@ -92,10 +92,10 @@ class TDTestCase: # query : query above sqls by taosdemo and continuously - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) tdSql.execute("use nsdbcsv") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -140,7 +140,7 @@ class TDTestCase: tdSql.checkRows(10) # query : query above sqls by taosdemo and continuously - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) os.system("rm -rf ./query_res*.txt*") os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/*.py.sql") diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py index 95c1a731bcde4c78d0aa272183bd2e7b7a4b168b..44a25801210c3bc8e74c576626ec974eb20dc70b 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py @@ -73,8 +73,8 @@ class TDTestCase: # insert data - 
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) - os.system("nohup %staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath) query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1]) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py index d7926d6e5b5a3db80f3c66df0655266a5c673999..e498e74bf1fe72519676d6a329dab226d1e41ee4 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py @@ -56,7 +56,7 @@ class TDTestCase: #print("==============taosdemo,#create stable,table; insert table; show table; select table; drop table") self.tsdemo = "tsdemo~!.@#$%^*[]-_=+{,?.}" #this escape character is not support in shell . 
include & () <> | / - os.system("%staosdemo -d test -E -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) + os.system("%staosBenchmark -d test -E -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) tdSql.execute("use test ;" ) tdSql.query("select count(*) from meters") tdSql.checkData(0, 0, 1000) @@ -91,14 +91,14 @@ class TDTestCase: tdSql.error("select * from test.`%s2` ; " %self.tsdemo) # Exception - os.system("%staosdemo -d test -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) + os.system("%staosBenchmark -d test -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) tdSql.query("show test.tables ") tdSql.checkRows(0) #print("==============taosdemo,#create regular table; insert table; show table; select table; drop table") self.tsdemo = "tsdemo~!.@#$%^*[]-_=+{,?.}" #this escape character is not support in shell . include & () <> | / - os.system("%staosdemo -N -E -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) + os.system("%staosBenchmark -N -E -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) tdSql.execute("use test ;" ) tdSql.query("select count(*) from `%s1`" %self.tsdemo) tdSql.checkData(0, 0, 100) @@ -112,7 +112,7 @@ class TDTestCase: tdSql.checkRows(11) tdSql.query("show create table test.`%s1` ; " %self.tsdemo) tdSql.checkData(0, 0, self.tsdemo+str(1)) - tdSql.checkData(0, 1, "create table `%s1` (ts TIMESTAMP,c0 FLOAT,c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT)" %self.tsdemo) + tdSql.checkData(0, 1, "create table `%s1` (ts TIMESTAMP,c0 FLOAT,c1 INT,c2 FLOAT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT)" %self.tsdemo) print("==============drop table\stable") try: @@ -125,13 +125,13 @@ class TDTestCase: tdSql.checkRows(9) # Exception - os.system("%staosdemo -N -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) + os.system("%staosBenchmark -N -m %s -t 10 -n 100 -l 10 -y " % (binPath,self.tsdemo)) tdSql.query("show test.tables ") tdSql.checkRows(0) #print("==============taosdemo——json_yes,#create 
stable,table; insert table; show table; select table; drop table") - os.system("%staosdemo -f tools/taosdemoAllTest/TD-10539/create_taosdemo_yes.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-10539/create_taosdemo_yes.json -y " % binPath) tdSql.execute("use dbyes") self.tsdemo_stable = "tsdemo_stable~!.@#$%^*[]-_=+{,?.}" @@ -171,8 +171,8 @@ class TDTestCase: #print("==============taosdemo——json_no,#create stable,table; insert table; show table; select table; drop table") - assert os.system("%staosdemo -f tools/taosdemoAllTest/TD-10539/create_taosdemo_no.json -y " % binPath) == 0 - tdSql.query("show dbno.tables ") + os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-10539/create_taosdemo_no.json -y " % binPath) + tdSql.query("show dbno.tables;") tdSql.checkRows(0) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py index fe29409f296b310012773b9d78ca8735cfd52a13..e8e65b68b89c35f33e239b4121f4d99b84c796a0 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py @@ -52,13 +52,13 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # # insert 1000w rows in stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0,60) tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 6000000) - os.system('%staosdemo -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath) + os.system('%staosBenchmark -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath) time.sleep(2) query_pid = int(subprocess.getstatusoutput('ps aux|grep "TD-3453/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1]) taosd_cpu_load_1 = 
float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py index f5e2d7ce08b4804d8c5ad9745e775f0fa1ebbc1b..987dd1bfa4771b505023bdeab78994ba488d671a 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -55,7 +55,7 @@ class TDTestCase: # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-4985 os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") - os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10000) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index c9c4ae2c1b650da99853d6c82106b3f6ee80d0c0..d6e3afdea31955992cc0c9cc8842bc6ae7c6e3f6 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -15,7 +15,7 @@ "max_sql_len": 102400000, "databases": [{ "dbinfo": { - "name": "json", + "name": "json_test", "drop": "yes", "replica": 1, "days": 10, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py index eb844b6fe24338b0301c45b918967faec7debcc0..56b51f5498aed0a540a86bf03625266ad3599b58 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +++ 
b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -52,7 +52,7 @@ class TDTestCase: #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force #regular old && new startTime = time.time() - os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath) + os.system("%staosBenchmark -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath) tdSql.execute("use regular_old") tdSql.query("show tables;") tdSql.checkRows(1) @@ -61,7 +61,7 @@ class TDTestCase: tdSql.query("describe d0;") tdSql.checkRows(1024) - os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath) + os.system("%staosBenchmark -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath) tdSql.execute("use regular_new") tdSql.query("show tables;") tdSql.checkRows(1) @@ -71,7 +71,7 @@ class TDTestCase: tdSql.checkRows(4096) #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force - os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath) + os.system("%staosBenchmark -d super_old -t 1 -n 10 -l 1021 -y" % binPath) tdSql.execute("use super_old") tdSql.query("show tables;") tdSql.checkRows(1) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.query("describe d0;") tdSql.checkRows(1024) - os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath) + os.system("%staosBenchmark -d super_new -t 1 -n 10 -l 4093 -y" % binPath) tdSql.execute("use super_new") tdSql.query("show tables;") tdSql.checkRows(1) @@ -104,8 +104,8 @@ class TDTestCase: # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-5213 - os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) - tdSql.execute("use json") + os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) + tdSql.execute("use json_test") tdSql.query("select count (tbname) from stb_old") 
tdSql.checkData(0, 0, 1) diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index 0068a9c30463ff39d49cbd14d15b5d84747d0a59..d73719ebe41c5f25fc2cd585bc9974d9e83a946e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 11, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 1, @@ -61,7 +61,7 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 10, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 10, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-default.json b/tests/pytest/tools/taosdemoAllTest/insert-default.json new file mode 100644 index 0000000000000000000000000000000000000000..423f94819f6302a50835a74e7eb5bb06fbf58c94 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-default.json @@ -0,0 +1,16 @@ +{ + "filetype": "insert", + "databases": [{ + "dbinfo": { + "name": "db" + }, + "super_tables": [{ + "name": "stb0", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "insert_rows": 123, + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py index 077ced5d02c792b1c3344ea3e8b129038652b4b8..955545ac2cb2578089b705e67a97d99678182154 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py @@ -50,10 +50,10 @@ class TDTestCase: # insert: drop and 
child_table_exists combination test # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset - os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath) sleep(15) tdSql.execute("use db") tdSql.query("select count(*) from stb0") diff --git a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv deleted file mode 100644 index 5fc779b41b44eda002d246d9554f0abcea03c8d3..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv +++ /dev/null @@ -1,100 +0,0 @@ -8.855,"binary_str0" ,1626870128248246976 -8.75,"binary_str1" ,1626870128249060032 -5.44,"binary_str2" ,1626870128249067968 -8.45,"binary_str3" ,1626870128249072064 -4.07,"binary_str4" ,1626870128249075904 -6.97,"binary_str5" ,1626870128249078976 -6.86,"binary_str6" ,1626870128249082048 -1.585,"binary_str7" ,1626870128249085120 -1.4,"binary_str8" ,1626870128249087936 -5.135,"binary_str9" ,1626870128249092032 -3.15,"binary_str10" ,1626870128249095104 -1.765,"binary_str11" ,1626870128249097920 -7.71,"binary_str12" ,1626870128249100992 -3.91,"binary_str13" ,1626870128249104064 -5.615,"binary_str14" ,1626870128249106880 -9.495,"binary_str15" ,1626870128249109952 -3.825,"binary_str16" 
,1626870128249113024 -1.94,"binary_str17" ,1626870128249117120 -5.385,"binary_str18" ,1626870128249119936 -7.075,"binary_str19" ,1626870128249123008 -5.715,"binary_str20" ,1626870128249126080 -1.83,"binary_str21" ,1626870128249128896 -6.365,"binary_str22" ,1626870128249131968 -6.55,"binary_str23" ,1626870128249135040 -6.315,"binary_str24" ,1626870128249138112 -3.82,"binary_str25" ,1626870128249140928 -2.455,"binary_str26" ,1626870128249145024 -7.795,"binary_str27" ,1626870128249148096 -2.47,"binary_str28" ,1626870128249150912 -1.37,"binary_str29" ,1626870128249155008 -5.39,"binary_str30" ,1626870128249158080 -5.13,"binary_str31" ,1626870128249160896 -4.09,"binary_str32" ,1626870128249163968 -5.855,"binary_str33" ,1626870128249167040 -0.17,"binary_str34" ,1626870128249170112 -1.955,"binary_str35" ,1626870128249173952 -0.585,"binary_str36" ,1626870128249178048 -0.33,"binary_str37" ,1626870128249181120 -7.925,"binary_str38" ,1626870128249183936 -9.685,"binary_str39" ,1626870128249187008 -2.6,"binary_str40" ,1626870128249191104 -5.705,"binary_str41" ,1626870128249193920 -3.965,"binary_str42" ,1626870128249196992 -4.43,"binary_str43" ,1626870128249200064 -8.73,"binary_str44" ,1626870128249202880 -3.105,"binary_str45" ,1626870128249205952 -9.39,"binary_str46" ,1626870128249209024 -2.825,"binary_str47" ,1626870128249212096 -9.675,"binary_str48" ,1626870128249214912 -9.99,"binary_str49" ,1626870128249217984 -4.51,"binary_str50" ,1626870128249221056 -4.94,"binary_str51" ,1626870128249223872 -7.72,"binary_str52" ,1626870128249226944 -4.135,"binary_str53" ,1626870128249231040 -2.325,"binary_str54" ,1626870128249234112 -4.585,"binary_str55" ,1626870128249236928 -8.76,"binary_str56" ,1626870128249240000 -4.715,"binary_str57" ,1626870128249243072 -0.56,"binary_str58" ,1626870128249245888 -5.35,"binary_str59" ,1626870128249249984 -5.075,"binary_str60" ,1626870128249253056 -6.665,"binary_str61" ,1626870128249256128 -7.13,"binary_str62" ,1626870128249258944 -2.775,"binary_str63" 
,1626870128249262016 -5.775,"binary_str64" ,1626870128249265088 -1.62,"binary_str65" ,1626870128249267904 -1.625,"binary_str66" ,1626870128249270976 -8.15,"binary_str67" ,1626870128249274048 -0.75,"binary_str68" ,1626870128249277120 -3.265,"binary_str69" ,1626870128249280960 -8.585,"binary_str70" ,1626870128249284032 -1.88,"binary_str71" ,1626870128249287104 -8.44,"binary_str72" ,1626870128249289920 -5.12,"binary_str73" ,1626870128249295040 -2.58,"binary_str74" ,1626870128249298112 -9.42,"binary_str75" ,1626870128249300928 -1.765,"binary_str76" ,1626870128249304000 -2.66,"binary_str77" ,1626870128249308096 -1.405,"binary_str78" ,1626870128249310912 -5.595,"binary_str79" ,1626870128249315008 -2.28,"binary_str80" ,1626870128249318080 -9.24,"binary_str81" ,1626870128249320896 -9.03,"binary_str82" ,1626870128249323968 -6.055,"binary_str83" ,1626870128249327040 -1.74,"binary_str84" ,1626870128249330112 -5.77,"binary_str85" ,1626870128249332928 -1.97,"binary_str86" ,1626870128249336000 -0.3,"binary_str87" ,1626870128249339072 -7.145,"binary_str88" ,1626870128249342912 -0.88,"binary_str89" ,1626870128249345984 -8.025,"binary_str90" ,1626870128249349056 -4.81,"binary_str91" ,1626870128249351872 -0.725,"binary_str92" ,1626870128249355968 -3.85,"binary_str93" ,1626870128249359040 -9.455,"binary_str94" ,1626870128249362112 -2.265,"binary_str95" ,1626870128249364928 -3.985,"binary_str96" ,1626870128249368000 -9.375,"binary_str97" ,1626870128249371072 -0.2,"binary_str98" ,1626870128249373888 -6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv deleted file mode 100644 index 18fb855d6d9f55c29325c6ea6f77120effa72884..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv +++ /dev/null @@ -1,100 +0,0 @@ -"string0",7,8.615 -"string1",4,9.895 -"string2",3,2.92 -"string3",3,5.62 -"string4",7,1.615 -"string5",6,1.45 
-"string6",5,7.48 -"string7",7,3.01 -"string8",5,4.76 -"string9",10,7.09 -"string10",2,8.38 -"string11",7,8.65 -"string12",5,5.025 -"string13",10,5.765 -"string14",2,4.57 -"string15",2,1.03 -"string16",7,6.98 -"string17",10,0.23 -"string18",7,5.815 -"string19",1,2.37 -"string20",10,8.865 -"string21",3,1.235 -"string22",2,8.62 -"string23",9,1.045 -"string24",8,4.34 -"string25",1,5.455 -"string26",2,4.475 -"string27",1,6.95 -"string28",2,3.39 -"string29",3,6.79 -"string30",7,9.735 -"string31",1,9.79 -"string32",10,9.955 -"string33",1,5.095 -"string34",3,3.86 -"string35",9,5.105 -"string36",10,4.22 -"string37",1,2.78 -"string38",9,6.345 -"string39",1,0.975 -"string40",5,6.16 -"string41",4,7.735 -"string42",5,6.6 -"string43",8,2.845 -"string44",1,0.655 -"string45",3,2.995 -"string46",9,3.6 -"string47",8,3.47 -"string48",3,7.98 -"string49",6,2.225 -"string50",9,5.44 -"string51",4,6.335 -"string52",3,2.955 -"string53",1,0.565 -"string54",6,5.575 -"string55",6,9.905 -"string56",9,6.025 -"string57",8,0.94 -"string58",10,0.15 -"string59",8,1.555 -"string60",4,2.28 -"string61",2,8.29 -"string62",9,6.22 -"string63",6,3.35 -"string64",10,6.7 -"string65",3,9.345 -"string66",7,9.815 -"string67",1,5.365 -"string68",10,3.81 -"string69",1,6.405 -"string70",8,2.715 -"string71",3,8.58 -"string72",8,6.34 -"string73",2,7.49 -"string74",4,8.64 -"string75",3,8.995 -"string76",7,3.465 -"string77",1,7.64 -"string78",6,3.65 -"string79",6,1.4 -"string80",6,5.875 -"string81",2,1.22 -"string82",5,7.87 -"string83",9,8.41 -"string84",9,8.9 -"string85",9,3.89 -"string86",2,5.0 -"string87",2,4.495 -"string88",4,2.835 -"string89",3,5.895 -"string90",7,8.41 -"string91",5,5.125 -"string92",7,9.165 -"string93",5,8.315 -"string94",10,7.485 -"string95",7,4.635 -"string96",2,6.015 -"string97",8,0.595 -"string98",3,8.79 -"string99",4,1.72 diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/query-interrupt.py index 
270bfd8b60f559c370eb921cf74fe4f7b82ae06e..df021cbe3be5da9a0a28b78bbafef548c24697fa 100644 --- a/tests/pytest/tools/taosdemoAllTest/query-interrupt.py +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.py @@ -52,13 +52,13 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # # insert 1000w rows in stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/query-interrupt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/query-interrupt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0,60) tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 6000000) - os.system('%staosdemo -f tools/taosdemoAllTest/queryall.json -y & ' % binPath) + os.system('%staosBenchmark -f tools/taosdemoAllTest/queryall.json -y & ' % binPath) time.sleep(2) query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1]) taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json index 7ebad5e2b2f5af687656c8eed041579d7de1e2c2..c2ff21ea911f09bd459648a57a7ce4609f8bfb58 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryQps.json +++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json @@ -17,7 +17,7 @@ "result": "./query_res0.txt" }, { - "sql": "select last_row(*) from stb00_99 ", + "sql": "select last_row(*) from stb00_9 ", "result": "./query_res1.txt" }] diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..741a9f814b8a61c692343621c3dcc1117544fbed --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json @@ -0,0 +1,46 @@ +{ + "filetype": 
"insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 2592000, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json new file mode 100644 index 0000000000000000000000000000000000000000..bb21003e9340b91496b8f96014aa7b318bb44895 --- /dev/null +++ 
b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 2592000, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json new file mode 100644 index 
0000000000000000000000000000000000000000..615baad853987220ea2c76663327a2a783b4cdb4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "rand", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json 
b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..be7c182d0ebdfd377bff4a020c63a03333160b39 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff 
--git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json new file mode 100644 index 0000000000000000000000000000000000000000..f74ac693a90f48ce8cf0fceca61723861631d37a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", 
"count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..5cd06c02759ddcba93eaa8ef4ef848a9b645cbda --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json @@ -0,0 +1,142 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + 
"childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + 
"disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json similarity index 52% rename from tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json rename to tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json index e6c4b3205a77e20714067733bfa6f6c4053f087c..0885e01782b41079ccbfb7a30a8b4d3628ba9c20 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json @@ -5,23 +5,23 @@ "port": 6030, "user": "root", "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, + "thread_count": 4, + "thread_count_create_tbl": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "interlace_rows": 100, + "interlace_rows": 10, "num_of_records_per_req": 1000, "max_sql_len": 1024000, "databases": [{ "dbinfo": { - "name": "nsdb", + "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8, - "precision": "ns", + "precision": "ms", "keep": 36500, "minRows": 100, "maxRows": 4096, @@ -35,54 +35,54 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb0_", + "childtable_count": 10, + "childtable_prefix": "stb00_", "auto_create_table": "no", - "batch_create_tbl_num": 20, + "batch_create_tbl_num": 10, "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100, + "insert_mode": "sml", + "insert_rows": 150, + "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, "insert_interval":0, "max_sql_len": 
1024000, "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "", - "sample_file": "", + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, - {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, - {"type": "BOOL"},{"type": "NCHAR","len":16}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }, { "name": "stb1", "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb1_", + "childtable_count": 15, + "childtable_prefix": "stb01_", "auto_create_table": "no", - "batch_create_tbl_num": 20, + "batch_create_tbl_num": 10, "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100, + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, "insert_interval":0, "max_sql_len": 1024000, - "disorder_ratio": 10, + "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "", - "sample_file": "", + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": 
"BINARY", "len": 32, "count":2}, - {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, - {"type": "BOOL"},{"type": "NCHAR","len":16}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..5be20c28bba11ff40296d062f93ab4fda57a1f88 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + 
"insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "TIMESTAMP"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 16, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 16, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json new file mode 100644 index 
0000000000000000000000000000000000000000..6f24801cb04f9f515e33898fb587b95029def325 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 100, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + 
"disorder_ratio": 100, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..92e6ec0df7a70329312676298c3b5ffccc2a8767 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": 
"./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "NNN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + 
"childtable_count": 20, + "childtable_prefix": "NYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..c09493ec7b892baba37a7be4addb0ce526752f07 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + 
"cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YNN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "YNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + 
"tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "YYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/src/kit/taosdemo/insert.json 
b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json similarity index 75% rename from src/kit/taosdemo/insert.json rename to tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json index 43c729502cbf9ac11e138d9cbea60e459d3c27e5..e04f2ff5e7cb24cb5384b7451712b3fe83bf18c3 100644 --- a/src/kit/taosdemo/insert.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json @@ -10,7 +10,8 @@ "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "num_of_records_per_req": 100, + "interlace_rows": 100, + "num_of_records_per_req": 1000, "max_sql_len": 1024000, "databases": [{ "dbinfo": { @@ -18,7 +19,7 @@ "drop": "yes", "replica": 1, "days": 10, - "cache": 16, + "cache": 50, "blocks": 8, "precision": "ms", "keep": 36500, @@ -32,15 +33,20 @@ "update": 0 }, "super_tables": [{ - "name": "stb", + "name": "stb0", "child_table_exists":"no", - "childtable_count": 10000, - "childtable_prefix": "stb_", + "childtable_count": 100, + "childtable_prefix": "stb00_", "auto_create_table": "no", + "batch_create_tbl_num": 20, "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100000, - "interlace_rows": 0, + "insert_mode": "sml", + "insert_rows": 150, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 151, + "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..4a4227adb8fdcd0cb025a10c5b6f417c921acd96 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + 
"thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 100, + "interlace_rows": 0, + "num_of_records_per_req": 2000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 35, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval": 200, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, 
{"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..1d29842e02c654987c50e6e73d4aec5eed48aa83 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": 
[{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + 
"childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..886503a950ca18b752bfa264218bb8564ce44ae0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 
16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + 
"data_source": "rand", + "insert_mode": "sml", + "insert_rows": 30, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..ca99d135c5f466c911f3063b88fbb3e58c4e4ed4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbno", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + 
"childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..d0109b50cf449b0e7e1b258ae29723a560b1d2f6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 
1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"yes", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": 
"BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset":7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json new file mode 100644 index 
0000000000000000000000000000000000000000..f8f3a8ee5cea1834c31ebb275a10977cd960f829 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + 
"multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": 
"INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..780fd60bb7e98f18a5c33798b6bb35a77e1d85db --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbtest123", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + 
"walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "./tools/taosdemoAllTest/tags.csv", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json 
b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json new file mode 100644 index 0000000000000000000000000000000000000000..2de298efa6553ec0c6de095ee0515a73e777445f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json @@ -0,0 +1,270 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT", "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 4, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, 
+ "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "BIGINT", "count":1}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "SMALLINT", "count":1}] + }, + { + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 40, + "childtable_prefix": "stb05_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + 
"interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "FLOAT", "count":1}] + }, + { + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 15, + "childtable_prefix": "stb06_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "DOUBLE", "count":1}] + }, + { + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb07_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "BOOL"}], + "tags": [{"type": "BOOL", "count":1}] + }, + { + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb08_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + 
"childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "NCHAR","len": 16, "count":1}], + "tags": [{"type": "NCHAR", "count":1}] + }, + { + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb09_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "BINARY", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json new file mode 100644 index 0000000000000000000000000000000000000000..ff825440e5cbfd8aa5d8d6e74538c5802af8af38 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json @@ -0,0 +1,374 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + 
"keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UINT"}], + "tags": [{"type": "UINT", "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 4, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 
00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "BIGINT", "count":1}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":30, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "SMALLINT", "count":1}] + }, + { + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 15, + "childtable_prefix": "stb05_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":20, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + 
"timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "FLOAT", "count":1}] + }, + { + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb06_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":10, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "DOUBLE", "count":1}] + }, + { + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 30, + "childtable_prefix": "stb07_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":5, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "BOOL"}], + "tags": [{"type": "BOOL", "count":1}] + }, + { + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb08_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":30, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + 
"disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "NCHAR","len": 16, "count":1}], + "tags": [{"type": "NCHAR", "count":1}] + }, + { + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb09_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "BINARY", "count":1}] + }, + { + "name": "stb10", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb10_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UBIGINT"}], + "tags": [{"type": "UBIGINT", "count":1}] + }, + { + "name": "stb11", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb11_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + 
"insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UTINYINT"}], + "tags": [{"type": "UTINYINT", "count":1}] + }, + { + "name": "stb12", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb12_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "USMALLINT"}], + "tags": [{"type": "USMALLINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..1d496b6b46bf3df3c4312bacafbfb77125491058 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + 
"quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json new file mode 100644 index 
0000000000000000000000000000000000000000..83689d6c40e3844707cc367431f37f4f8ec144d5 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10240000000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16374, "count":1}], + "tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + 
"insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + 
"childtable_count": 1, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 100, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..28f566833fc8958d364ee867c7628d573b4bf8ee --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 0, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": 
"sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..8f27feba6be7e3018461b0070420cc759cf8fc72 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + 
"max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": -1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json similarity index 
59% rename from tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json rename to tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json index fd458a88d1a434c22958d5086949009cdd6080bf..2e4063cf272ba18732f0e456362cb1103ba6d5c4 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json @@ -5,17 +5,17 @@ "port": 6030, "user": "root", "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, + "thread_count": 4, + "thread_count_create_tbl": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "interlace_rows": 100, + "interlace_rows": 0, "num_of_records_per_req": 1000, - "max_sql_len": 1024000, + "max_sql_len": 10240000000, "databases": [{ "dbinfo": { - "name": "testdb3", + "name": "db", "drop": "yes", "replica": 1, "days": 10, @@ -35,29 +35,28 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb0_", + "childtable_count": 10, + "childtable_prefix": "stb00_", "auto_create_table": "no", - "batch_create_tbl_num": 20, + "batch_create_tbl_num": 10, "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100, + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 1000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "", - "sample_file": "", + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, - {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", 
"count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, - {"type": "BOOL"},{"type": "NCHAR","len":16}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] }] }] } - diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..c6fe0300f535a2b9d798b09853f0ad333e3bbcfd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + 
"disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..92e88141ca09971d0d202ee488471c14e07d4cd3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 
00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json similarity index 58% rename from tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json rename to tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json index 3b4c43d5d05ee1a1b26ee4016b1c38aade592b56..18f1a39e0afcdae3d52e4bc4a4a97e15dbcfda37 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json @@ -5,23 +5,23 @@ "port": 6030, "user": "root", "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, + "thread_count": 4, + "thread_count_create_tbl": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "interlace_rows": 100, + "interlace_rows": 0, "num_of_records_per_req": 1000, - "max_sql_len": 1024000, + "max_sql_len": 10240000000, "databases": [{ "dbinfo": { - "name": "nsdb2", + "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8, - "precision": "ns", + "precision": "ms", "keep": 36500, "minRows": 100, "maxRows": 4096, @@ -35,28 +35,28 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb0_", + "childtable_count": 10, + "childtable_prefix": "stb00_", "auto_create_table": "no", - "batch_create_tbl_num": 20, + "batch_create_tbl_num": 10, "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100, + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, 
"childtable_offset":0, "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 10, - "start_timestamp": "now", - "sample_format": "", - "sample_file": "", + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, - {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, - {"type": "BOOL"},{"type": "NCHAR","len":16}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json new file mode 100644 index 0000000000000000000000000000000000000000..01ec546012ad04f94cfb6224048fffd89d5cbbc8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + 
"blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..d950a260f6ed3ad4a9ed53bc859304a71e5a680a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": 
"/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git 
a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..0deed5ba5420a1dd9a1efddbb6e1e7a757dc10d0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 0, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + 
"insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..9d1d1ee71898d5e80a7310822da00de6c4636746 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": -1, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, 
+ "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/src/kit/taosdemo/insert-interlace.json b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json similarity index 65% rename from src/kit/taosdemo/insert-interlace.json rename to tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json index cf3e1de2f4a76f5cc242399b9a268c95c2dca878..f732d2e0c5575740dc3d1eeade05e09de8860faf 100644 --- a/src/kit/taosdemo/insert-interlace.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json @@ -9,19 +9,20 @@ "thread_count_create_tbl": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", - "insert_interval": 1000, + "insert_interval": 0, + "interlace_rows": 10, "num_of_records_per_req": 100, - "max_sql_len": 1024000, + "max_sql_len": 10240000000, "databases": [{ "dbinfo": { "name": "db", "drop": "yes", "replica": 1, "days": 10, - "cache": 16, + 
"cache": 50, "blocks": 8, "precision": "ms", - "keep": 365, + "keep": 36500, "minRows": 100, "maxRows": 4096, "comp":2, @@ -32,15 +33,20 @@ "update": 0 }, "super_tables": [{ - "name": "stb", + "name": "stb0", "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "stb_", + "childtable_count": 10, + "childtable_prefix": "stb00_", "auto_create_table": "no", + "batch_create_tbl_num": 10, "data_source": "rand", - "insert_mode": "taosc", + "insert_mode": "sml", "insert_rows": 1000, - "interlace_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -49,8 +55,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}], - "tags": [{"type": "TINYINT", "count":1}] + "columns": [{"type": "DOUBLE", "count":4096}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..24f468d719546733b900ecbd283f2904e96d222f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000000, + "max_sql_len": 1024000000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + 
"walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 10000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..07e625dad3a28929a63475aa18310ff6d5b24cc6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json @@ -0,0 +1,65 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "databases": [ + { + "dbinfo": { + "name": "blf", + "drop": "yes" + }, + "super_tables": [ + { + "name": "p_0_topics", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "p_0_topics_", + "auto_create_table": "no", + "data_source": "rand", + 
"insert_mode": "sml", + "insert_rows": 525600, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 60000, + "start_timestamp": "2019-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + { + "type": "INT", + "count": 1 + }, + { + "type": "FLOAT", + "count": 1 + }, + { + "type": "BINARY", + "len": 12, + "count": 1 + } + ], + "tags": [ + { + "type": "BINARY", + "len": 12, + "count": 10 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py index 270eea17cb6c913719fb67c4b8f33065b0a0445d..e1a9f647fa2b5d1f561e50a06b9755d78bebfa79 100644 --- a/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py +++ b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py @@ -68,7 +68,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -f tools/taosdemoAllTest/sub_no_result.json -g 2>&1 | tee sub_no_result.log" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/sub_no_result.json -g 2>&1 | tee sub_no_result.log" % binPath) test_line = int(self.execCmdAndGetOutput("cat sub_no_result.log | wc -l")) if(test_line < 1100024): tdLog.exit("failed test subscribeNoResult: %d != expected(1100024)" % test_line) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json deleted file mode 100644 index 14bb9e9be07d9bd61dc089af0bb34acd523155d9..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, - "result_file": 
"./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 100, - "num_of_records_per_req": 1000, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "testdb2", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 50, - "blocks": 8, - "precision": "us", - "keep": 36500, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - "name": "stb0", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb0_", - "auto_create_table": "no", - "batch_create_tbl_num": 20, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "", - "sample_file": "", - "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, - {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, - {"type": "BOOL"},{"type": "NCHAR","len":16}], - "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] - }] - }] -} - diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertAllType.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertAllType.py index 1e5794dc6d41188a861c9960f0a3e06bc346a1da..3bb290fdd0550bbebd95ebd9c30ee34272808281 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertAllType.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertAllType.py @@ -49,7 +49,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # insert: create one or mutiple tables per sql 
and insert multiple rows per sql - os.system("%staosdemo -f tools/taosdemoAllTest/insert-allDataType.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-allDataType.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1000) @@ -65,7 +65,7 @@ class TDTestCase: tdSql.checkData(0, 0, 200000) # stmt interface - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-allDataType-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1000) @@ -81,7 +81,7 @@ class TDTestCase: tdSql.checkData(0, 0, 200000) # taosdemo command line - os.system("%staosdemo -t 1000 -n 100 -T 10 -b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,NCHAR,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY -y " % binPath) + os.system("%staosBenchmark -t 1000 -n 100 -T 10 -b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,NCHAR,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY -y " % binPath) tdSql.execute("use test") tdSql.query("select count (tbname) from meters") tdSql.checkData(0, 0, 1000) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py deleted file mode 100644 index 7b3b865df91f87622737eede640ec79e880e433b..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py +++ /dev/null @@ -1,115 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - - # insert: create one or mutiple tables per sql and insert multiple rows per sql - - # check the params of taosdemo about time_step is nano - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath) - tdSql.execute("use testdb1") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.getData(9, 1) - tdSql.checkDataType(9, 1,"TIMESTAMP") - tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") - - # check the params of 
taosdemo about time_step is us - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath) - tdSql.execute("use testdb2") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.getData(9, 1) - tdSql.checkDataType(9, 1,"TIMESTAMP") - tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000") - - # check the params of taosdemo about time_step is ms - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath) - tdSql.execute("use testdb3") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.checkDataType(9, 1,"TIMESTAMP") - tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") - - - os.system("rm -rf ./res.txt") - os.system("rm -rf ./*.py.sql") - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 0e68e2e88078d3239ceba8d88200e7ea5b1cffe4..2aaa5795866e03ab0bf4d3dbf6c0e431ebd604d3 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -48,24 +48,28 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ 
"/build/bin/" + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert*_res.txt*") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + # insert: create one or mutiple tables per sql and insert multiple rows per sql - os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 11) tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 10) tdSql.query("select count(*) from stb00_0") tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 1100) tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 200000) + tdSql.checkData(0, 0, 2000) # restful connector insert data - os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) @@ -80,9 +84,19 @@ class TDTestCase: tdSql.query("select count(*) from stb1") tdSql.checkData(0, 0, 200) - + # default values json files + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-default.json -y " % binPath) + tdSql.query("show databases;") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == 'db': + tdSql.checkData(i, 2, 100) + tdSql.checkData(i, 4, 1) + tdSql.checkData(i, 6, 10) + tdSql.checkData(i, 16, 'ms') + # insert: create mutiple tables per sql and insert one rows per sql . 
- os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) @@ -99,7 +113,7 @@ class TDTestCase: # insert: using parament "insert_interval to controls spped of insert. # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 - os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -117,9 +131,9 @@ class TDTestCase: # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset - os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) tdSql.error("show dbno.stables") - os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 5) @@ -131,7 +145,7 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") tdSql.checkData(0, 0, 8) - os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -143,7 +157,7 @@ class TDTestCase: tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") 
tdSql.checkData(0, 0, 160) - os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 150) @@ -155,7 +169,7 @@ class TDTestCase: tdSql.checkData(0, 0, 340) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 400) - os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -171,30 +185,30 @@ class TDTestCase: # insert: let parament in json file is illegal, it'll expect error. tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath) tdSql.error("select * from db.stb0") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkData(0, 0, 10000) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(0) tdSql.execute("drop database 
if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables like 'stb0%' ") tdSql.checkData(0, 2, 11) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) tdSql.error("use db1") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(1) tdSql.query("select count(*) from db.stb1") @@ -204,19 +218,19 @@ class TDTestCase: tdSql.query("select count(*) from db.stb3") tdSql.checkRows(1) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) tdSql.error("select count(*) from db.stb0") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) + os.system("%staosBenchmark -f 
tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists blf") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) tdSql.execute("use blf") tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") @@ -224,7 +238,7 @@ class TDTestCase: tdSql.checkData(0, 0, "2019-10-01 00:00:00") tdSql.query("select last(ts) from blf.p_0_topics_6 ") tdSql.checkData(0, 0, "2020-09-29 23:59:00") - os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 5000000) @@ -234,7 +248,7 @@ class TDTestCase: # insert: timestamp and step - os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.query("select count (tbname) from stb0") @@ -251,7 +265,7 @@ class TDTestCase: tdSql.checkData(0, 0, 400) # # insert: disorder_ratio - os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1) @@ -263,7 +277,7 @@ class TDTestCase: tdSql.checkData(0, 0, 10) # insert: sample json - os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample-ts.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample-ts.json -y " % binPath) tdSql.execute("use dbtest123") tdSql.query("select c2 from stb0") 
tdSql.checkData(0, 0, 2147483647) @@ -282,7 +296,7 @@ class TDTestCase: tdSql.checkRows(10) # insert: sample json - os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) tdSql.execute("use dbtest123") tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) @@ -295,7 +309,7 @@ class TDTestCase: # insert: test interlace parament - os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 100) @@ -308,7 +322,7 @@ class TDTestCase: tdSql.execute('drop database if exists db') tdSql.execute('create database db') tdSql.execute('use db') - os.system("%staosdemo -y -f tools/taosdemoAllTest/insert-drop-exist-auto-N00.json " % binPath) # drop = no, child_table_exists, auto_create_table varies + os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-N00.json " % binPath) # drop = no, child_table_exists, auto_create_table varies tdSql.execute('use db') tdSql.query('show tables like \'NN123%\'') #child_table_exists = no, auto_create_table varies = 123 tdSql.checkRows(20) @@ -324,7 +338,7 @@ class TDTestCase: tdSql.checkRows(0) tdSql.execute('drop database if exists db') - os.system("%staosdemo -y -f tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies + os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies tdSql.execute('use db') tdSql.query('show tables like \'YN123%\'') #child_table_exists = no, auto_create_table varies = 123 tdSql.checkRows(20) @@ -339,9 +353,10 @@ class TDTestCase: tdSql.query('show tables like \'YYY%\'') 
#child_table_exists = yes, auto_create_table varies = yes tdSql.checkRows(20) - testcaseFilename = os.path.split(__file__)[-1] - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + # rm useless files + os.system("rm -rf ./insert*_res.txt*") + + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py new file mode 100644 index 0000000000000000000000000000000000000000..4c2baf5a11d3f5dff3a98664be11cac78ebb9c6b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py @@ -0,0 +1,264 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + 
tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # line_protocol——telnet and json + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 4000) + + + # insert: create mutiple tables per sql and insert one rows per sql . + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 15) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 150) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 1500) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 3000) + + # insert: using parament "insert_interval to controls spped of insert. 
+ # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-interval-speed-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select tbname from stb0") + tdSql.checkRows(100 ) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2000) + tdSql.query("show stables") + tdSql.checkData(1, 4, 20) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 35) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 700) + + # spend 2min30s for 3 testcases. + # insert: drop and child_table_exists combination test + # insert: sml can't support parament "childtable_offset and childtable_limit" \ drop=no or child_table_exists = yes + + # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json -y" % binPath) + # tdSql.error("show dbno.stables") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-newdb-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 5) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 6) + tdSql.query("select count (tbname) from stb2") + tdSql.checkData(0, 0, 7) + tdSql.query("select count (tbname) from stb3") + tdSql.checkData(0, 0, 8) + tdSql.query("select count (tbname) from stb4") + tdSql.checkData(0, 0, 8) + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-renewdb-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 50) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 120) + tdSql.query("select count(*) from stb2") + tdSql.checkData(0, 0, 140) + tdSql.query("select count(*) from stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from stb4") + tdSql.checkData(0, 
0, 160) + + + # insert: let parament in json file is illegal, it'll expect error. + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json -y " % binPath) + tdSql.error("select * from db.stb0") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(0) + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("show stables like 'stb0%' ") + tdSql.checkData(0, 2, 11) + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json -y " % binPath) + tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(1) + tdSql.query("select count(*) from db.stb1") + tdSql.checkRows(1) + tdSql.error("select * from db.stb4") + tdSql.error("select * from db.stb2") + tdSql.query("select count(*) from db.stb3") + tdSql.checkRows(1) + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + 
tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertChildTab0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists blf") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") + tdSql.query("select first(ts) from blf.p_0_topics_2") + tdSql.checkData(0, 0, "2019-10-01 00:00:00") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.checkData(0, 0, "2020-09-29 23:59:00") + # it will be commented in ci because it spend too much time to insert data, but when you can excute it when you want to test this case. 
+ # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 5000000) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 5000000) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 5000000) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 5000000) + + + # insert: timestamp and step + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-timestep-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select last(ts) from db.stb00_0") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 200) + tdSql.query("select last(ts) from db.stb01_0") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400) + + # # insert: disorder_ratio + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-disorder-sml.json 2>&1 -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10) + + # insert: sample json + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-sample-sml.json -y " % binPath) + tdSql.execute("use dbtest123") + tdSql.query("select c2 from stb0") + tdSql.checkData(0, 0, 2147483647) + tdSql.query("select * from stb1 where t1=-127") + 
tdSql.checkRows(20) + tdSql.query("select * from stb1 where t2=127") + tdSql.checkRows(10) + tdSql.query("select * from stb1 where t2=126") + tdSql.checkRows(10) + + # insert: test interlace parament + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-interlace-row-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count (*) from stb0") + tdSql.checkData(0, 0, 15000) + + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py index f09880ab727d9a197fb602663da1dc4c6fff7bb7..5dba103ef9eafaf15d3159cae94e2b3a264cd8a9 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py @@ -49,7 +49,7 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # insert: create one or mutiple tables per sql and insert multiple rows per sql - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1000) @@ -66,7 +66,7 @@ class TDTestCase: # insert: create mutiple tables per sql and insert one rows per sql . 
- os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 10) @@ -83,7 +83,7 @@ class TDTestCase: # insert: using parament "insert_interval to controls spped of insert. # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json -y" % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -101,9 +101,9 @@ class TDTestCase: # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json -y" % binPath) tdSql.error("show dbno.stables") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newdb-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-newdb-stmt.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 5) @@ -115,7 +115,7 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") tdSql.checkData(0, 0, 8) - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-offset-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-offset-stmt.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -127,7 +127,7 @@ 
class TDTestCase: tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newtable-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-newtable-stmt.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 150) @@ -139,7 +139,7 @@ class TDTestCase: tdSql.checkData(0, 0, 340) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 400) - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json -y" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json -y" % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 50) @@ -155,29 +155,29 @@ class TDTestCase: # insert: let parament in json file is illegal, it'll expect error. tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath) tdSql.error("select * from db.stb0") # tdSql.execute("drop database if exists db") - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath) # tdSql.query("select count(*) from db.stb0") # tdSql.checkData(0, 0, 10000) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json -y " 
% binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(0) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables like 'stb0%' ") tdSql.checkData(0, 2, 11) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json -y " % binPath) tdSql.error("use db1") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(1) tdSql.query("select count(*) from db.stb1") @@ -187,19 +187,19 @@ class TDTestCase: tdSql.query("select count(*) from db.stb3") tdSql.checkRows(1) tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json -y " % binPath) tdSql.error("select count(*) from db.stb0") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f 
tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists blf") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json -y " % binPath) tdSql.execute("use blf") tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") @@ -207,7 +207,7 @@ class TDTestCase: tdSql.checkData(0, 0, "2019-10-01 00:00:00") tdSql.query("select last(ts) from blf.p_0_topics_6 ") tdSql.checkData(0, 0, "2020-09-29 23:59:00") - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 5000000) @@ -216,7 +216,7 @@ class TDTestCase: # insert: sample json - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json -y " % binPath) tdSql.execute("use dbtest123") tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) @@ -235,7 +235,7 @@ class TDTestCase: tdSql.checkRows(10) # insert: timestamp and step - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-timestep-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-timestep-stmt.json -y " % binPath) 
tdSql.execute("use db") tdSql.query("show stables") tdSql.query("select count (tbname) from stb0") @@ -252,7 +252,7 @@ class TDTestCase: tdSql.checkData(0, 0, 400) # # insert: disorder_ratio - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-disorder-stmt.json 2>&1 -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-disorder-stmt.json 2>&1 -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 1) @@ -264,7 +264,7 @@ class TDTestCase: tdSql.checkData(0, 0, 10) # insert: sample json - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) tdSql.execute("use dbtest123") tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) @@ -276,7 +276,7 @@ class TDTestCase: tdSql.checkRows(10) # insert: test interlace parament - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 100) @@ -289,7 +289,7 @@ class TDTestCase: tdSql.execute('drop database if exists db') tdSql.execute('create database db') tdSql.execute('use db') - os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json " % binPath) # drop = no, child_table_exists, auto_create_table varies + os.system("%staosBenchmark -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json " % binPath) # drop = no, child_table_exists, auto_create_table varies tdSql.execute('use db') tdSql.query('show tables like \'NN123%\'') #child_table_exists = no, auto_create_table varies = 123 tdSql.checkRows(20) @@ -305,7 +305,7 @@ class TDTestCase: tdSql.checkRows(0) tdSql.execute('drop database if exists db') - 
os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies + os.system("%staosBenchmark -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies tdSql.execute('use db') tdSql.query('show tables like \'YN123%\'') #child_table_exists = no, auto_create_table varies = 123 tdSql.checkRows(20) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmtPerformance.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmtPerformance.py index bc8cf74ab21c20be24dcd4c9c53d9dc01b49f1c4..ff22ffe5a56bf69559d77bc202f289d4349e1e38 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmtPerformance.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmtPerformance.py @@ -49,16 +49,16 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" # insert: create one or mutiple tables per sql and insert multiple rows per sql - os.system("%staosdemo -f tools/taosdemoAllTest/stmt/1174-small-stmt-random.json -y " % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/1174-small-stmt-random.json -y " % binPath) # sleep(60) - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/1174-small-taosc.json -y " % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/1174-small-taosc.json -y " % binPath) # sleep(60) - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/1174-small-stmt.json -y " % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/1174-small-stmt.json -y " % binPath) # sleep(60) - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/1174-large-taosc.json -y " % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/1174-large-taosc.json -y " % binPath) # sleep(60) - # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/1174-large-stmt.json -y " % binPath) + # 
os.system("%staosBenchmark -f tools/taosdemoAllTest/stmt/1174-large-stmt.json -y " % binPath) # tdSql.execute("use db") # tdSql.query("select count (tbname) from stb0") diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json deleted file mode 100644 index a19132b1da9c99b8fe3792a1c2d475fd4f18ef91..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 100, - "num_of_records_per_req": 1000, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "subnsdb", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 50, - "blocks": 8, - "precision": "ns", - "keep": 36500, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - "name": "stb0", - "child_table_exists":"no", - "childtable_count": 10, - "childtable_prefix": "tb0_", - "auto_create_table": "no", - "batch_create_tbl_num": 20, - "data_source": "samples", - "insert_mode": "taosc", - "insert_rows": 10, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", - "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", - "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], - 
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] - }, - { - "name": "stb1", - "child_table_exists":"no", - "childtable_count": 10, - "childtable_prefix": "tb1_", - "auto_create_table": "no", - "batch_create_tbl_num": 20, - "data_source": "samples", - "insert_mode": "taosc", - "insert_rows": 10, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 10, - "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", - "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", - "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], - "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] - }] - }] -} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json deleted file mode 100644 index 7fb90727ef6fa38da73639ebe11125924b9ed507..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 10, - "thread_count_create_tbl": 10, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 100, - "num_of_records_per_req": 1000, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "nsdbcsv", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 50, - "blocks": 8, - "precision": "ns", - "keep": 36500, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - 
"name": "stb0", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb0_", - "auto_create_table": "no", - "batch_create_tbl_num": 20, - "data_source": "samples", - "insert_mode": "taosc", - "insert_rows": 100, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", - "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", - "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], - "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] - }, - { - "name": "stb1", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "tb1_", - "auto_create_table": "no", - "batch_create_tbl_num": 20, - "data_source": "samples", - "insert_mode": "taosc", - "insert_rows": 100, - "childtable_offset":0, - "multi_thread_write_one_tbl": "no", - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 10, - "disorder_range": 1000, - "timestamp_step": 10000000, - "start_timestamp": "2021-07-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./tools/taosdemoAllTest/nano_samples.csv", - "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv", - "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], - "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] - }] - }] -} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py index 3a3152ecde3c4eca09d8b8583cf90bbfdc0cc31d..e0b56b93ba0ed2c1e0a3e25bdc176059ea1ef61a 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +++ 
b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py @@ -52,8 +52,8 @@ class TDTestCase: self.expectResult = expectResult with open("%s" % filename, 'r+') as f1: for line in f1.readlines(): - queryResult = line.strip().split()[0] - self.assertCheck(filename, queryResult, expectResult) + queryResultTaosc = line.strip().split()[0] + self.assertCheck(filename, queryResultTaosc, expectResult) # 获取restful接口查询的结果文件中的关键内容,目前的关键内容找到第一个key就跳出循,所以就只有一个数据。后续再修改多个结果文件。 def getfileDataRestful(self, filename): @@ -65,9 +65,12 @@ class TDTestCase: pattern = re.compile("{.*}") contents = pattern.search(contents).group() contentsDict = ast.literal_eval(contents) # 字符串转换为字典 - queryResult = contentsDict['data'][0][0] + queryResultRest = contentsDict['data'][0][0] break - return queryResult + else : + queryResultRest = "" + return queryResultRest + # 获取taosc接口查询次数 def queryTimesTaosc(self, filename): @@ -105,10 +108,10 @@ class TDTestCase: # taosc query: query specified table and query super table os.system( - "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) os.system( - "%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryTaosc.json" % binPath) os.system("cat query_res0.txt* > all_query_res0_taosc.txt") os.system("cat query_res1.txt* > all_query_res1_taosc.txt") @@ -135,10 +138,10 @@ class TDTestCase: # use restful api to query os.system( - "%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryInsertrestdata.json" % binPath) os.system( - "%staosdemo -f tools/taosdemoAllTest/queryRestful.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryRestful.json" % binPath) os.system("cat query_res0.txt* > all_query_res0_rest.txt") os.system("cat query_res1.txt* > all_query_res1_rest.txt") @@ -172,54 +175,55 @@ class TDTestCase: # query times less than or equal to 100 os.system( - 
"%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) os.system( - "%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % + "%staosBenchmark -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath) os.system( - "%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % + "%staosBenchmark -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath) # query result print QPS os.system( - "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) - os.system( - "%staosdemo -f tools/taosdemoAllTest/queryQps.json" % + exceptcode = os.system( + "%staosBenchmark -f tools/taosdemoAllTest/queryQps.json" % binPath) + assert exceptcode == 0 # use illegal or out of range parameters query json file os.system( - "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) exceptcode = os.system( - "%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryTimes0.json" % binPath) assert exceptcode != 0 exceptcode0 = os.system( - "%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryTimesless0.json" % binPath) assert exceptcode0 != 0 exceptcode1 = os.system( - "%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryConcurrentless0.json" % binPath) assert exceptcode1 != 0 exceptcode2 = os.system( - "%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/queryConcurrent0.json" % binPath) assert exceptcode2 != 0 exceptcode3 = os.system( - "%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/querrThreadsless0.json" % binPath) assert exceptcode3 != 0 exceptcode4 = 
os.system( - "%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" % + "%staosBenchmark -f tools/taosdemoAllTest/querrThreads0.json" % binPath) assert exceptcode4 != 0 @@ -228,7 +232,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/*.py.sql") os.system("rm -rf ./querySystemInfo*") os.system("rm -rf ./query_res*") -# os.system("rm -rf ./all_query*") + os.system("rm -rf ./all_query*") os.system("rm -rf ./test_query_res0.txt") def stop(self): diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py index 3e967581a4491da4108b981ccd83949751406b82..265f50237bf2b5dae80eccc94aad885a7b84f5f2 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py @@ -74,8 +74,8 @@ class TDTestCase: os.system("rm -rf ./all_subscribe_res*") sleep(2) # subscribe: sync - os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) - os.system("nohup %staosdemo -f tools/taosdemoAllTest/subSync.json &" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdata.json" % binPath) + os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/subSync.json &" % binPath) query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subSync.json" |grep -v "grep"|awk \'{print $2}\'')[1]) # insert extral data @@ -112,21 +112,21 @@ class TDTestCase: os.system("rm -rf ./all_subscribe*") # # sql number lager 100 - os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) - assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0 - assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 + os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + assert os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % 
binPath) != 0 + assert os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 # # result files is null - # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) - # os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) - # # assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) != 0 + # os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) + # # assert os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) != 0 # resubAfterConsume= -1 endAfter=-1 ; os.system('kill -9 `ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'` ') - os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json & " % binPath) + os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json & " % binPath) sleep(2) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'')[1]) print("get sub1 process'pid") @@ -144,9 +144,9 @@ class TDTestCase: os.system("rm -rf ./subscribe_res*") # # resubAfterConsume= -1 endAfter=0 ; - # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) # os.system('kill -9 `ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'` ') - # os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json & " % binPath) + # os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json & " % binPath) # sleep(2) # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep 
-v "grep"|awk \'{print $2}\'')[1]) # print("get sub2 process'pid") @@ -185,8 +185,8 @@ class TDTestCase: # self.assertCheck("all_subscribe_res2.txt",subTimes2 ,1900) - # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) - # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py index f2aa01e8703d9703d647507736130de2dd582bfb..b236b2b48b93f210f0e73ebb5be240413f82d878 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py @@ -72,8 +72,8 @@ class TDTestCase: os.system("rm -rf ./all_subscribe_res*") # subscribe: resultfile - os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) - os.system("nohup %staosdemo -f tools/taosdemoAllTest/subAsync.json &" % binPath) + os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdata.json" % binPath) + os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/subAsync.json &" % binPath) query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subAsync.json" |grep -v "grep"|awk \'{print $2}\'')[1]) # insert extral data @@ -106,9 +106,9 @@ class TDTestCase: os.system("kill -9 %d" % query_pid) # # query times less than or equal to 100 - os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) - assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0 - assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 + os.system("%staosBenchmark -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + 
assert os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0 + assert os.system("%staosBenchmark -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 # delete useless files os.system("rm -rf ./insert_res.txt") diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py deleted file mode 100644 index d8c68af0f9b43443744d7d799db6f5ee1e1dacaa..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ /dev/null @@ -1,168 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - return buildPath - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - 
tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath + "/build/bin/" - - # insert: create one or mutiple tables per sql and insert multiple rows per sql - # insert data from a special timestamp - # check stable stb0 - - os.system( - "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % - binPath) - tdSql.execute("use nsdb") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.checkDataType(9, 1, "TIMESTAMP") - tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") - - # check stable stb1 which is insert with disord - - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb1_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10000) - # check c8 is an nano timestamp - tdSql.query("describe stb1") - tdSql.checkDataType(9, 1, "TIMESTAMP") - # check insert timestamp_step is nano_second - tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") - - # insert data from now time - - # check stable stb0 - os.system( - "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % - binPath) - - tdSql.execute("use nsdb2") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from tb0_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - # check c8 is an nano timestamp - tdSql.query("describe stb0") - tdSql.checkDataType(9, 1, "TIMESTAMP") - - # insert by csv files and timetamp is long int , strings in ts and - 
# cols - - os.system( - "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % - binPath) - tdSql.execute("use nsdbcsv") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.checkDataType(3, 1, "TIMESTAMP") - tdSql.query( - "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") - tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") - tdSql.checkData(0, 0, 10000) - - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") - - # taosdemo test insert with command and parameter , detals show - # taosdemo --help - os.system( - "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % - binPath) - tdSql.query("select count(*) from test.meters") - tdSql.checkData(0, 0, 600) - # check taosdemo -s - - sqls_ls = [ - 'drop database if exists nsdbsql;', - 'create database nsdbsql precision "ns" keep 36500 days 6 update 1;', - 'use nsdbsql;', - 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', - 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] - - with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files: - for sql in sqls_ls: - sql_files.write(sql + "\n") - sql_files.close() - - sleep(10) - - os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) - tdSql.query("select count(*) from nsdbsql.meters") - tdSql.checkData(0, 0, 2) - - os.system("rm -rf ./res.txt") - os.system("rm -rf ./*.py.sql") - os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") - - def stop(self): - tdSql.close() - 
tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json deleted file mode 100644 index fff1017588bb10f55a82aa2bd7bc6997df71abfd..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "no", - "databases": "nsdb", - "query_times": 10, - "query_mode": "taosc", - "specified_table_query": { - "query_interval": 1, - "concurrent": 2, - "sqls": [ - { - "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;", - "result": "./query_res0.txt" - }, - { - "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", - "result": "./query_res1.txt" - }, - { - "sql": "select count(*) from stb0 where ts>now-20d ;", - "result": "./query_res2.txt" - }, - { - "sql": "select max(c10) from stb0;", - "result": "./query_res3.txt" - }, - { - "sql": "select min(c1) from stb0;", - "result": "./query_res4.txt" - }, - { - "sql": "select avg(c1) from stb0;", - "result": "./query_res5.txt" - }, - { - "sql":"select count(*) from stb0 group by tbname;", - "result":"./query_res6.txt" - } - - ] - }, - "super_table_query": { - "stblname": "stb0", - "query_interval": 0, - "threads": 4, - "sqls": [ - { - "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", - "result": "./query_res_tb0.txt" - }, - { - "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", - "result": "./query_res_tb1.txt" - }, - { - "sql":"select first(*) from xxxx ;", 
- "result": "./query_res_tb2.txt" - }, - { - "sql":"select last(*) from xxxx;", - "result": "./query_res_tb3.txt" - - }, - { - "sql":"select last_row(*) from xxxx ;", - "result": "./query_res_tb4.txt" - - }, - { - "sql":"select max(c10) from xxxx ;", - "result": "./query_res_tb5.txt" - - }, - { - "sql":"select min(c1) from xxxx ;", - "result": "./query_res_tb6.txt" - - }, - { - "sql":"select avg(c10) from xxxx ;", - "result": "./query_res_tb7.txt" - - } - - ] - } - } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py deleted file mode 100644 index 5a37cf9c7cf3153a7bcabb0bc9258063e5f05f09..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py +++ /dev/null @@ -1,157 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - - # query: query test for nanoSecond with where and max min groupby order - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath) - - tdSql.execute("use nsdb") - - # use where to filter - - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") - tdSql.checkData(0, 0, 4000) - tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") - tdSql.checkData(0, 0, 5900) - - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") - tdSql.checkData(0, 0, 40) - tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") - 
tdSql.checkData(0, 0, 59) - - - # select max min avg from special col - tdSql.query("select max(c10) from stb0;") - print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) - - tdSql.query("select max(c10) from tb0_0;") - print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) - - - tdSql.query("select min(c1) from stb0;") - print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) - - tdSql.query("select min(c1) from tb0_0;") - print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) - - tdSql.query("select avg(c1) from stb0;") - print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) - - tdSql.query("select avg(c1) from tb0_0;") - print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) - - tdSql.query("select count(*) from stb0 group by tbname;") - tdSql.checkData(0, 0, 100) - tdSql.checkData(10, 0, 100) - - # query : query above sqls by taosdemo and continuously - - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath) - - - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath) - tdSql.execute("use nsdbcsv") - tdSql.query("show stables") - tdSql.checkData(0, 4, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10000) - tdSql.query("describe stb0") - tdSql.checkDataType(3, 1, "TIMESTAMP") - tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") - tdSql.checkData(0, 0, 5000) - tdSql.query("select count(*) from stb0 where ts 162687012800000000') - tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') - tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') - tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') - tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') - tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') - tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 
20:22:08.248246976"') - tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') - tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') - tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') - tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') - tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') - tdSql.query('select avg(c0) from stb0 interval(5000000000b)') - tdSql.checkRows(1) - - tdSql.query('select avg(c0) from stb0 interval(100000000b)') - tdSql.checkRows(10) - - tdSql.error('select avg(c0) from stb0 interval(1b)') - tdSql.error('select avg(c0) from stb0 interval(999b)') - - tdSql.query('select avg(c0) from stb0 interval(1000b)') - tdSql.checkRows(100) - - tdSql.query('select avg(c0) from stb0 interval(1u)') - tdSql.checkRows(100) - - tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') - tdSql.checkRows(10) - - # query : query above sqls by taosdemo and continuously - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) - - os.system("rm -rf ./query_res*.txt*") - os.system("rm -rf tools/taosdemoAllTest/*.py.sql") - - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json deleted file mode 100644 index a3b3c75efa6680aa0d1da0ca7986d863408ee515..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - 
"user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "no", - "databases": "nsdbcsv", - "query_times": 10, - "query_mode": "taosc", - "specified_table_query": { - "query_interval": 1, - "concurrent": 2, - "sqls": [ - { - "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", - "result": "./query_res0.txt" - }, - { - "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", - "result": "./query_res1.txt" - }, - { - "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", - "result": "./query_res2.txt" - }, - { - "sql": "select count(*) from stb0 where c2 <> 162687012800000000';", - "result": "./query_res3.txt" - }, - { - "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", - "result": "./query_res4.txt" - }, - { - "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", - "result": "./query_res5.txt" - }, - { - "sql":"select count(*) from stb0 group by tbname;", - "result":"./query_res6.txt" - }, - { - "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", - "result":"./query_res7.txt" - }, - { - "sql":"select avg(c0) from stb0 interval(5000000000b);", - "result":"./query_res8.txt" - }, - { - "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", - "result":"./query_res9.txt" - } - - ] - }, - "super_table_query": { - "stblname": "stb0", - "query_interval": 0, - "threads": 4, - "sqls": [ - { - "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", - "result": "./query_res_tb0.txt" - }, - { - "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", - "result": "./query_res_tb1.txt" - }, - { - "sql":"select first(*) from xxxx ;", - "result": "./query_res_tb2.txt" - }, - { - "sql":"select last(*) from xxxx;", - "result": "./query_res_tb3.txt" - - }, - { 
- "sql":"select last_row(*) from xxxx ;", - "result": "./query_res_tb4.txt" - - }, - { - "sql":"select max(c0) from xxxx ;", - "result": "./query_res_tb5.txt" - - }, - { - "sql":"select min(c0) from xxxx ;", - "result": "./query_res_tb6.txt" - - }, - { - "sql":"select avg(c0) from xxxx ;", - "result": "./query_res_tb7.txt" - - }, - { - "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", - "result": "./query_res_tb8.txt" - - } - - - ] - } - } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json deleted file mode 100644 index 1cc834164e7c966a9ce565f1ce481d823b1ed2d1..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "filetype":"subscribe", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "subnsdb", - "confirm_parameter_prompt": "no", - "specified_table_query": - { - "concurrent":2, - "mode":"sync", - "interval":10000, - "restart":"yes", - "keepProgress":"yes", - "sqls": [ - { - "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", - "result": "./subscribe_res0.txt" - }, - { - "sql": "select * from stb0 where ts < now -2d-1h-3s ;", - "result": "./subscribe_res1.txt" - }, - { - "sql": "select * from stb0 where ts < 1626918583000000000 ;", - "result": "./subscribe_res2.txt" - }] - - } -} diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py deleted file mode 100644 index da02f45fa1141a028cfc305bae9babb1856ccb40..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +++ /dev/null @@ -1,124 +0,0 @@ 
-################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * -import time -from datetime import datetime -import subprocess - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - # get the number of subscriptions - def subTimes(self,filename): - self.filename = filename - command = 'cat %s |wc -l'% filename - times = int(subprocess.getstatusoutput(command)[1]) - return times - - # assert results - def assertCheck(self,filename,subResult,expectResult): - self.filename = filename - self.subResult = subResult - self.expectResult = expectResult - args0 = (filename, subResult, expectResult) - assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ 
"/build/bin/" - - # clear env - os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") - os.system("rm -rf ./subscribe_res*") - os.system("rm -rf ./all_subscribe_res*") - - - # insert data - os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) - os.system("nohup %staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json &" % binPath) - query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1]) - - - # merge result files - sleep(5) - os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") - os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") - os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") - - - # correct subscribeTimes testcase - subTimes0 = self.subTimes("all_subscribe_res0.txt") - self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) - - subTimes1 = self.subTimes("all_subscribe_res1.txt") - self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) - - subTimes2 = self.subTimes("all_subscribe_res2.txt") - self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) - - - # insert extral data - tdSql.execute("use subnsdb") - tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") - sleep(15) - - os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") - subTimes0 = self.subTimes("all_subscribe_res0.txt") - self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) - - - - # correct data testcase - os.system("kill -9 %d" % query_pid) - sleep(3) - os.system("rm -rf ./subscribe_res*") - os.system("rm -rf ./all_subscribe*") - os.system("rm -rf ./*.py.sql") - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoTest.py 
b/tests/pytest/tools/taosdemoTest.py index 3cdcdcef5afcb14c04204d2489571bdfed937080..54e0906672637c1187cb8b078f1e8496e461e150 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -51,7 +51,7 @@ class TDTestCase: else: tdLog.info("taosdemo found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" % + os.system("%staosBenchmark -y -t %d -n %d -b INT,INT,INT,INT" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.execute("use test") diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 30c04729b7966660bdb4851ad1b971130d08f726..5b9f6f319f6a451284b01e75a3714d44da1ce7c3 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -49,7 +49,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath + taosdemoCmd = "%staosBenchmark -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath sleepTimes = subprocess.check_output( taosdemoCmd, shell=True).decode("utf-8") print("sleep times: %d" % int(sleepTimes)) diff --git a/tests/pytest/tools/taosdemoTestLimitOffset.py b/tests/pytest/tools/taosdemoTestLimitOffset.py index dd8a1bee701da0ffd2f764cdbedcf12f9dbedb3c..e69098b7f562f996b2bad58b10df63fdaf8a8398 100644 --- a/tests/pytest/tools/taosdemoTestLimitOffset.py +++ b/tests/pytest/tools/taosdemoTestLimitOffset.py @@ -51,8 +51,8 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) - os.system("%staosdemo -f tools/insert-tblimit-tboffset-insertrec.json" % binPath) + os.system("%staosBenchmark -f tools/insert-tblimit-tboffset-createdb.json" % binPath) + 
os.system("%staosBenchmark -f tools/insert-tblimit-tboffset-insertrec.json" % binPath) tdSql.execute("use db") tdSql.query("select count(tbname) from db.stb") @@ -60,8 +60,8 @@ class TDTestCase: tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 33000) - os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) - os.system("%staosdemo -f tools/insert-tblimit-tboffset0.json" % binPath) + os.system("%staosBenchmark -f tools/insert-tblimit-tboffset-createdb.json" % binPath) + os.system("%staosBenchmark -f tools/insert-tblimit-tboffset0.json" % binPath) tdSql.execute("reset query cache") tdSql.execute("use db") @@ -70,8 +70,8 @@ class TDTestCase: tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 20000) - os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) - os.system("%staosdemo -f tools/insert-tblimit1-tboffset.json" % binPath) + os.system("%staosBenchmark -f tools/insert-tblimit-tboffset-createdb.json" % binPath) + os.system("%staosBenchmark -f tools/insert-tblimit1-tboffset.json" % binPath) tdSql.execute("reset query cache") tdSql.execute("use db") diff --git a/tests/pytest/tools/taosdemoTestQuery.py b/tests/pytest/tools/taosdemoTestQuery.py index bb2bb85052a9b21dc9181887622ec2019707256b..c77688aefaf93898ae33044408dd4027d71b07e9 100644 --- a/tests/pytest/tools/taosdemoTestQuery.py +++ b/tests/pytest/tools/taosdemoTestQuery.py @@ -53,12 +53,12 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -y -t %d -n %d" % + os.system("%staosBenchmark -y -t %d -n %d" % (binPath, self.numberOfTables, self.numberOfRecords)) print("Sleep 2 seconds..") time.sleep(2) - os.system('%staosdemo -f tools/query.json ' % binPath) -# taosdemoCmd = '%staosdemo -f tools/query.json ' % binPath + os.system('%staosBenchmark -f tools/query.json ' % binPath) +# taosdemoCmd = '%staosBenchmark -f tools/query.json ' % binPath # threads = 
subprocess.check_output( # taosdemoCmd, shell=True).decode("utf-8") # print("threads: %d" % int(threads)) diff --git a/tests/pytest/tools/taosdemoTestSampleData.py b/tests/pytest/tools/taosdemoTestSampleData.py index a8710a9df36358199e567de2efeb90e88b675312..09a2e6c43b68d8271cb0472b288279b54789fded 100644 --- a/tests/pytest/tools/taosdemoTestSampleData.py +++ b/tests/pytest/tools/taosdemoTestSampleData.py @@ -51,7 +51,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -f tools/taosdemo-sampledata.json" % binPath) + os.system("%staosBenchmark -f tools/taosdemo-sampledata.json" % binPath) tdSql.execute("use db") tdSql.query("select count(tbname) from db.stb") diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py index 89c1b92e140cb1e19b549d3248693153e116c52e..7587ab9eb3e89275451864d28ccf985bcaac949a 100644 --- a/tests/pytest/tools/taosdemoTestTblAlt.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -54,9 +54,9 @@ class TDTestCase: binPath = buildPath + "/build/bin/" if(threadID == 0): - print("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" % + print("%staosBenchmark -y -t %d -n %d -b INT,INT,INT,INT" % (binPath, self.numberOfTables, self.numberOfRecords)) - os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" % + os.system("%staosBenchmark -y -t %d -n %d -b INT,INT,INT,INT" % (binPath, self.numberOfTables, self.numberOfRecords)) if(threadID == 1): time.sleep(2) diff --git a/tests/pytest/tools/taosdemoTestWithJson.py b/tests/pytest/tools/taosdemoTestWithJson.py index b2ecd5497620bf5a0f90dd2e8529890423fd82a1..0868d07fd99f7731424a3f11883ae52d12f1e878 100644 --- a/tests/pytest/tools/taosdemoTestWithJson.py +++ b/tests/pytest/tools/taosdemoTestWithJson.py @@ -48,7 +48,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -f tools/insert.json -y" % binPath) + 
os.system("%staosBenchmark -f tools/insert.json -y" % binPath) tdSql.execute("use db01") tdSql.query("select count(*) from stb01") diff --git a/tests/pytest/tools/taosdemoTestWithoutMetric.py b/tests/pytest/tools/taosdemoTestWithoutMetric.py index 01e19355d9dfde5c536ac1e28e1f190f33ab966e..a92a4519e2d716b85df0ec29c9cf49abb0812b47 100644 --- a/tests/pytest/tools/taosdemoTestWithoutMetric.py +++ b/tests/pytest/tools/taosdemoTestWithoutMetric.py @@ -50,7 +50,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -N -y -t %d -n %d" % + os.system("%staosBenchmark -N -y -t %d -n %d" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.query("show databases") diff --git a/tests/pytest/tools/taosdemoTestdatatype.py b/tests/pytest/tools/taosdemoTestdatatype.py index e32d895571da7d2a101dc32201ebba4754ec4740..ba99b0c532af3497de2daf4d0757b9105405dc1d 100644 --- a/tests/pytest/tools/taosdemoTestdatatype.py +++ b/tests/pytest/tools/taosdemoTestdatatype.py @@ -50,7 +50,7 @@ class TDTestCase: tdLog.info("taosdemo found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -d test002 -y -t %d -n %d -b INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % + os.system("%staosBenchmark -d test002 -y -t %d -n %d -b INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.execute('use test002') @@ -68,7 +68,7 @@ class TDTestCase: tdSql.error('insert into d1 values(now,100,"abcd","abcd"') tdSql.error('insert into d1 values(now,100,100,100)') - os.system("%staosdemo -d test002 -y -t %d -n %d --data-type INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % + os.system("%staosBenchmark -d test002 -y -t %d -n %d --data-type INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.execute('use test002') @@ -76,7 +76,7 @@ class TDTestCase: tdSql.checkData(0, 0, 
self.numberOfTables * self.numberOfRecords) - os.system("%staosdemo -d test002 -y -t %d -n %d -bINT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % + os.system("%staosBenchmark -d test002 -y -t %d -n %d -bINT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.execute('use test002') diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py index 727690c6e629217997bd5ecbf085116be4a7e347..81e315934662184aa7828c2bf5ac5ef0a8cb368a 100644 --- a/tests/pytest/tools/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -143,59 +143,41 @@ class TDTestCase: '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % binPath) - # replace strings to dump in databases - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - - os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + tdSql.execute("drop database timedb1") os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) - os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) - - # dump data and check for taosdump - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0, 0, 1000) - - tdSql.query("select count(*) from dumptmp2.st") + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp3.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 900) + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump 
data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) + # check data origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: tdLog.info("test nano second : dump check data pass for all data!") else: tdLog.info( "test nano second : dump check data failed for all data!") - origin_res = tdSql.getResult( - "select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! ") - else: - tdLog.info(" test nano second : dump check data failed for data !") - - origin_res = tdSql.getResult( - "select * from timedb1.st where ts >=1625068810000000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! 
") - else: - tdLog.info(" test nano second : dump check data failed for data !") - # us second support test case os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") + tdSql.execute("drop database if exists timedb1") + if not os.path.exists("./taosdumptest/tmp1"): os.makedirs("./taosdumptest/dumptmp1") @@ -228,55 +210,44 @@ class TDTestCase: '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % binPath) - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0, 0, 1000) - - tdSql.query("select count(*) from dumptmp2.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp3.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 900) - origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") - if origin_res == dump_res: - tdLog.info("test us second : dump check data pass for all data!") - else: - tdLog.info("test us second : dump check data failed for all data!") - - origin_res = tdSql.getResult( - "select * from 
timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! ") - else: - tdLog.info(" test us second : dump check data failed for data!") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) - origin_res = tdSql.getResult( - "select * from timedb1.st where ts >=1625068810000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! ") + tdLog.info("test micro second : dump check data pass for all data!") else: - tdLog.info(" test us second : dump check data failed for data! 
") + tdLog.info( + "test micro second : dump check data failed for all data!") # ms second support test case os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") + tdSql.execute("drop database if exists timedb1") if not os.path.exists("./taosdumptest/tmp1"): os.makedirs("./taosdumptest/dumptmp1") @@ -309,48 +280,39 @@ class TDTestCase: '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % binPath) - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0, 0, 1000) - - tdSql.query("select count(*) from dumptmp2.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp3.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") tdSql.checkData(0, 0, 900) - origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") - if origin_res == dump_res: - tdLog.info("test ms second : dump check data pass for all data!") - else: - tdLog.info("test ms second : dump check data failed for all data!") - - origin_res = tdSql.getResult( - "select * from timedb1.st 
where ts >=1625068810000 and ts <= 1625068860000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! ") - else: - tdLog.info(" test ms second : dump check data failed for data!") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) - origin_res = tdSql.getResult( - "select * from timedb1.st where ts >=1625068810000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + tdSql.execute("drop database timedb1") + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! ") + tdLog.info("test million second : dump check data pass for all data!") else: - tdLog.info(" test ms second : dump check data failed for data! 
") + tdLog.info( + "test million second : dump check data failed for all data!") os.system("rm -rf ./taosdumptest/") os.system("rm -rf ./dump_result.txt") diff --git a/tests/pytest/tsdb/tsdbComp.py b/tests/pytest/tsdb/tsdbComp.py index 3563655efe4e69fab4c51a40818da205efb29837..b01d9680c0f0147526676a8d3b124e187625e52a 100644 --- a/tests/pytest/tsdb/tsdbComp.py +++ b/tests/pytest/tsdb/tsdbComp.py @@ -59,11 +59,11 @@ class TDTestCase: #new db and insert data tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb1.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb2Newstab.json -y " % binPath) tdSql.execute("use db2") tdSql.execute("drop table if exists stb1_0") diff --git a/tests/pytest/tsdb/tsdbCompCluster.py b/tests/pytest/tsdb/tsdbCompCluster.py index 3df4c9a9d47744bcf729e3d6b01c3b515626058b..c1c1181453c4ea0c34041f47e4b0f613d51d8f23 100644 --- a/tests/pytest/tsdb/tsdbCompCluster.py +++ b/tests/pytest/tsdb/tsdbCompCluster.py @@ -66,11 +66,11 @@ class TwoClients: # new db ,new super tables , child tables, and insert data tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f tsdb/insertDataDb1.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb1.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f tsdb/insertDataDb2.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f tsdb/insertDataDb2Newstab.json -y " % binPath) + os.system("%staosBenchmark -f 
tsdb/insertDataDb2Newstab.json -y " % binPath) # new general tables and modify general tables; tdSql.execute("use db2") diff --git a/tests/pytest/tsdb/tsdbCompClusterReplica2.py b/tests/pytest/tsdb/tsdbCompClusterReplica2.py index cfda271497cde59e8dbe60150ddf935ba63fd9be..6c2b3993294c16859f548fa83b78f4af72f6d851 100644 --- a/tests/pytest/tsdb/tsdbCompClusterReplica2.py +++ b/tests/pytest/tsdb/tsdbCompClusterReplica2.py @@ -65,11 +65,11 @@ class TwoClients: # new db ,new super tables , child tables, and insert data tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f tsdb/insertDataDb1Replica2.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb1Replica2.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f tsdb/insertDataDb2Replica2.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb2Replica2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f tsdb/insertDataDb2NewstabReplica2.json -y " % binPath) + os.system("%staosBenchmark -f tsdb/insertDataDb2NewstabReplica2.json -y " % binPath) # new general tables and modify general tables; tdSql.execute("use db2") diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index adf9026e7808dd1fd6715db26f70db56ce339cd5..adfec12cb2a0aafe19b5d125164b583a7dbd288f 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -15,22 +15,76 @@ import random import string from util.sql import tdSql from util.dnodes import tdDnodes +import requests +import time class TDCom: def init(self, conn, logSql): tdSql.init(conn.cursor(), logSql) - def cleanTb(self): + def preDefine(self): + header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + sql_url = "http://127.0.0.1:6041/rest/sql" + sqlt_url = "http://127.0.0.1:6041/rest/sqlt" + sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc" + influx_url = "http://127.0.0.1:6041/influxdb/v1/write" + telnet_url = 
"http://127.0.0.1:6041/opentsdb/v1/put/telnet" + return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url + + def restApiPost(self, sql): + requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0]) + + def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"): + if api_type == "taosc": + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us'") + else: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1") + elif api_type == "restful": + if db_update_tag == 0: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us'") + else: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1") + tdSql.execute(f'use {dbname}') + + def genUrl(self, url_type, dbname, precision): + if url_type == "influxdb": + if precision is None: + url = self.preDefine()[4] + "?" + "db=" + dbname + else: + url = self.preDefine()[4] + "?" 
+ "db=" + dbname + "&precision=" + precision + elif url_type == "telnet": + url = self.preDefine()[5] + "/" + dbname + else: + url = self.preDefine()[1] + return url + + def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None): + if url_type == "influxdb": + url = self.genUrl(url_type, dbname, precision) + elif url_type == "telnet": + url = self.genUrl(url_type, dbname, precision) + res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) + return res + + def cleanTb(self, type="taosc"): + ''' + type is taosc or restful + ''' query_sql = "show stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: - tdSql.execute(f'drop table if exists {stb}') + if type == "taosc": + tdSql.execute(f'drop table if exists {stb}') + elif type == "restful": + self.restApiPost(f"drop table if exists {stb}") - query_sql = "show tables" - res_row_list = tdSql.query(query_sql, True) - tb_list = map(lambda x: x[0], res_row_list) - for tb in tb_list: - tdSql.execute(f'drop table if exists {tb}') + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) def getLongName(self, len, mode = "mixed"): """ @@ -83,6 +137,14 @@ class TDCom: letters += i return nums, letters + def smlPass(self, func): + def wrapper(*args): + if tdSql.getVariable("smlChildTableName")[0].upper() == "ID": + return func(*args) + else: + pass + return wrapper + def close(self): self.cursor.close() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 55c964c2557eff3204cf31bfb63cd5e3f3dd5501..254d5f166b408e4abe488bf41b33143a7b702b26 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -44,6 +44,7 @@ class TDSimClient: "jnidebugFlag": "135", "qdebugFlag": "135", "telemetryReporting": "0", + "enableCoreFile": "1", } def getLogDir(self): @@ -151,7 +152,8 @@ class TDDnode: "udebugFlag":"135", 
"jnidebugFlag":"135", "qdebugFlag":"135", - "maxSQLLength":"1048576" + "maxSQLLength":"1048576", + "enableCoreFile": "1", } def init(self, path): diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 6a70a84221c5c566cd8a0aa0ad2ea806dbbb9bc6..a7cc0ccc7b8768407f87981a32ab479a74ebbf84 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -332,6 +332,14 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, elm, expect_elm) tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args) + def checkIn(self, sub, res): + if sub in res: + tdLog.info("sql:%s, sub:%s in result:%s" % (self.sql, sub, res)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, sub, res) + tdLog.exit("%s(%d) failed: sql:%s, sub:%s not in result:%s" % args) + def taosdStatus(self, state): tdLog.sleep(5) pstate = 0 diff --git a/tests/pytest/wal/sdbComp.py b/tests/pytest/wal/sdbComp.py index 428fbc9a145c0c3bae4507e33242ff3670c85024..90993014349e28f4d0e6a07646729e8ab04b319c 100644 --- a/tests/pytest/wal/sdbComp.py +++ b/tests/pytest/wal/sdbComp.py @@ -61,11 +61,11 @@ class TDTestCase: os.system("rm -rf %s/sim/dnode1/data/mnode_tmp/" % testPath) os.system("rm -rf %s/sim/dnode1/data/mnode_bak/" % testPath) tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb1.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2Newstab.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) 
tdSql.execute("use db2") diff --git a/tests/pytest/wal/sdbCompCluster.py b/tests/pytest/wal/sdbCompCluster.py index 4fa84817ec01c9e5adfdb4a76bc29a4a6c49abfc..0be0f441d032a6a7ec5083472d02c7976a84c75f 100644 --- a/tests/pytest/wal/sdbCompCluster.py +++ b/tests/pytest/wal/sdbCompCluster.py @@ -66,11 +66,11 @@ class TwoClients: os.system("rm -rf /var/lib/taos/mnode_bak/") os.system("rm -rf /var/lib/taos/mnode_temp/") tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb1.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2Newstab.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) tdSql.execute("use db2") diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py index ba80e3864aed27c091dd5ec72ca9f09ea2c36126..1a9354c61e1f4986a399a8684ccd3678c8bfaa7e 100644 --- a/tests/pytest/wal/sdbCompClusterReplica2.py +++ b/tests/pytest/wal/sdbCompClusterReplica2.py @@ -66,11 +66,11 @@ class TwoClients: os.system("rm -rf /var/lib/taos/mnode_bak/") os.system("rm -rf /var/lib/taos/mnode_temp/") tdSql.execute("drop database if exists db2") - os.system("%staosdemo -f wal/insertDataDb1Replica2.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb1Replica2.json -y " % binPath) tdSql.execute("drop database if exists db1") - os.system("%staosdemo -f wal/insertDataDb2Replica2.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2Replica2.json -y " % binPath) tdSql.execute("drop table if exists db2.stb0") - 
os.system("%staosdemo -f wal/insertDataDb2NewstabReplica2.json -y " % binPath) + os.system("%staosBenchmark -f wal/insertDataDb2NewstabReplica2.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) tdSql.execute("use db2") diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index 2cbee8eac91719a4cbff4d9c323f1f304e8e8684..8931f2021f16674140844a1d4bf76cb9d7168b45 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -2722,6 +2722,38 @@ int stmt_funcb_autoctb_e5(TAOS_STMT *stmt) { } +int stmt_funcb_autoctb_e6(TAOS_STMT *stmt) { + char *sql = "insert into ? using stb1 tags(?,?,?,?,?,?,?,?,?) values(now,?,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("case success:failed to execute taos_stmt_prepare. code:%s\n", taos_stmt_errstr(stmt)); + } + + return 0; +} + + +int stmt_funcb_autoctb_e7(TAOS_STMT *stmt) { + char *sql = "insert into ? using stb1 tags(?,?,?,?,?,?,?,?,?) values(?,true,?,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("case success:failed to execute taos_stmt_prepare. code:%s\n", taos_stmt_errstr(stmt)); + } + + return 0; +} + + +int stmt_funcb_autoctb_e8(TAOS_STMT *stmt) { + char *sql = "insert into ? using stb1 tags(?,?,?,?,?,?,?,?,?) values(?,?,1,?,?,?,?,?,?,?)"; + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("case success:failed to execute taos_stmt_prepare. 
code:%s\n", taos_stmt_errstr(stmt)); + } + + return 0; +} + //300 tables 60 records int stmt_funcb1(TAOS_STMT *stmt) { @@ -4857,6 +4889,44 @@ void* runcase(void *par) { #endif + +#if 1 + prepare(taos, 1, 0); + + stmt = taos_stmt_init(taos); + + printf("e6 start\n"); + stmt_funcb_autoctb_e6(stmt); + printf("e6 end\n"); + taos_stmt_close(stmt); + +#endif + +#if 1 + prepare(taos, 1, 0); + + stmt = taos_stmt_init(taos); + + printf("e7 start\n"); + stmt_funcb_autoctb_e7(stmt); + printf("e7 end\n"); + taos_stmt_close(stmt); + +#endif + +#if 1 + prepare(taos, 1, 0); + + stmt = taos_stmt_init(taos); + + printf("e8 start\n"); + stmt_funcb_autoctb_e8(stmt); + printf("e8 end\n"); + taos_stmt_close(stmt); + +#endif + + #if 1 prepare(taos, 1, 0); diff --git a/tests/script/api/stmt.c b/tests/script/api/stmt.c index f4fb9233a83f930a808eadf2135003d0e644c597..085263e06638417dd8035a7108f8b105bc521bfa 100644 --- a/tests/script/api/stmt.c +++ b/tests/script/api/stmt.c @@ -173,6 +173,44 @@ void taos_stmt_set_tbname_tags_test() { printf("finish taos_stmt_set_tbname_tags test\n"); } +void taos_stmt_set_tbname_tags_json_test() { + printf("start taos_stmt_set_tbname_tags_json_test test\n"); + TAOS_STMT *stmt = NULL; + char * name = calloc(1, 20); + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND)); + // ASM ERROR + assert(taos_stmt_set_tbname_tags(stmt, name, tags) != 0); + void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("Cannot connect to tdengine server\n"); + exit(EXIT_FAILURE); + } + execute_simple_sql(taos, "drop database if exists stmt_test_json"); + execute_simple_sql(taos, "create database stmt_test_json"); + execute_simple_sql(taos, "use stmt_test_json"); + execute_simple_sql(taos, "create stable super(ts timestamp, c1 int) tags (jtag json)"); + stmt = taos_stmt_init(taos); + assert(stmt != NULL); + char *stmt_sql = calloc(1, 1000); + sprintf(stmt_sql, "insert into ? using super tags (?) 
values (?,?)"); + assert(taos_stmt_prepare(stmt, stmt_sql, 0) == 0); + assert(taos_stmt_set_tbname_tags(stmt, name, tags) != 0); + sprintf(name, "tb"); + assert(taos_stmt_set_tbname_tags(stmt, name, tags) != 0); + tags->buffer_type = TSDB_DATA_TYPE_JSON; + tags->buffer = "{\\\"key1\\\":\\\"value1\\\",\\\"key2\\\":null,\\\"key3\\\":3,\\\"key4\\\":3.2}"; + tags->buffer_length = strlen(tags->buffer); + tags->length = &tags->buffer_length; + tags->is_null = NULL; + assert(taos_stmt_set_tbname_tags(stmt, name, tags) == 0); + free(stmt_sql); + free(name); + free(tags); + assert(taos_stmt_affected_rows(stmt) == 0); + taos_stmt_close(stmt); + printf("finish taos_stmt_set_tbname_tags_json_test test\n"); +} + void taos_stmt_set_sub_tbname_test() { printf("start taos_stmt_set_sub_tbname test\n"); TAOS_STMT *stmt = NULL; @@ -492,6 +530,76 @@ void taos_stmt_use_result_query(void *taos, char *col, int type) { free(stmt_sql); } +void taos_stmt_use_result_query_json(void *taos, char *col, int type) { + TAOS_STMT *stmt = taos_stmt_init(taos); + assert(stmt != NULL); + char *stmt_sql = calloc(1, 1024); + sprintf(stmt_sql, "select * from stmt_test_json.super where jtag->? 
= ?"); + printf("stmt_sql: %s\n", stmt_sql); + assert(taos_stmt_prepare(stmt, stmt_sql, 0) == 0); + + + struct { + int64_t long_value; + double double_value; + char nchar_value[32]; + } v = {0}; + v.long_value = 4; + v.double_value = 3.3; + strcpy(v.nchar_value, "一二三四五六七八"); + + TAOS_BIND params[2] = {0}; + +// char jsonTag[32] = "jtag"; +// params[0].buffer_type = TSDB_DATA_TYPE_NCHAR; +// params[0].buffer_length = strlen(jsonTag); +// params[0].buffer = &jsonTag; +// params[0].length = ¶ms[0].buffer_length; +// params[0].is_null = NULL; + + params[0].buffer_type = TSDB_DATA_TYPE_NCHAR; + params[0].buffer_length = strlen(col); + params[0].buffer = col; + params[0].length = ¶ms[0].buffer_length; + params[0].is_null = NULL; + + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + params[1].buffer_type = type; + params[1].buffer_length = sizeof(v.long_value); + params[1].buffer = &v.long_value; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + break; + case TSDB_DATA_TYPE_NCHAR: + params[1].buffer_type = type; + params[1].buffer_length = strlen(v.nchar_value); + params[1].buffer = &v.nchar_value; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + break; + case TSDB_DATA_TYPE_DOUBLE: + params[1].buffer_type = type; + params[1].buffer_length = sizeof(v.double_value); + params[1].buffer = &v.double_value; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + break; + default: + printf("Cannnot find type: %d\n", type); + break; + } + + assert(taos_stmt_bind_param(stmt, params) == 0); + assert(taos_stmt_execute(stmt) == 0); + TAOS_RES *result = taos_stmt_use_result(stmt); + assert(result != NULL); + print_result(result); + taos_free_result(result); + assert(taos_stmt_close(stmt) == 0); + free(stmt_sql); +} + void taos_stmt_use_result_test() { printf("start taos_stmt_use_result test\n"); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); @@ -534,6 +642,47 @@ void taos_stmt_use_result_test() { 
printf("finish taos_stmt_use_result test\n"); } +void taos_stmt_use_result_json_test() { + printf("start taos_stmt_use_result_json_test test\n"); + void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("Cannot connect to tdengine server\n"); + exit(EXIT_FAILURE); + } + execute_simple_sql(taos, "drop database if exists stmt_test_json"); + execute_simple_sql(taos, "create database stmt_test_json"); + execute_simple_sql(taos, "use stmt_test_json"); + execute_simple_sql( + taos, + "create table super(ts timestamp, c1 int) tags (jtag json)"); + execute_simple_sql(taos, + "create table t1 using super tags ('{\\\"key1\\\":\\\"一二三四五六七八\\\",\\\"key2\\\":null,\\\"key3\\\":3,\\\"key4\\\":3.1}')"); + execute_simple_sql( + taos, "insert into t1 values (1591060628000, 1)"); + execute_simple_sql( + taos, "insert into t1 values (1591060628001, 2)"); + + execute_simple_sql(taos, + "create table t2 using super tags ('{\\\"key1\\\":5,\\\"key2\\\":null,\\\"key3\\\":4,\\\"key4\\\":3.2}')"); + execute_simple_sql( + taos, "insert into t2 values (1591060628003, 21)"); + execute_simple_sql( + taos, "insert into t2 values (1591060628004, 22)"); + + execute_simple_sql(taos, + "create table t3 using super tags ('{\\\"key1\\\":\\\"一二\\\",\\\"key2\\\":null,\\\"key3\\\":null,\\\"key4\\\":3.3}')"); + execute_simple_sql( + taos, "insert into t3 values (1591060628005, 31)"); + execute_simple_sql( + taos, "insert into t3 values (1591060628006, 32)"); + + taos_stmt_use_result_query_json(taos, "key1", TSDB_DATA_TYPE_NCHAR); + taos_stmt_use_result_query_json(taos, "key3", TSDB_DATA_TYPE_BIGINT); + taos_stmt_use_result_query_json(taos, "key4", TSDB_DATA_TYPE_DOUBLE); + + printf("finish taos_stmt_use_result_json_test test\n"); +} + void taos_stmt_close_test() { printf("start taos_stmt_close test\n"); // ASM ERROR @@ -548,6 +697,7 @@ void test_api_reliability() { taos_stmt_preprare_test(); taos_stmt_set_tbname_test(); taos_stmt_set_tbname_tags_test(); + 
taos_stmt_set_tbname_tags_json_test(); taos_stmt_set_sub_tbname_test(); taos_stmt_bind_param_test(); taos_stmt_bind_single_param_batch_test(); @@ -557,7 +707,7 @@ void test_api_reliability() { taos_stmt_close_test(); } -void test_query() { taos_stmt_use_result_test(); } +void test_query() { taos_stmt_use_result_test(); taos_stmt_use_result_json_test(); } int main(int argc, char *argv[]) { test_api_reliability(); diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index a2b3978ad7fd6c6f2b4adf9f62b9edb4fed2a9e5..e2ab5e6ffcd9dd499fda37ac3433023d85d4a446 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -38,6 +38,12 @@ run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim run general/compute/block_dist.sim +run general/compute/math_funcs.sim +run general/compute/string_funcs.sim +run general/compute/scalar_pow.sim +run general/compute/scalar_triangle.sim +run general/compute/scalar_str_concat_len.sim +run general/compute/table_group.sim run general/db/alter_option.sim run general/db/alter_tables_d2.sim run general/db/alter_tables_v1.sim @@ -139,6 +145,7 @@ run general/parser/groupby.sim run general/parser/top_groupby.sim run general/parser/tags_dynamically_specifiy.sim run general/parser/set_tag_vals.sim +run general/parser/scalar_expression.sim #unsupport run general/parser/repeatAlter.sim #unsupport run general/parser/slimit_alter_tags.sim run general/parser/precision_ns.sim @@ -231,3 +238,4 @@ run general/parser/tbname_escape.sim run general/parser/columnName_escape.sim run general/parser/tagName_escape.sim run general/parser/interp_blocks.sim +run general/parser/create_tb_with_timestamp_tag.sim diff --git a/tests/script/general/compute/cast.sim b/tests/script/general/compute/cast.sim new file mode 100644 index 0000000000000000000000000000000000000000..729d6808a53e8db4ee17eb1e38ed94aca91ac1ef --- /dev/null +++ b/tests/script/general/compute/cast.sim @@ -0,0 +1,106 
@@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 6 +system sh/cfg.sh -n dnode1 -c cache -v 1 +system sh/cfg.sh -n dnode1 -c minRows -v 10 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect + +sql drop database if exists db +sql create database if not exists db +sql use db +sql create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tb1 using stb1 tags(1,'1',1.0); +sql create table tb2 using stb1 tags(2,'2',2.0); +sql create table tb3 using stb1 tags(3,'3',3.0); + +sql insert into tb1 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"123","1234",1,1,1,1); +sql insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); +sql insert into tb1 values ('2021-11-11 09:00:02',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL); +sql insert into tb1 values ('2021-11-11 09:00:03',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3); +sql insert into tb1 values ('2021-11-11 09:00:04',true,4,4,4,4,4,4,"456","4567",4,4,4,4); +sql insert into tb1 values ('2021-11-11 09:00:05',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807); +sql insert into tb1 values ('2021-11-11 09:00:06',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0); + +sql insert into tb2 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tb2 values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tb2 values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tb2 values ('2021-11-11 
09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tb2 values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tb2 values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tb2 values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7); + +sql create table tbn (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned); + +sql insert into tbn values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tbn values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tbn values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tbn values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tbn values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tbn values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tbn values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7); + +run general/compute/cast_query1.sim +run general/compute/cast_query2.sim + + +sql create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tba1 using stba tags(1,'1',1.0); + +sql insert into tba1 values ('2021-11-11 09:00:00',true, 1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tba1 values ('2021-11-11 09:00:01',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:02',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tba1 values 
('2021-11-11 09:00:04',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:05',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:06',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:07',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:08',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:09',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:10',true, 1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tba1 values ('2021-11-11 09:00:11',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:12',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:13',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tba1 values ('2021-11-11 09:00:14',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:15',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:16',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:17',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:18',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:19',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:20',true, 1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert 
into tba1 values ('2021-11-11 09:00:21',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:22',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:23',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tba1 values ('2021-11-11 09:00:24',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:25',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:26',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:27',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:28',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:29',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +run general/compute/cast_query1.sim +run general/compute/cast_query2.sim +run general/compute/cast_query3.sim + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/cast_query1.sim b/tests/script/general/compute/cast_query1.sim new file mode 100644 index 0000000000000000000000000000000000000000..de0722bcd8c3911d94b4558c302c22bbc607defd --- /dev/null +++ b/tests/script/general/compute/cast_query1.sim @@ -0,0 +1,2454 @@ +sleep 100 +sql connect + +sql use db; + +print "test negative cases" + +sql_error select cast(* as tinyint) from tb1; +sql_error select cast(* as smallint) from tb1; +sql_error select cast(* as int) from tb1; +sql_error select cast(* as bool) from tb1; +sql_error select cast(* as bigint) as a from tb1; +sql_error select cast(* as bigint) + 1 as a from tb1; +sql_error select cast(tb1.* as bigint) + 1 as a from tb1; +sql_error select cast(* as bigint) from tb1; +sql_error select cast(c1 as binary(16384)) from tb1; +sql_error select cast(c1 as nchar(16384)) from tb1; +sql_error select cast(c1 + c2 as bigint) from tb1; +sql_error select cast(13 as binary(0)) from tb1; +sql_error select cast(12 as 
binary(-1)) from tb1; +sql_error select cast(11 as nchar(0)) from tb1; +sql_error select cast(10 as nchar(-1)) from tb1; +sql_error select cast(11 as tinyint) from tb1; +sql_error select cast(11 as bool) from tb1; +sql_error select cast(11 as smallint) from tb1; +sql_error select cast(11 as int) from tb1; +sql_error select cast(11 as float) from tb1; +sql_error select cast(11 as double) from tb1; +sql_error select cast(11 as tinyint unsigned) from tb1; +sql_error select cast(11 as smallint unsigned) from tb1; +sql_error select cast(11 as int unsigned) from tb1; +sql_error select cast(c1 as binary(0)) from tb1; +sql_error select cast(c1 as binary(-1)) from tb1; +sql_error select cast(c1 as nchar(0)) from tb1; +sql_error select cast(c1 as nchar(-1)) from tb1; +sql_error select cast(c1 as tinyint) from tb1; +sql_error select cast(c1 as bool) from tb1; +sql_error select cast(c1 as smallint) from tb1; +sql_error select cast(c1 as int) from tb1; +sql_error select cast(c1 as float) from tb1; +sql_error select cast(c1 as double) from tb1; +sql_error select cast(c1 as tinyint unsigned) from tb1; +sql_error select cast(c1 as smallint unsigned) from tb1; +sql_error select cast(c1 as int unsigned) from tb1; +sql_error select cast(c2 as binary(0)) from tb1; +sql_error select cast(c2 as binary(-1)) from tb1; +sql_error select cast(c2 as nchar(0)) from tb1; +sql_error select cast(c2 as nchar(-1)) from tb1; +sql_error select cast(c2 as tinyint) from tb1; +sql_error select cast(c2 as bool) from tb1; +sql_error select cast(c2 as smallint) from tb1; +sql_error select cast(c2 as int) from tb1; +sql_error select cast(c2 as float) from tb1; +sql_error select cast(c2 as double) from tb1; +sql_error select cast(c2 as tinyint unsigned) from tb1; +sql_error select cast(c2 as smallint unsigned) from tb1; +sql_error select cast(c2 as int unsigned) from tb1; +sql_error select cast(c3 as binary(0)) from tb1; +sql_error select cast(c3 as binary(-1)) from tb1; +sql_error select cast(c3 as 
nchar(0)) from tb1; +sql_error select cast(c3 as nchar(-1)) from tb1; +sql_error select cast(c3 as tinyint) from tb1; +sql_error select cast(c3 as bool) from tb1; +sql_error select cast(c3 as smallint) from tb1; +sql_error select cast(c3 as int) from tb1; +sql_error select cast(c3 as float) from tb1; +sql_error select cast(c3 as double) from tb1; +sql_error select cast(c3 as tinyint unsigned) from tb1; +sql_error select cast(c3 as smallint unsigned) from tb1; +sql_error select cast(c3 as int unsigned) from tb1; +sql_error select cast(c4 as binary(0)) from tb1; +sql_error select cast(c4 as binary(-1)) from tb1; +sql_error select cast(c4 as nchar(0)) from tb1; +sql_error select cast(c4 as nchar(-1)) from tb1; +sql_error select cast(c4 as tinyint) from tb1; +sql_error select cast(c4 as bool) from tb1; +sql_error select cast(c4 as smallint) from tb1; +sql_error select cast(c4 as int) from tb1; +sql_error select cast(c4 as float) from tb1; +sql_error select cast(c4 as double) from tb1; +sql_error select cast(c4 as tinyint unsigned) from tb1; +sql_error select cast(c4 as smallint unsigned) from tb1; +sql_error select cast(c4 as int unsigned) from tb1; +sql_error select cast(c5 as binary(0)) from tb1; +sql_error select cast(c5 as binary(-1)) from tb1; +sql_error select cast(c5 as nchar(0)) from tb1; +sql_error select cast(c5 as nchar(-1)) from tb1; +sql_error select cast(c5 as tinyint) from tb1; +sql_error select cast(c5 as bool) from tb1; +sql_error select cast(c5 as smallint) from tb1; +sql_error select cast(c5 as int) from tb1; +sql_error select cast(c5 as float) from tb1; +sql_error select cast(c5 as double) from tb1; +sql_error select cast(c5 as tinyint unsigned) from tb1; +sql_error select cast(c5 as smallint unsigned) from tb1; +sql_error select cast(c5 as int unsigned) from tb1; +sql_error select cast(c6 as binary(0)) from tb1; +sql_error select cast(c6 as binary(-1)) from tb1; +sql_error select cast(c6 as nchar(0)) from tb1; +sql_error select cast(c6 as 
nchar(-1)) from tb1; +sql_error select cast(c6 as tinyint) from tb1; +sql_error select cast(c6 as bool) from tb1; +sql_error select cast(c6 as smallint) from tb1; +sql_error select cast(c6 as int) from tb1; +sql_error select cast(c6 as float) from tb1; +sql_error select cast(c6 as double) from tb1; +sql_error select cast(c6 as tinyint unsigned) from tb1; +sql_error select cast(c6 as smallint unsigned) from tb1; +sql_error select cast(c6 as int unsigned) from tb1; +sql_error select cast(c7 as binary(0)) from tb1; +sql_error select cast(c7 as binary(-1)) from tb1; +sql_error select cast(c7 as nchar(0)) from tb1; +sql_error select cast(c7 as nchar(-1)) from tb1; +sql_error select cast(c7 as tinyint) from tb1; +sql_error select cast(c7 as bool) from tb1; +sql_error select cast(c7 as smallint) from tb1; +sql_error select cast(c7 as int) from tb1; +sql_error select cast(c7 as float) from tb1; +sql_error select cast(c7 as double) from tb1; +sql_error select cast(c7 as tinyint unsigned) from tb1; +sql_error select cast(c7 as smallint unsigned) from tb1; +sql_error select cast(c7 as int unsigned) from tb1; +sql_error select cast(c8 as binary(0)) from tb1; +sql_error select cast(c8 as binary(-1)) from tb1; +sql_error select cast(c8 as nchar(0)) from tb1; +sql_error select cast(c8 as nchar(-1)) from tb1; +sql_error select cast(c8 as tinyint) from tb1; +sql_error select cast(c8 as bool) from tb1; +sql_error select cast(c8 as smallint) from tb1; +sql_error select cast(c8 as int) from tb1; +sql_error select cast(c8 as float) from tb1; +sql_error select cast(c8 as double) from tb1; +sql_error select cast(c8 as tinyint unsigned) from tb1; +sql_error select cast(c8 as smallint unsigned) from tb1; +sql_error select cast(c8 as int unsigned) from tb1; +sql_error select cast(c8 as timestamp) from tb1; +sql_error select cast(c9 as binary(0)) from tb1; +sql_error select cast(c9 as binary(-1)) from tb1; +sql_error select cast(c9 as nchar(0)) from tb1; +sql_error select cast(c9 as 
nchar(-1)) from tb1; +sql_error select cast(c9 as tinyint) from tb1; +sql_error select cast(c9 as bool) from tb1; +sql_error select cast(c9 as smallint) from tb1; +sql_error select cast(c9 as int) from tb1; +sql_error select cast(c9 as float) from tb1; +sql_error select cast(c9 as double) from tb1; +sql_error select cast(c9 as tinyint unsigned) from tb1; +sql_error select cast(c9 as smallint unsigned) from tb1; +sql_error select cast(c9 as int unsigned) from tb1; +sql_error select cast(c9 as timestamp) from tb1; +sql_error select cast(c9 as binary(5)) from tb1; +sql_error select cast(c10 as binary(0)) from tb1; +sql_error select cast(c10 as binary(-1)) from tb1; +sql_error select cast(c10 as nchar(0)) from tb1; +sql_error select cast(c10 as nchar(-1)) from tb1; +sql_error select cast(c10 as tinyint) from tb1; +sql_error select cast(c10 as bool) from tb1; +sql_error select cast(c10 as smallint) from tb1; +sql_error select cast(c10 as int) from tb1; +sql_error select cast(c10 as float) from tb1; +sql_error select cast(c10 as double) from tb1; +sql_error select cast(c10 as tinyint unsigned) from tb1; +sql_error select cast(c10 as smallint unsigned) from tb1; +sql_error select cast(c10 as int unsigned) from tb1; +sql_error select cast(c11 as binary(0)) from tb1; +sql_error select cast(c11 as binary(-1)) from tb1; +sql_error select cast(c11 as nchar(0)) from tb1; +sql_error select cast(c11 as nchar(-1)) from tb1; +sql_error select cast(c11 as tinyint) from tb1; +sql_error select cast(c11 as bool) from tb1; +sql_error select cast(c11 as smallint) from tb1; +sql_error select cast(c11 as int) from tb1; +sql_error select cast(c11 as float) from tb1; +sql_error select cast(c11 as double) from tb1; +sql_error select cast(c11 as tinyint unsigned) from tb1; +sql_error select cast(c11 as smallint unsigned) from tb1; +sql_error select cast(c11 as int unsigned) from tb1; +sql_error select cast(c12 as binary(0)) from tb1; +sql_error select cast(c12 as binary(-1)) from tb1; 
+sql_error select cast(c12 as nchar(0)) from tb1; +sql_error select cast(c12 as nchar(-1)) from tb1; +sql_error select cast(c12 as tinyint) from tb1; +sql_error select cast(c12 as bool) from tb1; +sql_error select cast(c12 as smallint) from tb1; +sql_error select cast(c12 as int) from tb1; +sql_error select cast(c12 as float) from tb1; +sql_error select cast(c12 as double) from tb1; +sql_error select cast(c12 as tinyint unsigned) from tb1; +sql_error select cast(c12 as smallint unsigned) from tb1; +sql_error select cast(c12 as int unsigned) from tb1; +sql_error select cast(c13 as binary(0)) from tb1; +sql_error select cast(c13 as binary(-1)) from tb1; +sql_error select cast(c13 as nchar(0)) from tb1; +sql_error select cast(c13 as nchar(-1)) from tb1; +sql_error select cast(c13 as tinyint) from tb1; +sql_error select cast(c13 as bool) from tb1; +sql_error select cast(c13 as smallint) from tb1; +sql_error select cast(c13 as int) from tb1; +sql_error select cast(c13 as float) from tb1; +sql_error select cast(c13 as double) from tb1; +sql_error select cast(c13 as tinyint unsigned) from tb1; +sql_error select cast(c13 as smallint unsigned) from tb1; +sql_error select cast(c13 as int unsigned) from tb1; +sql_error select cast(12345678900000000000000000 as binary(10)) from tb1; +sql_error select distinct cast("-abc" as bigint unsigned) from tb1; +sql_error select cast(c1) from tb1; +sql_error select cast(t1 as bigint) from stb1; +sql_error select cast(c2 as bigint)+avg(c2) from tb1; +sql_error select cast(c2 as bigint)+top(c2,1) from tb1; +sql_error select cast(c1 as bigint),avg(c3) from tb1; +sql_error select cast(c1 as bigint),top(c3,1) from tb1; +sql_error select cast(c2+c3 as binary(6)) from tb1 session(ts, 1s); +sql_error select cast(c2+c3 as binary(6)) from tb1 STATE_WINDOW(c1); +sql_error select cast(c2+c3 as binary(6)) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select cast(c2+c3 as binary(6)) from stb1 group by t1; +sql_error select cast(c2+c3 as 
binary(6)) from stb1 group by ts; +sql_error select cast(c2+c3 as binary(6)) from stb1 group by c1; +sql_error select cast(c2+c3 as binary(6)) from stb1 group by tbname; +sql_error select cast(c2+c3 as binary(6)) from tb1 order by c2; +sql_error select cast(c8 as bigint),cast(c9 as bigint(12)) from tbn; +sql_error select cast(ts as binary(10)) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select cast(a as timestamp) from (select cast(c2 as binary(2)) as a from tb1); + +print "test constant" + +sql select cast(13 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 13 then + return -1 +endi +if $data10 != 13 then + return -1 +endi +sql select cast(13 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.013@ then + return -1 +endi +if $data10 != @70-01-01 08:00:00.013@ then + return -1 +endi +sql select cast("abc" as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 0 then + return -1 +endi +if $data10 != 0 then + return -1 +endi +print "test bool column" + +sql select cast(c1 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data40 != 1 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data60 != 1 then + return -1 +endi +sql select cast(c1 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != true then + return -1 +endi +if $data10 != true then + return -1 +endi +if $data20 != true then + return -1 +endi +if $data30 != false then + return -1 +endi +if $data40 != true then + return -1 +endi +if $data50 != true then + return -1 +endi +if $data60 != true then + return -1 +endi +sql select cast(c1 as binary(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != t then + return -1 +endi +if $data10 != t then + return -1 +endi +if $data20 
!= t then + return -1 +endi +if $data30 != f then + return -1 +endi +if $data40 != t then + return -1 +endi +if $data50 != t then + return -1 +endi +if $data60 != t then + return -1 +endi +sql select cast(c1 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data20 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data30 != @70-01-01 08:00:00.000@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data50 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.001@ then + return -1 +endi +sql select cast(c1 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != true then + return -1 +endi +if $data10 != true then + return -1 +endi +if $data20 != true then + return -1 +endi +if $data30 != false then + return -1 +endi +if $data40 != true then + return -1 +endi +if $data50 != true then + return -1 +endi +if $data60 != true then + return -1 +endi +sql select cast(c1 as nchar(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != t then + return -1 +endi +if $data10 != t then + return -1 +endi +if $data20 != t then + return -1 +endi +if $data30 != f then + return -1 +endi +if $data40 != t then + return -1 +endi +if $data50 != t then + return -1 +endi +if $data60 != t then + return -1 +endi +sql select cast(c1 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data40 != 1 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data60 != 1 then + return -1 +endi + +print "test tinyint column" + +sql select cast(c2 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + 
return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 127 then + return -1 +endi +if $data60 != -127 then + return -1 +endi +sql select cast(c2 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 127 then + return -1 +endi +if $data60 != -127 then + return -1 +endi +sql select cast(c2 as binary(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data60 != - then + return -1 +endi +sql select cast(c2 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @70-01-01 08:00:00.002@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-01-01 08:00:00.127@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.-127@ then + return -1 +endi +sql select cast(c2 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 127 then + return -1 +endi +if $data60 != -127 then + return -1 +endi +sql select cast(c2 as nchar(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 
then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data60 != - then + return -1 +endi +sql select cast(c2 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 127 then + return -1 +endi +if $data60 != 18446744073709551489 then + return -1 +endi + +print "test smallint column" + +sql select cast(c3 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +sql select cast(c3 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +sql select cast(c3 as binary(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32 then + return -1 +endi +if $data60 != -3 then + return -1 +endi +sql select cast(c3 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @70-01-01 
08:00:00.003@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-01-01 08:00:32.767@ then + return -1 +endi +if $data60 != @70-01-01 07:59:28.-767@ then + return -1 +endi +sql select cast(c3 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +sql select cast(c3 as nchar(3)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 327 then + return -1 +endi +if $data60 != -32 then + return -1 +endi +sql select cast(c3 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data60 != 18446744073709518849 then + return -1 +endi + +print "test int column" + +sql select cast(c4 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 2147483647 then + return -1 +endi +if $data60 != -2147483647 then + return -1 +endi +sql select cast(c4 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 
!= NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 2147483647 then + return -1 +endi +if $data60 != -214748364 then + return -1 +endi +sql select cast(c4 as binary(5)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 21474 then + return -1 +endi +if $data60 != -2147 then + return -1 +endi +sql select cast(c4 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @70-01-01 08:00:00.002@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-01-26 04:31:23.647@ then + return -1 +endi +if $data60 != @69-12-07 11:28:37.-647@ then + return -1 +endi +sql select cast(c4 as nchar(13)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 2147483647 then + return -1 +endi +if $data60 != -2147483647 then + return -1 +endi +sql select cast(c4 as nchar(3)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 214 then + return -1 +endi +if $data60 != -21 then + return -1 +endi +sql select cast(c4 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if 
$data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 2147483647 then + return -1 +endi +if $data60 != 18446744071562067969 then + return -1 +endi + +print "test bigint column" + +sql select cast(c5 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != -9223372036854775807 then + return -1 +endi +sql select cast(c5 as binary(20)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != -9223372036854775807 then + return -1 +endi +sql select cast(c5 as binary(5)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 92233 then + return -1 +endi +if $data60 != -9223 then + return -1 +endi +sql select cast(c5 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @70-01-01 08:00:00.003@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +sql select cast(c5 as nchar(20)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 
+endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != -9223372036854775807 then + return -1 +endi +sql select cast(c5 as nchar(6)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 922337 then + return -1 +endi +if $data60 != -92233 then + return -1 +endi +sql select cast(c5 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != 9223372036854775809 then + return -1 +endi + +print "test float column" + +sql select cast(c6 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != NULL then + return -1 +endi +sql select cast(c6 as binary(60)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2.000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4.000000 then + return -1 +endi +if $data50 != 340282346638528859811704183484516925440.000000 then + return -1 +endi +if $data60 != -340282346638528859811704183484516925440.000000 then + return -1 +endi +sql select cast(c6 as binary(5)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000 then + return -1 +endi +if $data10 != NULL then + return -1 
+endi +if $data20 != 2.000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4.000 then + return -1 +endi +if $data50 != 34028 then + return -1 +endi +if $data60 != -3402 then + return -1 +endi +sql select cast(c6 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @70-01-01 08:00:00.002@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != NULL then + return -1 +endi +sql select cast(c6 as nchar(50)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2.000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4.000000 then + return -1 +endi +if $data50 != 340282346638528859811704183484516925440.000000 then + return -1 +endi +if $data60 != -340282346638528859811704183484516925440.000000 then + return -1 +endi +sql select cast(c6 as nchar(6)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.0000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2.0000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4.0000 then + return -1 +endi +if $data50 != 340282 then + return -1 +endi +if $data60 != -34028 then + return -1 +endi +sql select cast(c6 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi + + +print "test double column" + +sql select cast(c7 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 
NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != NULL then + return -1 +endi +sql select cast(c7 as binary(400)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3.000000 then + return -1 +endi +if $data40 != 4.000000 then + return -1 +endi +if $data50 != 179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000 then + return -1 +endi +if $data60 != -179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000 then + return -1 +endi +sql select cast(c7 as binary(5)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3.000 then + return -1 +endi +if $data40 != 4.000 then + return -1 +endi +if $data50 != 17976 then + return -1 +endi +if $data60 != -1797 then + return -1 +endi +sql select cast(c7 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @70-01-01 08:00:00.003@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 
+endi +if $data50 != NULL then + return -1 +endi +if $data60 != NULL then + return -1 +endi +sql select cast(c7 as nchar(500)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3.000000 then + return -1 +endi +if $data40 != 4.000000 then + return -1 +endi +if $data50 != 179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000 then + return -1 +endi +if $data60 != -179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000 then + return -1 +endi +sql select cast(c7 as nchar(6)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1.0000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3.0000 then + return -1 +endi +if $data40 != 4.0000 then + return -1 +endi +if $data50 != 179769 then + return -1 +endi +if $data60 != -17976 then + return -1 +endi +sql select cast(c7 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi + + +print "test binary column" + +sql select cast(c8 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 123 then + return -1 +endi +if $data10 != NULL then + return -1 +endi 
+if $data20 != 234 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 456 then + return -1 +endi +if $data50 != 567 then + return -1 +endi +if $data60 != 678 then + return -1 +endi +sql select cast(c8 as binary(3)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 123 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 234 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 456 then + return -1 +endi +if $data50 != 567 then + return -1 +endi +if $data60 != 678 then + return -1 +endi +sql select cast(c8 as binary(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 12 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 23 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 45 then + return -1 +endi +if $data50 != 56 then + return -1 +endi +if $data60 != 67 then + return -1 +endi +sql select cast(c8 as nchar(4)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 123 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 234 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 456 then + return -1 +endi +if $data50 != 567 then + return -1 +endi +if $data60 != 678 then + return -1 +endi +sql select cast(c8 as nchar(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 5 then + return -1 +endi +if $data60 != 6 then + return -1 +endi +sql select cast(c8 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 123 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 234 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 456 then + return -1 
+endi +if $data50 != 567 then + return -1 +endi +if $data60 != 678 then + return -1 +endi + + +print "test nchar column" + +sql select cast(c9 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1234 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3456 then + return -1 +endi +if $data40 != 4567 then + return -1 +endi +if $data50 != 5678 then + return -1 +endi +if $data60 != 6789 then + return -1 +endi +sql select cast(c9 as nchar(5)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1234 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3456 then + return -1 +endi +if $data40 != 4567 then + return -1 +endi +if $data50 != 5678 then + return -1 +endi +if $data60 != 6789 then + return -1 +endi +sql select cast(c9 as nchar(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 12 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 34 then + return -1 +endi +if $data40 != 45 then + return -1 +endi +if $data50 != 56 then + return -1 +endi +if $data60 != 67 then + return -1 +endi +sql select cast(c9 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1234 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3456 then + return -1 +endi +if $data40 != 4567 then + return -1 +endi +if $data50 != 5678 then + return -1 +endi +if $data60 != 6789 then + return -1 +endi + + +print "test utinyint column" + +sql select cast(c10 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 254 then + 
return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c10 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 254 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c10 as binary(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 2 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c10 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @70-01-01 08:00:00.002@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-01-01 08:00:00.254@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.000@ then + return -1 +endi +sql select cast(c10 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 254 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c10 as nchar(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 25 then + return -1 +endi +if $data60 != 0 then + 
return -1 +endi +sql select cast(c10 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 254 then + return -1 +endi +if $data60 != 0 then + return -1 +endi + + +print "test usmallint column" + +sql select cast(c11 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 65534 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c11 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 65534 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c11 as binary(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 6 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c11 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @70-01-01 08:00:00.003@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-01-01 08:01:05.534@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.000@ 
then + return -1 +endi +sql select cast(c11 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 65534 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c11 as nchar(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 65 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c11 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 65534 then + return -1 +endi +if $data60 != 0 then + return -1 +endi + + +print "test uint column" + +sql select cast(c12 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4294967294 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c12 as binary(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4294967294 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c12 as binary(2)) from tb1; +if $rows != 7 then + 
return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 42 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c12 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @70-01-01 08:00:00.002@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data50 != @70-02-20 01:02:47.294@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.000@ then + return -1 +endi +sql select cast(c12 as nchar(10)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4294967294 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c12 as nchar(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c12 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4294967294 then + return -1 +endi +if $data60 != 0 then + return -1 +endi + + +print "test ubigint column" + +sql select cast(c13 as bigint) from tb1; +if $rows 
!= 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c13 as binary(20)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c13 as binary(1)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c13 as timestamp) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != @70-01-01 08:00:00.001@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @70-01-01 08:00:00.003@ then + return -1 +endi +if $data40 != @70-01-01 08:00:00.004@ then + return -1 +endi +if $data60 != @70-01-01 08:00:00.000@ then + return -1 +endi +sql select cast(c13 as nchar(20)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c13 as nchar(2)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + 
return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 92 then + return -1 +endi +if $data60 != 0 then + return -1 +endi +sql select cast(c13 as bigint unsigned) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 9223372036854775807 then + return -1 +endi +if $data60 != 0 then + return -1 +endi diff --git a/tests/script/general/compute/cast_query2.sim b/tests/script/general/compute/cast_query2.sim new file mode 100644 index 0000000000000000000000000000000000000000..6cbb6a11ccfab156dea06d86e27a7e7810297f4a --- /dev/null +++ b/tests/script/general/compute/cast_query2.sim @@ -0,0 +1,1265 @@ +sleep 100 +sql connect + +sql use db; + + +print "test arithmetic" + +sql select cast(c2 + c3 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 8 then + return -1 +endi +if $data50 != 32894 then + return -1 +endi +if $data60 != -32894 then + return -1 +endi +sql select cast((c2 + c3) as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 8 then + return -1 +endi +if $data50 != 32894 then + return -1 +endi +if $data60 != -32894 then + return -1 +endi +sql select cast((c2 * c3)+c4-6 as bigint) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != -4 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return 
-1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 14 then + return -1 +endi +if $data50 != 2151645050 then + return -1 +endi +if $data60 != -2143322244 then + return -1 +endi +sql select cast(11 as bigint)+c2 from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 12.000000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 13.000000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 15.000000000 then + return -1 +endi +if $data50 != 138.000000000 then + return -1 +endi +if $data60 != -116.000000000 then + return -1 +endi +sql select cast(c1 as bigint)+c2 from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 5.000000000 then + return -1 +endi +if $data50 != 128.000000000 then + return -1 +endi +if $data60 != -126.000000000 then + return -1 +endi +sql select cast(c2 as bigint)+11 from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 12.000000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 13.000000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 15.000000000 then + return -1 +endi +if $data50 != 138.000000000 then + return -1 +endi +if $data60 != -116.000000000 then + return -1 +endi +sql select cast(c2 as bigint)+11+floor(c2) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 13.000000000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 15.000000000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 19.000000000 then + return -1 +endi +if $data50 != 265.000000000 then + return -1 +endi +if $data60 != -243.000000000 then + return -1 +endi + +print "test function,column/tag/tbname/ts/_C0/_c0/scalar/agg/selectivity/self" + +sql select 
cast(c1 as bigint),c1,c2 from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data12 != NULL then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data22 != 2 then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data31 != 0 then + return -1 +endi +if $data32 != NULL then + return -1 +endi +if $data40 != 1 then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data42 != 4 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data51 != 1 then + return -1 +endi +if $data52 != 127 then + return -1 +endi +if $data60 != 1 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data62 != -127 then + return -1 +endi +sql select cast(c1 as bigint),t1,ts,tbname,_C0,_c0 from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data03 != tb1 then + return -1 +endi +if $data04 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data12 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data13 != tb1 then + return -1 +endi +if $data14 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data21 != 1 then + return -1 +endi +if $data22 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data23 != tb1 then + return -1 +endi +if $data24 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data31 != 1 then + return -1 +endi +if $data32 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data33 != tb1 then + return -1 +endi +if $data34 != @21-11-11 09:00:03.000@ then + return 
-1 +endi +if $data40 != 1 then + return -1 +endi +if $data41 != 1 then + return -1 +endi +if $data42 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data43 != tb1 then + return -1 +endi +if $data44 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data51 != 1 then + return -1 +endi +if $data52 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data53 != tb1 then + return -1 +endi +if $data54 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data60 != 1 then + return -1 +endi +if $data61 != 1 then + return -1 +endi +if $data62 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data63 != tb1 then + return -1 +endi +if $data64 != @21-11-11 09:00:06.000@ then + return -1 +endi +sql select cast(c1 as bigint),floor(c3) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data31 != 3 then + return -1 +endi +if $data40 != 1 then + return -1 +endi +if $data41 != 4 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data51 != 32767 then + return -1 +endi +if $data60 != 1 then + return -1 +endi +if $data61 != -32767 then + return -1 +endi +sql select cast(c1 as bigint),cast(c2+c3 as binary(6)) from tb1; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 2.0000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != 1 then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != 0 then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != 1 then + return -1 +endi +if $data41 != 8.0000 then + return -1 +endi +if $data50 != 1 then + return -1 +endi +if $data51 != 32894. 
then + return -1 +endi +if $data60 != 1 then + return -1 +endi +if $data61 != -32894 then + return -1 +endi + +sql select cast(c2+c3 as binary(6)) from tb1 where c2 is not null and c3 is not null; +if $rows != 4 then + return -1 +endi +if $data00 != 2.0000 then + return -1 +endi +if $data10 != 8.0000 then + return -1 +endi +if $data20 != 32894. then + return -1 +endi +if $data30 != -32894 then + return -1 +endi +sql select cast(c2 as binary(6)) from tb1 order by ts desc; +if $rows != 7 then + return -1 +endi +if $data00 != -127 then + return -1 +endi +if $data10 != 127 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 2 then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != 1 then + return -1 +endi + +sql select cast(c2+c3 as binary(6)) from tb1 order by ts desc; +if $rows != 7 then + return -1 +endi +if $data00 != -32894 then + return -1 +endi +if $data10 != 32894. then + return -1 +endi +if $data20 != 8.0000 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != NULL then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != 2.0000 then + return -1 +endi +sql select cast(c2+c3 as binary(6)) from tb1 order by ts desc limit 3 offset 2; +if $rows != 3 then + return -1 +endi +if $data00 != 8.0000 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi + +sql select cast(c2 as binary(2)) from stb1; +if $rows != 14 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 12 then + return -1 +endi +if $data60 != -1 then + return -1 +endi +if $data70 != 1 then + return -1 +endi +if $data80 != 2 then + return -1 +endi +if $data90 != 3 then + return -1 +endi +sql select cast(c2 as binary(2)) 
from stb1 order by ts desc; +if $rows != 14 then + return -1 +endi +if $data00 != -1 then +if $data00 != 7 then +return -1 +endi +endi +if $data10 != 7 then +if $data10 != -1 then + return -1 +endi +endi +if $data20 != 6 then +if $data20 != 12 then + return -1 +endi +endi +if $data30 != 12 then +if $data30 != 6 then + return -1 +endi +endi +if $data40 != 4 then +if $data40 != 5 then + return -1 +endi +endi +if $data50 != 5 then +if $data50 != 4 then + return -1 +endi +endi +if $data60 != 4 then +if $data60 != NULL then + return -1 +endi +endi +if $data70 != NULL then +if $data70 != 4 then + return -1 +endi +endi +if $data80 != 2 then +if $data80 != 3 then + return -1 +endi +endi +if $data90 != 3 then +if $data90 != 2 then + return -1 +endi +endi + +sql select cast(c4 as bigint),t1 from stb1 order by ts desc; +if $rows != 14 then + return -1 +endi +if $data00 != -2147483647 then +if $data00 != 7 then + return -1 +endi +endi +if $data01 != 1 then +if $data01 != 2 then + return -1 +endi +endi +if $data10 != 7 then +if $data10 != -2147483647 then + return -1 +endi +endi +if $data11 != 1 then +if $data11 != 2 then + return -1 +endi +endi +if $data20 != 6 then +if $data20 != 2147483647 then + return -1 +endi +endi +if $data21 != 2 then +if $data21 != 1 then + return -1 +endi +endi +if $data30 != 2147483647 then +if $data30 != 6 then + return -1 +endi +endi +if $data31 != 1 then +if $data31 != 2 then + return -1 +endi +endi +if $data40 != 4 then +if $data40 != 5 then + return -1 +endi +endi +if $data41 != 1 then +if $data41 != 2 then + return -1 +endi +endi +if $data50 != 5 then +if $data50 != 4 then + return -1 +endi +endi +if $data51 != 1 then +if $data51 != 2 then + return -1 +endi +endi +if $data60 != 4 then +if $data60 != NULL then + return -1 +endi +endi +if $data61 != 2 then +if $data61 != 1 then + return -1 +endi +endi +if $data70 != NULL then +if $data70 != 4 then + return -1 +endi +endi +if $data71 != 1 then +if $data71 != 2 then + return -1 +endi +endi +if 
$data80 != 2 then +if $data80 != 2 then + return -1 +endi +endi +if $data81 != 1 then +if $data81 != 2 then + return -1 +endi +endi +if $data90 != 2 then + return -1 +endi +if $data91 != 2 then +if $data91 != 1 then + return -1 +endi +endi + +sql select cast(c3 as bigint),tbname from stb1; +if $rows != 14 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != tb1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != tb1 then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != tb1 then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data31 != tb1 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data41 != tb1 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data51 != tb1 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +if $data61 != tb1 then + return -1 +endi +if $data70 != 1 then + return -1 +endi +if $data71 != tb2 then + return -1 +endi +if $data80 != 2 then + return -1 +endi +if $data81 != tb2 then + return -1 +endi +if $data90 != 3 then + return -1 +endi +if $data91 != tb2 then + return -1 +endi + +sql select cast(c3 as bigint),tbname from stb1 where t1 > 1; +if $rows != 7 then + return -1 +endi + +sql select cast(c8 as bigint),cast(c9 as bigint) from tbn; +if $rows != 7 then + return -1 +endi +if $data00 != 111 then + return -1 +endi +if $data01 != 1111 then + return -1 +endi +if $data10 != 222 then + return -1 +endi +if $data11 != 2222 then + return -1 +endi +if $data20 != 333 then + return -1 +endi +if $data21 != 3333 then + return -1 +endi +if $data30 != 444 then + return -1 +endi +if $data31 != 4444 then + return -1 +endi +if $data40 != 555 then + return -1 +endi +if $data41 != 5555 then + return -1 +endi +if $data50 != 666 then + return -1 +endi +if $data51 != 6666 then + return -1 +endi +if $data60 != 777 then + return -1 +endi +if $data61 != 7777 then + return -1 +endi + +sql select cast(c8 as 
bigint),cast(c9 as bigint) from tbn order by ts desc; +if $rows != 7 then + return -1 +endi +if $data00 != 777 then + return -1 +endi +if $data01 != 7777 then + return -1 +endi +if $data10 != 666 then + return -1 +endi +if $data11 != 6666 then + return -1 +endi +if $data20 != 555 then + return -1 +endi +if $data21 != 5555 then + return -1 +endi +if $data30 != 444 then + return -1 +endi +if $data31 != 4444 then + return -1 +endi +if $data40 != 333 then + return -1 +endi +if $data41 != 3333 then + return -1 +endi +if $data50 != 222 then + return -1 +endi +if $data51 != 2222 then + return -1 +endi +if $data60 != 111 then + return -1 +endi +if $data61 != 1111 then + return -1 +endi + +sql select cast(cast(c8 as binary(2)) as bigint) from tbn; +if $rows != 7 then + return -1 +endi +if $data00 != 11 then + return -1 +endi +if $data10 != 22 then + return -1 +endi +if $data20 != 33 then + return -1 +endi +if $data30 != 44 then + return -1 +endi +if $data40 != 55 then + return -1 +endi +if $data50 != 66 then + return -1 +endi +if $data60 != 77 then + return -1 +endi + +sql select cast(cast(cast(cast(ts as bigint) as binary(5)) as bigint)+cast(cast(cast(ts as bigint) as binary(2)) as bigint) as bigint) from tbn; +if $rows != 7 then + return -1 +endi +if $data00 != 16381 then + return -1 +endi +if $data10 != 16381 then + return -1 +endi +if $data20 != 16381 then + return -1 +endi +if $data30 != 16381 then + return -1 +endi +if $data40 != 16381 then + return -1 +endi +if $data50 != 16381 then + return -1 +endi +if $data60 != 16381 then + return -1 +endi + +sql select cast(cast(cast(cast(ts as bigint) as binary(5)) as bigint)+cast(cast(cast(ts as bigint) as binary(2)) as bigint) as bigint) from tb3; +if $rows != 0 then + return -1 +endi + +sql select cast(a as bigint) from (select avg(c2) as a from stb1 interval(1s)); +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if 
$data30 != 4 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 66 then + return -1 +endi +if $data60 != -60 then + return -1 +endi + +sql select cast(c2 as binary(10)) from (select * from stb1); +if $rows != 14 then + return -1 +endi + +sql select cast(a as binary(10)) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $rows != 10 then + return -1 +endi +if $data00 != 1.000000 then + return -1 +endi +if $data10 != 2.000000 then + return -1 +endi +if $data20 != 2.500000 then + return -1 +endi +if $data30 != 4.000000 then + return -1 +endi +if $data40 != 4.500000 then + return -1 +endi +if $data50 != 66.500000 then + return -1 +endi +if $data60 != -60.000000 then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi + +sql select cast(a as bigint) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $rows != 10 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != 4 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 66 then + return -1 +endi +if $data60 != -60 then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi + +sql select cast(a as bigint) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $rows != 10 then + return -1 +endi +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != -60 then + return -1 +endi +if $data40 != 66 then + return -1 
+endi +if $data50 != 4 then + return -1 +endi +if $data60 != 4 then + return -1 +endi +if $data70 != 2 then + return -1 +endi +if $data80 != 2 then + return -1 +endi +if $data90 != 1 then + return -1 +endi + +sql select cast(a as bigint) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $rows != 10 then + return -1 +endi +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 66 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4 then + return -1 +endi +if $data60 != 2 then + return -1 +endi +if $data70 != 2 then + return -1 +endi +if $data80 != 1 then + return -1 +endi +if $data90 != -60 then + return -1 +endi + +sql select cast(a as bigint) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $rows != 10 then + return -1 +endi +if $data00 != -60 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 4 then + return -1 +endi +if $data60 != 66 then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi + +sql select cast(a as bigint) from (select cast(c2 as binary(2)) as a from tb1); +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 12 then + return -1 +endi +if $data60 != -1 then + return -1 +endi + +sql select cast(tb1.c3 as binary(10)),cast(tb2.c3 as binary(10)) from tb1,tb2 where tb1.ts=tb2.ts; +if $rows != 7 
then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data31 != 4 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data41 != 5 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +if $data61 != 7 then + return -1 +endi + +sql select cast(c3 as binary(10)) from tb1 union all select cast(c3 as binary(10)) from tb2; +if $rows != 14 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != 3 then + return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data50 != 32767 then + return -1 +endi +if $data60 != -32767 then + return -1 +endi +if $data70 != 1 then + return -1 +endi +if $data80 != 2 then + return -1 +endi +if $data90 != 3 then + return -1 +endi + + + \ No newline at end of file diff --git a/tests/script/general/compute/cast_query3.sim b/tests/script/general/compute/cast_query3.sim new file mode 100644 index 0000000000000000000000000000000000000000..3b5ecc41b32a6ed5646f08fa979e85b62ba32467 --- /dev/null +++ b/tests/script/general/compute/cast_query3.sim @@ -0,0 +1,63 @@ +sleep 100 +sql connect + +sql use db; + +sql select cast(stb1.c4 as binary(10)),cast(stba.c5 as binary(10)) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $rows != 7 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data31 != 4 then + 
return -1 +endi +if $data40 != 4 then + return -1 +endi +if $data41 != 5 then + return -1 +endi +if $data50 != 2147483647 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data60 != -214748364 then + return -1 +endi +if $data61 != 7 then + return -1 +endi + +sql select cast(c4 as binary(10)) as a from stb1 union all select cast(c5 as binary(10)) as a from stba; +if $rows != 44 then + return -1 +endi + +sql select cast(c2 as bigint) from stba; +if $rows != 30 then + return -1 +endi + + \ No newline at end of file diff --git a/tests/script/general/compute/csum.sim b/tests/script/general/compute/csum.sim index b350e4f403a02702741e0f10ab91fb9799e776d3..e7a2c2065029ab58e92c0d5643262a01875843be 100644 --- a/tests/script/general/compute/csum.sim +++ b/tests/script/general/compute/csum.sim @@ -101,9 +101,16 @@ if $data11 != -2 then endi print ==========>TD10758 +sql create database groupby_tbname +sql use groupby_tbname sql create stable st(ts timestamp, c1 int) tags(t int); sql create table ct1 using st tags(1) sql insert into ct1 values(now, 1)(now+1s, 2)(now+2s, 3) +sql create table ct2 using st tags(2) +sql insert into ct2 values(now, 21)(now+1s, 22)(now+2s, 23) +sql create table ct3 using st tags(3) +sql insert into ct3 values(now, 31)(now+1s, 32)(now+2s, 33) + sql select csum(c1),ts,tbname,t from ct1 print $data10 , $data11 , $data12, $data13, $data14 if $data13 != ct1 then @@ -169,6 +176,81 @@ if $data14 != 1 then return -1 endi +sql select mavg(c1,2),tbname from st group by tbname +print $data10 , $data11 , $data12 , $data13 +if $data12 != ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select diff(c1),tbname from st group by tbname +print $data10 , $data11 , $data12 , $data13 +if $data12 != ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select csum(c1),tbname from st group by tbname +print $data10 , $data11 , $data12, $data13, $data14 +print $data10 , $data11 , $data12 , $data13 +if $data12 
!= ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select csum(c1),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return -1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi + +sql select mavg(c1,2),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return -1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi +sql select diff(c1),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return -1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi +sql drop database groupby_tbname print =============== clear sql drop database $db diff --git a/tests/script/general/compute/math_abs.sim b/tests/script/general/compute/math_abs.sim new file mode 100644 index 0000000000000000000000000000000000000000..dfa073e9c5f9505330f26eb52a097136d6154e60 --- /dev/null +++ b/tests/script/general/compute/math_abs.sim @@ -0,0 +1,1068 @@ +sleep 100 +sql connect +sql use db + +print execute sql select abs(*) from tb1; +sql_error select abs(*) from tb1; +print execute sql select abs(*) from tb1; +sql_error select abs(*) from tb1; +print execute sql select abs(*) from tb1; +sql_error select abs(*) from tb1; +print execute sql select abs(*) from tb1; +sql_error select abs(*) from tb1; +print execute sql select abs(*) as a from tb1; +sql_error select abs(*) as a from tb1; +print execute sql select abs(*) + 1 as a from tb1; +sql_error select abs(*) + 1 as a 
from tb1; +print execute sql select abs(tb1.*) + 1 as a from tb1; +sql_error select abs(tb1.*) + 1 as a from tb1; +print execute sql select abs(*) from tb1; +sql_error select abs(*) from tb1; +print execute sql select abs(c1) from tb1; +sql_error select abs(c1) from tb1; +print execute sql select abs(c1) from tb1; +sql_error select abs(c1) from tb1; +print execute sql select abs(c1 + c2) from tb1; +sql_error select abs(c1 + c2) from tb1; +print execute sql select abs(13) from tb1; +sql select abs(13) from tb1; +if $data00 != @13@ then + return -1 +endi +if $data10 != @13@ then + return -1 +endi +if $data20 != @13@ then + return -1 +endi +if $data30 != @13@ then + return -1 +endi +if $data40 != @13@ then + return -1 +endi +if $data50 != @13@ then + return -1 +endi +if $data60 != @13@ then + return -1 +endi +print execute sql select abs(c1) from tb1; +sql_error select abs(c1) from tb1; +print execute sql select abs(c2) from tb1; +sql select abs(c2) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @127@ then + return -1 +endi +if $data60 != @127@ then + return -1 +endi +print execute sql select abs(c3) from tb1; +sql select abs(c3) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @32767@ then + return -1 +endi +if $data60 != @32767@ then + return -1 +endi +print execute sql select abs(c4) from tb1; +sql select abs(c4) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @2147483647@ then + return -1 +endi +if $data60 
!= @2147483647@ then + return -1 +endi +print execute sql select abs(c5) from tb1; +sql select abs(c5) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @9223372036854775807@ then + return -1 +endi +if $data60 != @9223372036854775807@ then + return -1 +endi +print execute sql select abs(c6) from tb1; +sql select abs(c6) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2.000000000@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4.000000000@ then + return -1 +endi +if $data50 != @340282346638528859811704183484516925440.000000000@ then + return -1 +endi +if $data60 != @340282346638528859811704183484516925440.000000000@ then + return -1 +endi +print execute sql select abs(c7) from tb1; +sql select abs(c7) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3.000000000@ then + return -1 +endi +if $data40 != @4.000000000@ then + return -1 +endi +if $data50 != @179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +if $data60 != @179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +print execute sql 
select abs(c8) from tb1; +sql_error select abs(c8) from tb1; +print execute sql select abs(c9) from tb1; +sql_error select abs(c9) from tb1; +print execute sql select abs(c10) from tb1; +sql select abs(c10) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @254@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select abs(c11) from tb1; +sql select abs(c11) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @65534@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select abs(c12) from tb1; +sql select abs(c12) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @4294967294@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select abs(c13) from tb1; +sql select abs(c13) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @9223372036854775807@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select abs(12345678900000000000000000) from tb1; +sql_error select abs(12345678900000000000000000) from tb1; +print execute sql select distinct abs(123) from tb1; +sql_error select distinct abs(123) from tb1; +print execute sql select abs(t1) from stb1; +sql_error select abs(t1) from stb1; +print execute sql select 
abs(c1),avg(c3) from tb1; +sql_error select abs(c1),avg(c3) from tb1; +print execute sql select abs(c1),top(c3,1) from tb1; +sql_error select abs(c1),top(c3,1) from tb1; +print execute sql select abs(c2+c3) from tb1 session(ts, 1s); +sql_error select abs(c2+c3) from tb1 session(ts, 1s); +print execute sql select abs(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select abs(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select abs(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select abs(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select abs(c2+c3) from stb1 group by t1; +sql_error select abs(c2+c3) from stb1 group by t1; +print execute sql select abs(c2+c3) from stb1 group by ts; +sql_error select abs(c2+c3) from stb1 group by ts; +print execute sql select abs(c2+c3) from stb1 group by c1; +sql_error select abs(c2+c3) from stb1 group by c1; +print execute sql select abs(c2+c3) from stb1 group by tbname; +sql_error select abs(c2+c3) from stb1 group by tbname; +print execute sql select abs(c2+c3) from tb1 order by c2; +sql_error select abs(c2+c3) from tb1 order by c2; +print execute sql select abs(c8),abs(c9) from tbn; +sql_error select abs(c8),abs(c9) from tbn; +print execute sql select abs(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select abs(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select abs(a) from (select abs(c2) as a from tb1); +sql select abs(a) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @127@ then + return -1 +endi +if $data60 != @127@ then + return -1 +endi +print execute sql select abs("abc") from tb1; +sql_error select abs("abc") from tb1; +print execute sql select abs(c2 + c3) from tb1; +sql select abs(c2 + c3) from tb1; +if $data00 
!= @2.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @8.000000000@ then + return -1 +endi +if $data50 != @32894.000000000@ then + return -1 +endi +if $data60 != @32894.000000000@ then + return -1 +endi +print execute sql select abs((c2 + c3)) from tb1; +sql select abs((c2 + c3)) from tb1; +if $data00 != @2.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @8.000000000@ then + return -1 +endi +if $data50 != @32894.000000000@ then + return -1 +endi +if $data60 != @32894.000000000@ then + return -1 +endi +print execute sql select abs((c2 * c3)+c4-6) from tb1; +sql select abs((c2 * c3)+c4-6) from tb1; +if $data00 != @4.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @14.000000000@ then + return -1 +endi +if $data50 != @2151645050.000000000@ then + return -1 +endi +if $data60 != @2143322244.000000000@ then + return -1 +endi +print execute sql select abs(11)+c2 from tb1; +sql select abs(11)+c2 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @13.000000000@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @15.000000000@ then + return -1 +endi +if $data50 != @138.000000000@ then + return -1 +endi +if $data60 != @-116.000000000@ then + return -1 +endi +print execute sql select abs(c1)+c2 from tb1; +sql_error select abs(c1)+c2 from tb1; +print execute sql select abs(c2)+11 from tb1; +sql select abs(c2)+11 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @13.000000000@ then + return -1 +endi +if $data30 != NULL then + 
return -1 +endi +if $data40 != @15.000000000@ then + return -1 +endi +if $data50 != @138.000000000@ then + return -1 +endi +if $data60 != @138.000000000@ then + return -1 +endi +print execute sql select abs(c1),c1,c2 from tb1; +sql_error select abs(c1),c1,c2 from tb1; +print execute sql select abs(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select abs(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select abs(c1),floor(c3) from tb1; +sql_error select abs(c1),floor(c3) from tb1; +print execute sql select abs(c1),abs(c2+c3) from tb1; +sql_error select abs(c1),abs(c2+c3) from tb1; +print execute sql select abs(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select abs(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @2.000000000@ then + return -1 +endi +if $data10 != @8.000000000@ then + return -1 +endi +if $data20 != @32894.000000000@ then + return -1 +endi +if $data30 != @32894.000000000@ then + return -1 +endi +print execute sql select abs(c2) from tb1 order by ts desc; +sql select abs(c2) from tb1 order by ts desc; +if $data00 != @127@ then + return -1 +endi +if $data10 != @127@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2@ then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != @1@ then + return -1 +endi +print execute sql select abs(c2+c3) from tb1 order by ts desc; +sql select abs(c2+c3) from tb1 order by ts desc; +if $data00 != @32894.000000000@ then + return -1 +endi +if $data10 != @32894.000000000@ then + return -1 +endi +if $data20 != @8.000000000@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != NULL then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != @2.000000000@ then + return -1 +endi +print execute sql select abs(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select abs(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != 
@8.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +print execute sql select abs(c2) from stb1; +sql select abs(c2) from stb1; +if $data00 != @1@ then +if $data10 != NULL then + return -1 +endi +endi +if $data20 != @2@ then +if $data30 != NULL then + return -1 +endi +endi +if $data40 != @4@ then +if $data50 != @127@ then + return -1 +endi +endi +if $data60 != @127@ then +if $data70 != @1@ then + return -1 +endi +endi +if $data80 != @2@ then +if $data90 != @3@ then + return -1 +endi +endi +print execute sql select abs(c2) from stb1 order by ts desc; +sql select abs(c2) from stb1 order by ts desc; +if $data00 != @7@ then +if $data00 != @127@ then + return -1 +endi +endi +if $data20 != @6@ then +if $data20 != @127@ then + return -1 +endi +endi +if $data40 != @5@ then +if $data40 != @4@ then + return -1 +endi +endi +if $data60 != @4@ then +if $data60 != NULL then + return -1 +endi +endi +if $data80 != @3@ then +if $data80 != @2@ then + return -1 +endi +endi +print execute sql select abs(c4),t1 from stb1 order by ts desc; +sql select abs(c4),t1 from stb1 order by ts desc; +if $data00 != @7@ then +if $data00 != @2147483647@ then + return -1 +endi +endi +if $data01 != @2@ then +if $data01 != @1@ then + return -1 +endi +endi +if $data20 != @6@ then +if $data20 != @2147483647@ then + return -1 +endi +endi +if $data21 != @2@ then +if $data21 != @1@ then + return -1 +endi +endi +if $data40 != @5@ then +if $data40 != @4@ then + return -1 +endi +endi +if $data41 != @2@ then +if $data41 != @1@ then + return -1 +endi +endi +if $data60 != @4@ then +if $data60 != NULL then + return -1 +endi +endi +if $data61 != @2@ then +if $data61 != @1@ then + return -1 +endi +endi +if $data80 != @2@ then +if $data80 != @2@ then + return -1 +endi +endi +if $data81 != @2@ then +if $data81 != @1@ then + return -1 +endi +endi +print execute sql select abs(c3),tbname from stb1; +sql select abs(c3),tbname from stb1; +if $data00 != 
@1@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @32767@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @32767@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @2@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @3@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select abs(c3),tbname from stb1 where t1 > 1; +sql select abs(c3),tbname from stb1 where t1 > 1; +if $data00 != @1@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @2@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @5@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @6@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @7@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select abs(c8),abs(c9) from tbn; +sql_error select abs(c8),abs(c9) from tbn; +print execute sql select abs(c8),abs(c9) from tbn order by ts desc; +sql_error select abs(c8),abs(c9) from tbn order by ts desc; +print execute sql select abs(abs(c8)) from tbn; +sql_error select abs(abs(c8)) from tbn; +print execute sql select abs(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select abs(a) from 
(select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @2.000000000@ then + return -1 +endi +if $data20 != @2.500000000@ then + return -1 +endi +if $data30 != @4.000000000@ then + return -1 +endi +if $data40 != @4.500000000@ then + return -1 +endi +if $data50 != @66.500000000@ then + return -1 +endi +if $data60 != @60.000000000@ then + return -1 +endi +print execute sql select abs(c2) from (select * from stb1); +sql select abs(c2) from (select * from stb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @127@ then + return -1 +endi +if $data60 != @127@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @2@ then + return -1 +endi +if $data90 != @3@ then + return -1 +endi +print execute sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @2.000000000@ then + return -1 +endi +if $data20 != @2.500000000@ then + return -1 +endi +if $data30 != @4.000000000@ then + return -1 +endi +if $data40 != @4.500000000@ then + return -1 +endi +if $data50 != @66.500000000@ then + return -1 +endi +if $data60 != @60.000000000@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select abs(a) from (select avg(c2) as a from 
stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @2.000000000@ then + return -1 +endi +if $data20 != @2.500000000@ then + return -1 +endi +if $data30 != @4.000000000@ then + return -1 +endi +if $data40 != @4.500000000@ then + return -1 +endi +if $data50 != @66.500000000@ then + return -1 +endi +if $data60 != @60.000000000@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @60.000000000@ then + return -1 +endi +if $data40 != @66.500000000@ then + return -1 +endi +if $data50 != @4.500000000@ then + return -1 +endi +if $data60 != @4.000000000@ then + return -1 +endi +if $data70 != @2.500000000@ then + return -1 +endi +if $data80 != @2.000000000@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @66.500000000@ then + return -1 
+endi +if $data40 != @4.500000000@ then + return -1 +endi +if $data50 != @4.000000000@ then + return -1 +endi +if $data60 != @2.500000000@ then + return -1 +endi +if $data70 != @2.000000000@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @60.000000000@ then + return -1 +endi +print execute sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @60.000000000@ then + return -1 +endi +if $data10 != @1.000000000@ then + return -1 +endi +if $data20 != @2.000000000@ then + return -1 +endi +if $data30 != @2.500000000@ then + return -1 +endi +if $data40 != @4.000000000@ then + return -1 +endi +if $data50 != @4.500000000@ then + return -1 +endi +if $data60 != @66.500000000@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select abs(a) from (select abs(c2) as a from tb1); +sql select abs(a) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @127@ then + return -1 +endi +if $data60 != @127@ then + return -1 +endi +print execute sql select abs(tb1.c3),abs(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select abs(tb1.c3),abs(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @2@ then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != @3@ then + return -1 +endi +if 
$data30 != @3@ then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @5@ then + return -1 +endi +if $data50 != @32767@ then + return -1 +endi +if $data51 != @6@ then + return -1 +endi +if $data60 != @32767@ then + return -1 +endi +if $data61 != @7@ then + return -1 +endi +print execute sql select abs(c3) from tb1 union all select abs(c3) from tb2; +sql select abs(c3) from tb1 union all select abs(c3) from tb2; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @32767@ then + return -1 +endi +if $data60 != @32767@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @2@ then + return -1 +endi +if $data90 != @3@ then + return -1 +endi diff --git a/tests/script/general/compute/math_abs2.sim b/tests/script/general/compute/math_abs2.sim new file mode 100644 index 0000000000000000000000000000000000000000..cd419bffcf960e637b370988c71dfe98e002d093 --- /dev/null +++ b/tests/script/general/compute/math_abs2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select abs(stb1.c4),abs(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select abs(stb1.c4),abs(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @2@ then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data21 != @3@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @5@ then + return -1 +endi +if $data50 != @2147483647@ then + return -1 +endi +if $data51 != @6@ then + return -1 +endi +if $data60 != @2147483647@ then + 
return -1 +endi +if $data61 != @7@ then + return -1 +endi +print execute sql select abs(c4) as a from stb1 union all select abs(c5) as a from stba; +sql select abs(c4) as a from stb1 union all select abs(c5) as a from stba; +if $data00 != @1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @2147483647@ then + return -1 +endi +if $data60 != @2147483647@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @2@ then + return -1 +endi +if $data90 != @2@ then + return -1 +endi +print execute sql select abs(c2) from stba; +sql select abs(c2) from stba; +if $data00 != @1@ then + return -1 +endi +if $data10 != @2@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data40 != @5@ then + return -1 +endi +if $data50 != @6@ then + return -1 +endi +if $data60 != @7@ then + return -1 +endi +if $data70 != @8@ then + return -1 +endi +if $data80 != @9@ then + return -1 +endi +if $data90 != @0@ then + return -1 +endi +print execute sql select abs(min(c2)) from tba1; +sql select abs(min(c2)) from tba1; +if $data00 != @0@ then + return -1 +endi +print execute sql select abs(max(c2)) from tba1; +sql select abs(max(c2)) from tba1; +if $data00 != @9@ then + return -1 +endi +print execute sql select abs(count(c2)) from tba1; +sql select abs(count(c2)) from tba1; +if $data00 != @30@ then + return -1 +endi +print execute sql select abs(sum(c2)) from tba1; +sql select abs(sum(c2)) from tba1; +if $data00 != @135@ then + return -1 +endi +print execute sql select abs(avg(c2)) from tba1; +sql select abs(avg(c2)) from tba1; +if $data00 != @4.500000000@ then + return -1 +endi +print execute sql select abs(percentile(c2, 10)) from tba1; +sql select abs(percentile(c2, 10)) from tba1; +if $data00 != @0.900000000@ then + return -1 +endi +print execute sql 
select abs(apercentile(c2, 10)) from tba1; +sql select abs(apercentile(c2, 10)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select abs(stddev(c2)) from tba1; +sql select abs(stddev(c2)) from tba1; +if $data00 != @2.872281323@ then + return -1 +endi +print execute sql select abs(spread(c2)) from tba1; +sql select abs(spread(c2)) from tba1; +if $data00 != @9.000000000@ then + return -1 +endi +print execute sql select abs(twa(c2)) from tba1; +sql select abs(twa(c2)) from tba1; +if $data00 != @4.637931034@ then + return -1 +endi +print execute sql select abs(leastsquares(c2, 1, 1)) from tba1; +sql_error select abs(leastsquares(c2, 1, 1)) from tba1; +print execute sql select abs(interp(c2)) from tba1 every(1s) +sql select abs(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @2@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @3@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @5@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @6@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @7@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @8@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @9@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0@ then + return -1 +endi +print execute sql select abs(interp(c2)) from stba every(1s) group by tbname; +sql select abs(interp(c2)) from stba every(1s) group by tbname; +if $data00 != 
@21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @2@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @3@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @5@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @6@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @7@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @8@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @9@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select abs(elapsed(ts)) from tba1; +sql select abs(elapsed(ts)) from tba1; +if $data00 != @29000.000000000@ then + return -1 +endi +print execute sql select abs(rate(c2)) from tba1; +sql select abs(rate(c2)) from tba1; +if $data00 != @0.896551724@ then + return -1 +endi +print execute sql select abs(irate(c2)) from tba1; +sql select abs(irate(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select abs(first(c2)) from tba1; +sql select abs(first(c2)) from tba1; 
+if $data00 != @1@ then + return -1 +endi +print execute sql select abs(last(c2)) from tba1; +sql select abs(last(c2)) from tba1; +if $data00 != @0@ then + return -1 +endi +print execute sql select abs(last_row(c2)) from tba1; +sql select abs(last_row(c2)) from tba1; +if $data00 != @0@ then + return -1 +endi +print execute sql select abs(top(c2, 1)) from tba1; +sql_error select abs(top(c2, 1)) from tba1; +print execute sql select abs(bottom(c2, 1)) from tba1; +sql_error select abs(bottom(c2, 1)) from tba1; +print execute sql select abs(leastsquares(c2, 1, 1)) from tba1; +sql_error select abs(leastsquares(c2, 1, 1)) from tba1; +print execute sql select abs(derivative(c2, 1s, 0)) from tba1; +sql_error select abs(derivative(c2, 1s, 0)) from tba1; +print execute sql select abs(diff(c2)) from tba1; +sql_error select abs(diff(c2)) from tba1; +print execute sql select abs(csum(c2)) from tba1; +sql_error select abs(csum(c2)) from tba1; +print execute sql select abs(mavg(c2,2)) from tba1; +sql_error select abs(mavg(c2,2)) from tba1; +print execute sql select abs(sample(c2,2)) from tba1; +sql_error select abs(sample(c2,2)) from tba1; +print execute sql select abs(_block_dist()) from tba1; +sql_error select abs(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_acos.sim b/tests/script/general/compute/math_acos.sim new file mode 100644 index 0000000000000000000000000000000000000000..b91af7e647a210451474b80b3af307ba7a02cb88 --- /dev/null +++ b/tests/script/general/compute/math_acos.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) as a from tb1; +sql_error select acos(*) as a from tb1; +print 
execute sql select acos(*) + 1 as a from tb1; +sql_error select acos(*) + 1 as a from tb1; +print execute sql select acos(tb1.*) + 1 as a from tb1; +sql_error select acos(tb1.*) + 1 as a from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c1 + c2) from tb1; +sql_error select acos(c1 + c2) from tb1; +print execute sql select acos(13) from tb1; +sql select acos(13) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c2) from tb1; +sql select acos(c2) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c3) from tb1; +sql select acos(c3) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c4) from tb1; +sql select acos(c4) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 
+endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c5) from tb1; +sql select acos(c5) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c6) from tb1; +sql select acos(c6) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c7) from tb1; +sql select acos(c7) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c8) from tb1; +sql_error select acos(c8) from tb1; +print execute sql select acos(c9) from tb1; +sql_error select acos(c9) from tb1; +print execute sql select acos(c10) from tb1; +sql select acos(c10) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(c11) from tb1; +sql select 
acos(c11) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(c12) from tb1; +sql select acos(c12) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(c13) from tb1; +sql select acos(c13) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(12345678900000000000000000) from tb1; +sql_error select acos(12345678900000000000000000) from tb1; +print execute sql select distinct acos(123) from tb1; +sql_error select distinct acos(123) from tb1; +print execute sql select acos(t1) from stb1; +sql_error select acos(t1) from stb1; +print execute sql select acos(c1),avg(c3) from tb1; +sql_error select acos(c1),avg(c3) from tb1; +print execute sql select acos(c1),top(c3,1) from tb1; +sql_error select acos(c1),top(c3,1) from tb1; +print execute sql select acos(c2+c3) from tb1 session(ts, 1s); +sql_error select acos(c2+c3) from tb1 session(ts, 1s); +print execute sql select acos(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select acos(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select acos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); 
+sql_error select acos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select acos(c2+c3) from stb1 group by t1; +sql_error select acos(c2+c3) from stb1 group by t1; +print execute sql select acos(c2+c3) from stb1 group by ts; +sql_error select acos(c2+c3) from stb1 group by ts; +print execute sql select acos(c2+c3) from stb1 group by c1; +sql_error select acos(c2+c3) from stb1 group by c1; +print execute sql select acos(c2+c3) from stb1 group by tbname; +sql_error select acos(c2+c3) from stb1 group by tbname; +print execute sql select acos(c2+c3) from tb1 order by c2; +sql_error select acos(c2+c3) from tb1 order by c2; +print execute sql select acos(c8),acos(c9) from tbn; +sql_error select acos(c8),acos(c9) from tbn; +print execute sql select acos(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select acos(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select acos(a) from (select acos(c2) as a from tb1); +sql select acos(a) from (select acos(c2) as a from tb1); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos("abc") from tb1; +sql_error select acos("abc") from tb1; +print execute sql select acos(c2 + c3) from tb1; +sql select acos(c2 + c3) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos((c2 + c3)) from tb1; +sql select acos((c2 + c3)) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != 
@NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos((c2 * c3)+c4-6) from tb1; +sql select acos((c2 * c3)+c4-6) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(11)+c2 from tb1; +sql select acos(11)+c2 from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1)+c2 from tb1; +sql_error select acos(c1)+c2 from tb1; +print execute sql select acos(c2)+11 from tb1; +sql select acos(c2)+11 from tb1; +if $data00 != @11.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1),c1,c2 from tb1; +sql_error select acos(c1),c1,c2 from tb1; +print execute sql select acos(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select acos(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select acos(c1),floor(c3) from tb1; +sql_error select acos(c1),floor(c3) from tb1; +print execute sql select acos(c1),acos(c2+c3) from tb1; +sql_error select acos(c1),acos(c2+c3) from tb1; +print execute sql select 
acos(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select acos(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from tb1 order by ts desc; +sql select acos(c2) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select acos(c2+c3) from tb1 order by ts desc; +sql select acos(c2+c3) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select acos(c2) from stb1; +sql select acos(c2) from stb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print 
execute sql select acos(c2) from stb1 order by ts desc; +sql select acos(c2) from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data60 != @nan@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +print execute sql select acos(c4),t1 from stb1 order by ts desc; +sql select acos(c4),t1 from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @nan@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select acos(c3),tbname from stb1; +sql select acos(c3),tbname from stb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 
+endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select acos(c3),tbname from stb1 where t1 > 1; +sql select acos(c3),tbname from stb1 where t1 > 1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select acos(c8),acos(c9) from tbn; +sql_error select acos(c8),acos(c9) from tbn; +print execute sql select acos(c8),acos(c9) from tbn order by ts desc; +sql_error select acos(c8),acos(c9) from tbn order by ts desc; +print execute sql select acos(acos(c8)) from tbn; +sql_error select acos(acos(c8)) from tbn; +print execute sql select acos(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select acos(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from (select * from stb1); +sql select acos(c2) from 
(select * from stb1); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + 
return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @0.000000000@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' 
interval(1s) fill(null)) order by a; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @0.000000000@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select acos(c2) as a from tb1); +sql select acos(a) from (select acos(c2) as a from tb1); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select acos(c3) from tb1 union all select acos(c3) from tb2; +sql select acos(c3) from tb1 union all select acos(c3) from tb2; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + 
return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi diff --git a/tests/script/general/compute/math_acos2.sim b/tests/script/general/compute/math_acos2.sim new file mode 100644 index 0000000000000000000000000000000000000000..01a83eb4d46f1ae637cd2cde0dec412cb81b7497 --- /dev/null +++ b/tests/script/general/compute/math_acos2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select acos(c4) as a from stb1 union all select acos(c5) as a from stba; +sql select acos(c4) as a from stb1 union all select acos(c5) as a from stba; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi 
+if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from stba; +sql select acos(c2) from stba; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @1.570796327@ then + return -1 +endi +print execute sql select acos(min(c2)) from tba1; +sql select acos(min(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(max(c2)) from tba1; +sql select acos(max(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(count(c2)) from tba1; +sql select acos(count(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(sum(c2)) from tba1; +sql select acos(sum(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(avg(c2)) from tba1; +sql select acos(avg(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(percentile(c2, 10)) from tba1; +sql select acos(percentile(c2, 10)) from tba1; +if $data00 != @0.451026812@ then + return -1 +endi +print execute sql select acos(apercentile(c2, 10)) from tba1; +sql select acos(apercentile(c2, 10)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(stddev(c2)) from tba1; +sql select acos(stddev(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(spread(c2)) from tba1; +sql select acos(spread(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(twa(c2)) from tba1; 
+sql select acos(twa(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(leastsquares(c2, 1, 1)) from tba1; +sql_error select acos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select acos(interp(c2)) from tba1 every(1s) +sql select acos(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.570796327@ then + return -1 +endi +print execute sql select acos(interp(c2)) from stba every(1s) group by tbname; +sql select acos(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi 
+if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.570796327@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select acos(elapsed(ts)) from tba1; +sql select acos(elapsed(ts)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(rate(c2)) from tba1; +sql select acos(rate(c2)) from tba1; +if $data00 != @0.458874205@ then + return -1 +endi +print execute sql select acos(irate(c2)) from tba1; +sql select acos(irate(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(first(c2)) from tba1; +sql select acos(first(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select acos(last(c2)) from tba1; +sql select acos(last(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(last_row(c2)) from tba1; +sql select acos(last_row(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(top(c2, 1)) from tba1; +sql_error select 
acos(top(c2, 1)) from tba1; +print execute sql select acos(bottom(c2, 1)) from tba1; +sql_error select acos(bottom(c2, 1)) from tba1; +print execute sql select acos(leastsquares(c2, 1, 1)) from tba1; +sql_error select acos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select acos(derivative(c2, 1s, 0)) from tba1; +sql_error select acos(derivative(c2, 1s, 0)) from tba1; +print execute sql select acos(diff(c2)) from tba1; +sql_error select acos(diff(c2)) from tba1; +print execute sql select acos(csum(c2)) from tba1; +sql_error select acos(csum(c2)) from tba1; +print execute sql select acos(mavg(c2,2)) from tba1; +sql_error select acos(mavg(c2,2)) from tba1; +print execute sql select acos(sample(c2,2)) from tba1; +sql_error select acos(sample(c2,2)) from tba1; +print execute sql select acos(_block_dist()) from tba1; +sql_error select acos(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_asin.sim b/tests/script/general/compute/math_asin.sim new file mode 100644 index 0000000000000000000000000000000000000000..dee8bffaf35135fca1d31878dabc25efd2a3146a --- /dev/null +++ b/tests/script/general/compute/math_asin.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select asin(*) from tb1; +sql_error select asin(*) from tb1; +print execute sql select asin(*) from tb1; +sql_error select asin(*) from tb1; +print execute sql select asin(*) from tb1; +sql_error select asin(*) from tb1; +print execute sql select asin(*) from tb1; +sql_error select asin(*) from tb1; +print execute sql select asin(*) as a from tb1; +sql_error select asin(*) as a from tb1; +print execute sql select asin(*) + 1 as a from tb1; +sql_error select asin(*) + 1 as a from tb1; +print execute sql select asin(tb1.*) + 1 as a from tb1; +sql_error select asin(tb1.*) + 1 as a from tb1; +print execute sql select asin(*) from tb1; +sql_error select asin(*) from tb1; +print execute sql select asin(c1) from tb1; +sql_error select asin(c1) from tb1; +print 
execute sql select asin(c1) from tb1; +sql_error select asin(c1) from tb1; +print execute sql select asin(c1 + c2) from tb1; +sql_error select asin(c1 + c2) from tb1; +print execute sql select asin(13) from tb1; +sql select asin(13) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c1) from tb1; +sql_error select asin(c1) from tb1; +print execute sql select asin(c2) from tb1; +sql select asin(c2) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c3) from tb1; +sql select asin(c3) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c4) from tb1; +sql select asin(c4) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c5) from tb1; +sql select asin(c5) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 
!= @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c6) from tb1; +sql select asin(c6) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c7) from tb1; +sql select asin(c7) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c8) from tb1; +sql_error select asin(c8) from tb1; +print execute sql select asin(c9) from tb1; +sql_error select asin(c9) from tb1; +print execute sql select asin(c10) from tb1; +sql select asin(c10) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select asin(c11) from tb1; +sql select asin(c11) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select 
asin(c12) from tb1; +sql select asin(c12) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select asin(c13) from tb1; +sql select asin(c13) from tb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select asin(12345678900000000000000000) from tb1; +sql_error select asin(12345678900000000000000000) from tb1; +print execute sql select distinct asin(123) from tb1; +sql_error select distinct asin(123) from tb1; +print execute sql select asin(t1) from stb1; +sql_error select asin(t1) from stb1; +print execute sql select asin(c1),avg(c3) from tb1; +sql_error select asin(c1),avg(c3) from tb1; +print execute sql select asin(c1),top(c3,1) from tb1; +sql_error select asin(c1),top(c3,1) from tb1; +print execute sql select asin(c2+c3) from tb1 session(ts, 1s); +sql_error select asin(c2+c3) from tb1 session(ts, 1s); +print execute sql select asin(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select asin(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select asin(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select asin(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select asin(c2+c3) from stb1 group by t1; +sql_error select asin(c2+c3) from stb1 group by t1; +print execute sql select asin(c2+c3) from stb1 group by ts; +sql_error select asin(c2+c3) from stb1 group by ts; +print execute sql select asin(c2+c3) from stb1 group by c1; +sql_error 
select asin(c2+c3) from stb1 group by c1; +print execute sql select asin(c2+c3) from stb1 group by tbname; +sql_error select asin(c2+c3) from stb1 group by tbname; +print execute sql select asin(c2+c3) from tb1 order by c2; +sql_error select asin(c2+c3) from tb1 order by c2; +print execute sql select asin(c8),asin(c9) from tbn; +sql_error select asin(c8),asin(c9) from tbn; +print execute sql select asin(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select asin(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select asin(a) from (select asin(c2) as a from tb1); +sql select asin(a) from (select asin(c2) as a from tb1); +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin("abc") from tb1; +sql_error select asin("abc") from tb1; +print execute sql select asin(c2 + c3) from tb1; +sql select asin(c2 + c3) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin((c2 + c3)) from tb1; +sql select asin((c2 + c3)) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin((c2 * c3)+c4-6) from tb1; +sql select asin((c2 * c3)+c4-6) from tb1; +if $data00 != @nan@ then + 
return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(11)+c2 from tb1; +sql select asin(11)+c2 from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c1)+c2 from tb1; +sql_error select asin(c1)+c2 from tb1; +print execute sql select asin(c2)+11 from tb1; +sql select asin(c2)+11 from tb1; +if $data00 != @12.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c1),c1,c2 from tb1; +sql_error select asin(c1),c1,c2 from tb1; +print execute sql select asin(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select asin(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select asin(c1),floor(c3) from tb1; +sql_error select asin(c1),floor(c3) from tb1; +print execute sql select asin(c1),asin(c2+c3) from tb1; +sql_error select asin(c1),asin(c2+c3) from tb1; +print execute sql select asin(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select asin(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +print execute sql select asin(c2) from tb1 order by ts desc; +sql 
select asin(c2) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select asin(c2+c3) from tb1 order by ts desc; +sql select asin(c2+c3) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select asin(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select asin(c2) from stb1; +sql select asin(c2) from stb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @1.570796327@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select asin(c2) from stb1 order by ts desc; +sql select asin(c2) from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data60 != @NULL@ then + if $data60 != 
@nan@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +print execute sql select asin(c4),t1 from stb1 order by ts desc; +sql select asin(c4),t1 from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data01 != @1@ then + if $data01 != @2@ then + return -1 + endi +endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data21 != @1@ then + if $data21 != @2@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data41 != @1@ then + if $data41 != @2@ then + return -1 + endi +endi +if $data60 != @NULL@ then + if $data60 != @nan@ then + return -1 + endi +endi +if $data61 != @1@ then + if $data61 != @2@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +if $data81 != @1@ then + if $data81 != @2@ then + return -1 + endi +endi +print execute sql select asin(c3),tbname from stb1; +sql select asin(c3),tbname from stb1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1.570796327@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute 
sql select asin(c3),tbname from stb1 where t1 > 1; +sql select asin(c3),tbname from stb1 where t1 > 1; +if $data00 != @1.570796327@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select asin(c8),asin(c9) from tbn; +sql_error select asin(c8),asin(c9) from tbn; +print execute sql select asin(c8),asin(c9) from tbn order by ts desc; +sql_error select asin(c8),asin(c9) from tbn order by ts desc; +print execute sql select asin(asin(c8)) from tbn; +sql_error select asin(asin(c8)) from tbn; +print execute sql select asin(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select asin(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(c2) from (select * from stb1); +sql select asin(c2) from (select * from stb1); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @1.570796327@ then + return 
-1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' 
interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @1.570796327@ then + return -1 +endi +print execute sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @1.570796327@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @1.570796327@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + 
return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select asin(a) from (select asin(c2) as a from tb1); +sql select asin(a) from (select asin(c2) as a from tb1); +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select asin(tb1.c3),asin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select asin(tb1.c3),asin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1.570796327@ then + return -1 +endi +if $data01 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select asin(c3) from tb1 union all select asin(c3) from tb2; +sql select asin(c3) from tb1 union all select asin(c3) from tb2; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @1.570796327@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi diff --git a/tests/script/general/compute/math_asin2.sim 
b/tests/script/general/compute/math_asin2.sim new file mode 100644 index 0000000000000000000000000000000000000000..98b4eb05bcb24b9fb962dbbdd6e8ddfa3e55280f --- /dev/null +++ b/tests/script/general/compute/math_asin2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select asin(stb1.c4),asin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select asin(stb1.c4),asin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1.570796327@ then + return -1 +endi +if $data01 != @1.570796327@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select asin(c4) as a from stb1 union all select asin(c5) as a from stba; +sql select asin(c4) as a from stb1 union all select asin(c5) as a from stba; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @1.570796327@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select asin(c2) from stba; +sql select asin(c2) from stba; +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 
+endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select asin(min(c2)) from tba1; +sql select asin(min(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select asin(max(c2)) from tba1; +sql select asin(max(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(count(c2)) from tba1; +sql select asin(count(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(sum(c2)) from tba1; +sql select asin(sum(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(avg(c2)) from tba1; +sql select asin(avg(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(percentile(c2, 10)) from tba1; +sql select asin(percentile(c2, 10)) from tba1; +if $data00 != @1.119769515@ then + return -1 +endi +print execute sql select asin(apercentile(c2, 10)) from tba1; +sql select asin(apercentile(c2, 10)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select asin(stddev(c2)) from tba1; +sql select asin(stddev(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(spread(c2)) from tba1; +sql select asin(spread(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(twa(c2)) from tba1; +sql select asin(twa(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(leastsquares(c2, 1, 1)) from tba1; +sql_error select asin(leastsquares(c2, 1, 1)) from tba1; +print execute sql select asin(interp(c2)) from tba1 every(1s) +sql select asin(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != 
@1.570796327@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +print execute sql select asin(interp(c2)) from stba every(1s) group by tbname; +sql select asin(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1.570796327@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + 
return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select asin(elapsed(ts)) from tba1; +sql select asin(elapsed(ts)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select asin(rate(c2)) from tba1; +sql select asin(rate(c2)) from tba1; +if $data00 != @1.111922122@ then + return -1 +endi +print execute sql select asin(irate(c2)) from tba1; +sql select asin(irate(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select asin(first(c2)) from tba1; +sql select asin(first(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select asin(last(c2)) from tba1; +sql select asin(last(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select asin(last_row(c2)) from tba1; +sql select asin(last_row(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select asin(top(c2, 1)) from tba1; +sql_error select asin(top(c2, 1)) from tba1; +print execute sql select asin(bottom(c2, 1)) from tba1; +sql_error select asin(bottom(c2, 1)) from tba1; +print execute sql select asin(leastsquares(c2, 1, 1)) from tba1; +sql_error select asin(leastsquares(c2, 1, 1)) from tba1; +print execute sql select asin(derivative(c2, 1s, 0)) from tba1; +sql_error select asin(derivative(c2, 1s, 0)) from tba1; +print 
execute sql select asin(diff(c2)) from tba1; +sql_error select asin(diff(c2)) from tba1; +print execute sql select asin(csum(c2)) from tba1; +sql_error select asin(csum(c2)) from tba1; +print execute sql select asin(mavg(c2,2)) from tba1; +sql_error select asin(mavg(c2,2)) from tba1; +print execute sql select asin(sample(c2,2)) from tba1; +sql_error select asin(sample(c2,2)) from tba1; +print execute sql select asin(_block_dist()) from tba1; +sql_error select asin(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_atan.sim b/tests/script/general/compute/math_atan.sim new file mode 100644 index 0000000000000000000000000000000000000000..b91af7e647a210451474b80b3af307ba7a02cb88 --- /dev/null +++ b/tests/script/general/compute/math_atan.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(*) as a from tb1; +sql_error select acos(*) as a from tb1; +print execute sql select acos(*) + 1 as a from tb1; +sql_error select acos(*) + 1 as a from tb1; +print execute sql select acos(tb1.*) + 1 as a from tb1; +sql_error select acos(tb1.*) + 1 as a from tb1; +print execute sql select acos(*) from tb1; +sql_error select acos(*) from tb1; +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c1 + c2) from tb1; +sql_error select acos(c1 + c2) from tb1; +print execute sql select acos(13) from tb1; +sql select acos(13) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != 
@nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1) from tb1; +sql_error select acos(c1) from tb1; +print execute sql select acos(c2) from tb1; +sql select acos(c2) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c3) from tb1; +sql select acos(c3) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c4) from tb1; +sql select acos(c4) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c5) from tb1; +sql select acos(c5) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c6) from tb1; +sql select acos(c6) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if 
$data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c7) from tb1; +sql select acos(c7) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c8) from tb1; +sql_error select acos(c8) from tb1; +print execute sql select acos(c9) from tb1; +sql_error select acos(c9) from tb1; +print execute sql select acos(c10) from tb1; +sql select acos(c10) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(c11) from tb1; +sql select acos(c11) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(c12) from tb1; +sql select acos(c12) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print 
execute sql select acos(c13) from tb1; +sql select acos(c13) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @1.570796327@ then + return -1 +endi +print execute sql select acos(12345678900000000000000000) from tb1; +sql_error select acos(12345678900000000000000000) from tb1; +print execute sql select distinct acos(123) from tb1; +sql_error select distinct acos(123) from tb1; +print execute sql select acos(t1) from stb1; +sql_error select acos(t1) from stb1; +print execute sql select acos(c1),avg(c3) from tb1; +sql_error select acos(c1),avg(c3) from tb1; +print execute sql select acos(c1),top(c3,1) from tb1; +sql_error select acos(c1),top(c3,1) from tb1; +print execute sql select acos(c2+c3) from tb1 session(ts, 1s); +sql_error select acos(c2+c3) from tb1 session(ts, 1s); +print execute sql select acos(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select acos(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select acos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select acos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select acos(c2+c3) from stb1 group by t1; +sql_error select acos(c2+c3) from stb1 group by t1; +print execute sql select acos(c2+c3) from stb1 group by ts; +sql_error select acos(c2+c3) from stb1 group by ts; +print execute sql select acos(c2+c3) from stb1 group by c1; +sql_error select acos(c2+c3) from stb1 group by c1; +print execute sql select acos(c2+c3) from stb1 group by tbname; +sql_error select acos(c2+c3) from stb1 group by tbname; +print execute sql select acos(c2+c3) from tb1 order by c2; +sql_error select acos(c2+c3) from tb1 order by c2; +print execute sql select acos(c8),acos(c9) from tbn; +sql_error select acos(c8),acos(c9) from tbn; +print 
execute sql select acos(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select acos(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select acos(a) from (select acos(c2) as a from tb1); +sql select acos(a) from (select acos(c2) as a from tb1); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos("abc") from tb1; +sql_error select acos("abc") from tb1; +print execute sql select acos(c2 + c3) from tb1; +sql select acos(c2 + c3) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos((c2 + c3)) from tb1; +sql select acos((c2 + c3)) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos((c2 * c3)+c4-6) from tb1; +sql select acos((c2 * c3)+c4-6) from tb1; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(11)+c2 from tb1; +sql select acos(11)+c2 from tb1; +if $data00 != 
@nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1)+c2 from tb1; +sql_error select acos(c1)+c2 from tb1; +print execute sql select acos(c2)+11 from tb1; +sql select acos(c2)+11 from tb1; +if $data00 != @11.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c1),c1,c2 from tb1; +sql_error select acos(c1),c1,c2 from tb1; +print execute sql select acos(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select acos(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select acos(c1),floor(c3) from tb1; +sql_error select acos(c1),floor(c3) from tb1; +print execute sql select acos(c1),acos(c2+c3) from tb1; +sql_error select acos(c1),acos(c2+c3) from tb1; +print execute sql select acos(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select acos(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from tb1 order by ts desc; +sql select acos(c2) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql 
select acos(c2+c3) from tb1 order by ts desc; +sql select acos(c2+c3) from tb1 order by ts desc; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select acos(c2) from stb1; +sql select acos(c2) from stb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from stb1 order by ts desc; +sql select acos(c2) from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data60 != @nan@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +print execute sql select acos(c4),t1 from stb1 order by ts desc; +sql select acos(c4),t1 from stb1 order by ts desc; +if $data00 != @nan@ then + if $data00 != @nan@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi 
+endi +if $data20 != @nan@ then + if $data20 != @nan@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @nan@ then + if $data40 != @nan@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @nan@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @nan@ then + if $data80 != @nan@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select acos(c3),tbname from stb1; +sql select acos(c3),tbname from stb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select acos(c3),tbname from stb1 where t1 > 1; +sql select acos(c3),tbname from stb1 where t1 > 1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if 
$data30 != @nan@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select acos(c8),acos(c9) from tbn; +sql_error select acos(c8),acos(c9) from tbn; +print execute sql select acos(c8),acos(c9) from tbn order by ts desc; +sql_error select acos(c8),acos(c9) from tbn order by ts desc; +print execute sql select acos(acos(c8)) from tbn; +sql_error select acos(acos(c8)) from tbn; +print execute sql select acos(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select acos(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from (select * from stb1); +sql select acos(c2) from (select * from stb1); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= 
'2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ 
then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @0.000000000@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @nan@ then + return -1 +endi +if $data10 != @0.000000000@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select acos(a) from (select acos(c2) as a from tb1); +sql select acos(a) from (select acos(c2) as a from tb1); +if $data00 != @1.570796327@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi 
+if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +print execute sql select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select acos(c3) from tb1 union all select acos(c3) from tb2; +sql select acos(c3) from tb1 union all select acos(c3) from tb2; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi diff --git a/tests/script/general/compute/math_atan2.sim b/tests/script/general/compute/math_atan2.sim new file mode 100644 index 0000000000000000000000000000000000000000..01a83eb4d46f1ae637cd2cde0dec412cb81b7497 --- /dev/null +++ b/tests/script/general/compute/math_atan2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 
and stb1.ts=stba.ts; +sql select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @0.000000000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +print execute sql select acos(c4) as a from stb1 union all select acos(c5) as a from stba; +sql select acos(c4) as a from stb1 union all select acos(c5) as a from stba; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @0.000000000@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @nan@ then + return -1 +endi +print execute sql select acos(c2) from stba; +sql select acos(c2) from stba; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @nan@ then + return -1 +endi +if $data20 != @nan@ then + return -1 +endi +if $data30 != @nan@ then + return -1 +endi +if $data40 != @nan@ then + return -1 +endi +if $data50 != @nan@ then + return -1 +endi +if $data60 != @nan@ then + return -1 +endi +if $data70 != @nan@ then + return -1 +endi +if $data80 != @nan@ then + return -1 +endi +if $data90 != @1.570796327@ then + return -1 +endi +print execute sql select acos(min(c2)) from tba1; +sql select acos(min(c2)) from 
tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(max(c2)) from tba1; +sql select acos(max(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(count(c2)) from tba1; +sql select acos(count(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(sum(c2)) from tba1; +sql select acos(sum(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(avg(c2)) from tba1; +sql select acos(avg(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(percentile(c2, 10)) from tba1; +sql select acos(percentile(c2, 10)) from tba1; +if $data00 != @0.451026812@ then + return -1 +endi +print execute sql select acos(apercentile(c2, 10)) from tba1; +sql select acos(apercentile(c2, 10)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(stddev(c2)) from tba1; +sql select acos(stddev(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(spread(c2)) from tba1; +sql select acos(spread(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(twa(c2)) from tba1; +sql select acos(twa(c2)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(leastsquares(c2, 1, 1)) from tba1; +sql_error select acos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select acos(interp(c2)) from tba1 every(1s) +sql select acos(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if 
$data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.570796327@ then + return -1 +endi +print execute sql select acos(interp(c2)) from stba every(1s) group by tbname; +sql select acos(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @nan@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @nan@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @nan@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @nan@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @nan@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @nan@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @nan@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 
+endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @nan@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.570796327@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select acos(elapsed(ts)) from tba1; +sql select acos(elapsed(ts)) from tba1; +if $data00 != @nan@ then + return -1 +endi +print execute sql select acos(rate(c2)) from tba1; +sql select acos(rate(c2)) from tba1; +if $data00 != @0.458874205@ then + return -1 +endi +print execute sql select acos(irate(c2)) from tba1; +sql select acos(irate(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(first(c2)) from tba1; +sql select acos(first(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select acos(last(c2)) from tba1; +sql select acos(last(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(last_row(c2)) from tba1; +sql select acos(last_row(c2)) from tba1; +if $data00 != @1.570796327@ then + return -1 +endi +print execute sql select acos(top(c2, 1)) from tba1; +sql_error select acos(top(c2, 1)) from tba1; +print execute sql select acos(bottom(c2, 1)) from tba1; +sql_error select acos(bottom(c2, 1)) from tba1; +print execute sql select acos(leastsquares(c2, 1, 1)) from tba1; +sql_error select acos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select acos(derivative(c2, 1s, 0)) from tba1; +sql_error select acos(derivative(c2, 1s, 0)) from tba1; +print execute sql select acos(diff(c2)) from tba1; +sql_error select acos(diff(c2)) from tba1; +print execute sql select acos(csum(c2)) from tba1; +sql_error select acos(csum(c2)) from tba1; +print execute sql select acos(mavg(c2,2)) from tba1; +sql_error select acos(mavg(c2,2)) from tba1; +print execute sql select acos(sample(c2,2)) from tba1; +sql_error select 
acos(sample(c2,2)) from tba1; +print execute sql select acos(_block_dist()) from tba1; +sql_error select acos(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_cos.sim b/tests/script/general/compute/math_cos.sim new file mode 100644 index 0000000000000000000000000000000000000000..b10845ce2e42849924c1d8c9888b2943d138504a --- /dev/null +++ b/tests/script/general/compute/math_cos.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select cos(*) from tb1; +sql_error select cos(*) from tb1; +print execute sql select cos(*) from tb1; +sql_error select cos(*) from tb1; +print execute sql select cos(*) from tb1; +sql_error select cos(*) from tb1; +print execute sql select cos(*) from tb1; +sql_error select cos(*) from tb1; +print execute sql select cos(*) as a from tb1; +sql_error select cos(*) as a from tb1; +print execute sql select cos(*) + 1 as a from tb1; +sql_error select cos(*) + 1 as a from tb1; +print execute sql select cos(tb1.*) + 1 as a from tb1; +sql_error select cos(tb1.*) + 1 as a from tb1; +print execute sql select cos(*) from tb1; +sql_error select cos(*) from tb1; +print execute sql select cos(c1) from tb1; +sql_error select cos(c1) from tb1; +print execute sql select cos(c1) from tb1; +sql_error select cos(c1) from tb1; +print execute sql select cos(c1 + c2) from tb1; +sql_error select cos(c1 + c2) from tb1; +print execute sql select cos(13) from tb1; +sql select cos(13) from tb1; +if $data00 != @0.907446781@ then + return -1 +endi +if $data10 != @0.907446781@ then + return -1 +endi +if $data20 != @0.907446781@ then + return -1 +endi +if $data30 != @0.907446781@ then + return -1 +endi +if $data40 != @0.907446781@ then + return -1 +endi +if $data50 != @0.907446781@ then + return -1 +endi +if $data60 != @0.907446781@ then + return -1 +endi +print execute sql select cos(c1) from tb1; +sql_error select cos(c1) from tb1; +print execute sql select cos(c2) from tb1; +sql select cos(c2) from tb1; +if $data00 != 
@0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.232359102@ then + return -1 +endi +if $data60 != @0.232359102@ then + return -1 +endi +print execute sql select cos(c3) from tb1; +sql select cos(c3) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.982263352@ then + return -1 +endi +if $data60 != @0.982263352@ then + return -1 +endi +print execute sql select cos(c4) from tb1; +sql select cos(c4) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @-0.688836692@ then + return -1 +endi +if $data60 != @-0.688836692@ then + return -1 +endi +print execute sql select cos(c5) from tb1; +sql select cos(c5) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.011800077@ then + return -1 +endi +if $data60 != @0.011800077@ then + return -1 +endi +print execute sql select cos(c6) from tb1; +sql select cos(c6) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.853021040@ then + return -1 +endi +if 
$data60 != @0.853021040@ then + return -1 +endi +print execute sql select cos(c7) from tb1; +sql select cos(c7) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.488025633@ then + return -1 +endi +if $data60 != @0.488025633@ then + return -1 +endi +print execute sql select cos(c8) from tb1; +sql_error select cos(c8) from tb1; +print execute sql select cos(c9) from tb1; +sql_error select cos(c9) from tb1; +print execute sql select cos(c10) from tb1; +sql select cos(c10) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @-0.892018495@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select cos(c11) from tb1; +sql select cos(c11) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.929682584@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select cos(c12) from tb1; +sql select cos(c12) from tb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @-0.051008024@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select cos(c13) from tb1; +sql select cos(c13) from tb1; +if 
$data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.011800077@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select cos(12345678900000000000000000) from tb1; +sql_error select cos(12345678900000000000000000) from tb1; +print execute sql select distinct cos(123) from tb1; +sql_error select distinct cos(123) from tb1; +print execute sql select cos(t1) from stb1; +sql_error select cos(t1) from stb1; +print execute sql select cos(c1),avg(c3) from tb1; +sql_error select cos(c1),avg(c3) from tb1; +print execute sql select cos(c1),top(c3,1) from tb1; +sql_error select cos(c1),top(c3,1) from tb1; +print execute sql select cos(c2+c3) from tb1 session(ts, 1s); +sql_error select cos(c2+c3) from tb1 session(ts, 1s); +print execute sql select cos(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select cos(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select cos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select cos(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select cos(c2+c3) from stb1 group by t1; +sql_error select cos(c2+c3) from stb1 group by t1; +print execute sql select cos(c2+c3) from stb1 group by ts; +sql_error select cos(c2+c3) from stb1 group by ts; +print execute sql select cos(c2+c3) from stb1 group by c1; +sql_error select cos(c2+c3) from stb1 group by c1; +print execute sql select cos(c2+c3) from stb1 group by tbname; +sql_error select cos(c2+c3) from stb1 group by tbname; +print execute sql select cos(c2+c3) from tb1 order by c2; +sql_error select cos(c2+c3) from tb1 order by c2; +print execute sql select cos(c8),cos(c9) from tbn; +sql_error select cos(c8),cos(c9) from tbn; +print execute sql select cos(ts) from (select avg(c2) as a from stb1 interval(1s)); 
+sql_error select cos(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select cos(a) from (select cos(c2) as a from tb1); +sql select cos(a) from (select cos(c2) as a from tb1); +if $data00 != @0.857553216@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.914653326@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.793873449@ then + return -1 +endi +if $data50 != @0.973125864@ then + return -1 +endi +if $data60 != @0.973125864@ then + return -1 +endi +print execute sql select cos("abc") from tb1; +sql_error select cos("abc") from tb1; +print execute sql select cos(c2 + c3) from tb1; +sql select cos(c2 + c3) from tb1; +if $data00 != @-0.416146837@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.145500034@ then + return -1 +endi +if $data50 != @0.045863318@ then + return -1 +endi +if $data60 != @0.045863318@ then + return -1 +endi +print execute sql select cos((c2 + c3)) from tb1; +sql select cos((c2 + c3)) from tb1; +if $data00 != @-0.416146837@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.145500034@ then + return -1 +endi +if $data50 != @0.045863318@ then + return -1 +endi +if $data60 != @0.045863318@ then + return -1 +endi +print execute sql select cos((c2 * c3)+c4-6) from tb1; +sql select cos((c2 * c3)+c4-6) from tb1; +if $data00 != @-0.653643621@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.136737218@ then + return -1 +endi +if $data50 != @-0.996514711@ then + return -1 +endi +if $data60 != @0.134138861@ then + return -1 +endi +print execute sql select cos(11)+c2 from tb1; +sql 
select cos(11)+c2 from tb1; +if $data00 != @1.004425698@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2.004425698@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4.004425698@ then + return -1 +endi +if $data50 != @127.004425698@ then + return -1 +endi +if $data60 != @-126.995574302@ then + return -1 +endi +print execute sql select cos(c1)+c2 from tb1; +sql_error select cos(c1)+c2 from tb1; +print execute sql select cos(c2)+11 from tb1; +sql select cos(c2)+11 from tb1; +if $data00 != @11.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @10.583853163@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @10.346356379@ then + return -1 +endi +if $data50 != @11.232359102@ then + return -1 +endi +if $data60 != @11.232359102@ then + return -1 +endi +print execute sql select cos(c1),c1,c2 from tb1; +sql_error select cos(c1),c1,c2 from tb1; +print execute sql select cos(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select cos(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select cos(c1),floor(c3) from tb1; +sql_error select cos(c1),floor(c3) from tb1; +print execute sql select cos(c1),cos(c2+c3) from tb1; +sql_error select cos(c1),cos(c2+c3) from tb1; +print execute sql select cos(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select cos(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @-0.416146837@ then + return -1 +endi +if $data10 != @-0.145500034@ then + return -1 +endi +if $data20 != @0.045863318@ then + return -1 +endi +if $data30 != @0.045863318@ then + return -1 +endi +print execute sql select cos(c2) from tb1 order by ts desc; +sql select cos(c2) from tb1 order by ts desc; +if $data00 != @0.232359102@ then + return -1 +endi +if $data10 != @0.232359102@ then + return -1 +endi +if $data20 != @-0.653643621@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi 
+if $data40 != @-0.416146837@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.540302306@ then + return -1 +endi +print execute sql select cos(c2+c3) from tb1 order by ts desc; +sql select cos(c2+c3) from tb1 order by ts desc; +if $data00 != @0.045863318@ then + return -1 +endi +if $data10 != @0.045863318@ then + return -1 +endi +if $data20 != @-0.145500034@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @-0.416146837@ then + return -1 +endi +print execute sql select cos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select cos(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @-0.145500034@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select cos(c2) from stb1; +sql select cos(c2) from stb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.232359102@ then + return -1 +endi +if $data60 != @0.232359102@ then + return -1 +endi +if $data70 != @0.540302306@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data90 != @-0.989992497@ then + return -1 +endi +print execute sql select cos(c2) from stb1 order by ts desc; +sql select cos(c2) from stb1 order by ts desc; +if $data00 != @0.753902254@ then + if $data00 != @0.232359102@ then + return -1 + endi +endi +if $data20 != @0.960170287@ then + if $data20 != @0.232359102@ then + return -1 + endi +endi +if $data40 != @0.283662185@ then + if $data40 != @-0.653643621@ then + return -1 + endi +endi +if $data60 != @-0.653643621@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @-0.989992497@ 
then + if $data80 != @-0.416146837@ then + return -1 + endi +endi +print execute sql select cos(c4),t1 from stb1 order by ts desc; +sql select cos(c4),t1 from stb1 order by ts desc; +if $data00 != @0.753902254@ then + if $data00 != @-0.688836692@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @0.960170287@ then + if $data20 != @-0.688836692@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @0.283662185@ then + if $data40 != @-0.653643621@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @-0.653643621@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @-0.416146837@ then + if $data80 != @-0.416146837@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select cos(c3),tbname from stb1; +sql select cos(c3),tbname from stb1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @0.982263352@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @0.982263352@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @0.540302306@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @-0.989992497@ then 
+ return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select cos(c3),tbname from stb1 where t1 > 1; +sql select cos(c3),tbname from stb1 where t1 > 1; +if $data00 != @0.540302306@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @-0.416146837@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @-0.989992497@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @-0.653643621@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @0.283662185@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @0.960170287@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @0.753902254@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select cos(c8),cos(c9) from tbn; +sql_error select cos(c8),cos(c9) from tbn; +print execute sql select cos(c8),cos(c9) from tbn order by ts desc; +sql_error select cos(c8),cos(c9) from tbn order by ts desc; +print execute sql select cos(cos(c8)) from tbn; +sql_error select cos(cos(c8)) from tbn; +print execute sql select cos(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select cos(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @-0.416146837@ then + return -1 +endi +if $data20 != @-0.801143616@ then + return -1 +endi +if $data30 != @-0.653643621@ then + return -1 +endi +if $data40 != @-0.210795799@ then + return -1 +endi +if $data50 != @-0.864543874@ then + return -1 +endi +if $data60 != @-0.952412980@ then + return -1 +endi +print execute sql select cos(c2) from (select * from stb1); +sql select cos(c2) from (select * from stb1); +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if 
$data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.232359102@ then + return -1 +endi +if $data60 != @0.232359102@ then + return -1 +endi +if $data70 != @0.540302306@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data90 != @-0.989992497@ then + return -1 +endi +print execute sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @-0.416146837@ then + return -1 +endi +if $data20 != @-0.801143616@ then + return -1 +endi +if $data30 != @-0.653643621@ then + return -1 +endi +if $data40 != @-0.210795799@ then + return -1 +endi +if $data50 != @-0.864543874@ then + return -1 +endi +if $data60 != @-0.952412980@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @-0.416146837@ then + return -1 +endi +if $data20 != @-0.801143616@ then + return -1 +endi +if $data30 != @-0.653643621@ then + return -1 +endi +if $data40 != @-0.210795799@ then + return -1 +endi +if $data50 != @-0.864543874@ then + return -1 +endi +if $data60 != @-0.952412980@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print 
execute sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.952412980@ then + return -1 +endi +if $data40 != @-0.864543874@ then + return -1 +endi +if $data50 != @-0.210795799@ then + return -1 +endi +if $data60 != @-0.653643621@ then + return -1 +endi +if $data70 != @-0.801143616@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data90 != @0.540302306@ then + return -1 +endi +print execute sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.864543874@ then + return -1 +endi +if $data40 != @-0.210795799@ then + return -1 +endi +if $data50 != @-0.653643621@ then + return -1 +endi +if $data60 != @-0.801143616@ then + return -1 +endi +if $data70 != @-0.416146837@ then + return -1 +endi +if $data80 != @0.540302306@ then + return -1 +endi +if $data90 != @-0.952412980@ then + return -1 +endi +print execute sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= 
'2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @-0.952412980@ then + return -1 +endi +if $data10 != @0.540302306@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @-0.801143616@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @-0.210795799@ then + return -1 +endi +if $data60 != @-0.864543874@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select cos(a) from (select cos(c2) as a from tb1); +sql select cos(a) from (select cos(c2) as a from tb1); +if $data00 != @0.857553216@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.914653326@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.793873449@ then + return -1 +endi +if $data50 != @0.973125864@ then + return -1 +endi +if $data60 != @0.973125864@ then + return -1 +endi +print execute sql select cos(tb1.c3),cos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select cos(tb1.c3),cos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @0.540302306@ then + return -1 +endi +if $data01 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @-0.416146837@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @-0.989992497@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data31 != @-0.653643621@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data41 != @0.283662185@ then + return -1 +endi +if $data50 != @0.982263352@ then + return -1 +endi +if $data51 != @0.960170287@ then + return -1 +endi +if $data60 != @0.982263352@ then + return -1 +endi +if $data61 != @0.753902254@ then + return -1 +endi +print execute sql select cos(c3) from tb1 union all select cos(c3) from tb2; +sql 
select cos(c3) from tb1 union all select cos(c3) from tb2; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.989992497@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.982263352@ then + return -1 +endi +if $data60 != @0.982263352@ then + return -1 +endi +if $data70 != @0.540302306@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data90 != @-0.989992497@ then + return -1 +endi diff --git a/tests/script/general/compute/math_cos2.sim b/tests/script/general/compute/math_cos2.sim new file mode 100644 index 0000000000000000000000000000000000000000..ab263cfac0fc55c41a9e7a4b1ea4d1588e38bb54 --- /dev/null +++ b/tests/script/general/compute/math_cos2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select cos(stb1.c4),cos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select cos(stb1.c4),cos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @0.540302306@ then + return -1 +endi +if $data01 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @-0.416146837@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data21 != @-0.989992497@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @-0.653643621@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data41 != @0.283662185@ then + return -1 +endi +if $data50 != @-0.688836692@ then + return -1 +endi +if $data51 != @0.960170287@ then + return -1 +endi +if $data60 != @-0.688836692@ then + return -1 +endi +if $data61 != @0.753902254@ then + return -1 +endi +print execute sql select cos(c4) as a from stb1 union all select cos(c5) as a from stba; +sql select cos(c4) as a from stb1 union all select cos(c5) as a from stba; +if 
$data00 != @0.540302306@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @-0.688836692@ then + return -1 +endi +if $data60 != @-0.688836692@ then + return -1 +endi +if $data70 != @0.540302306@ then + return -1 +endi +if $data80 != @-0.416146837@ then + return -1 +endi +if $data90 != @-0.416146837@ then + return -1 +endi +print execute sql select cos(c2) from stba; +sql select cos(c2) from stba; +if $data00 != @0.540302306@ then + return -1 +endi +if $data10 != @-0.416146837@ then + return -1 +endi +if $data20 != @-0.989992497@ then + return -1 +endi +if $data30 != @-0.653643621@ then + return -1 +endi +if $data40 != @0.283662185@ then + return -1 +endi +if $data50 != @0.960170287@ then + return -1 +endi +if $data60 != @0.753902254@ then + return -1 +endi +if $data70 != @-0.145500034@ then + return -1 +endi +if $data80 != @-0.911130262@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select cos(min(c2)) from tba1; +sql select cos(min(c2)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select cos(max(c2)) from tba1; +sql select cos(max(c2)) from tba1; +if $data00 != @-0.911130262@ then + return -1 +endi +print execute sql select cos(count(c2)) from tba1; +sql select cos(count(c2)) from tba1; +if $data00 != @0.154251450@ then + return -1 +endi +print execute sql select cos(sum(c2)) from tba1; +sql select cos(sum(c2)) from tba1; +if $data00 != @-0.996087835@ then + return -1 +endi +print execute sql select cos(avg(c2)) from tba1; +sql select cos(avg(c2)) from tba1; +if $data00 != @-0.210795799@ then + return -1 +endi +print execute sql select cos(percentile(c2, 10)) from tba1; +sql select cos(percentile(c2, 10)) from tba1; +if $data00 != @0.621609968@ then + return -1 +endi +print execute sql select 
cos(apercentile(c2, 10)) from tba1; +sql select cos(apercentile(c2, 10)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select cos(stddev(c2)) from tba1; +sql select cos(stddev(c2)) from tba1; +if $data00 != @-0.963954358@ then + return -1 +endi +print execute sql select cos(spread(c2)) from tba1; +sql select cos(spread(c2)) from tba1; +if $data00 != @-0.911130262@ then + return -1 +endi +print execute sql select cos(twa(c2)) from tba1; +sql select cos(twa(c2)) from tba1; +if $data00 != @-0.074389166@ then + return -1 +endi +print execute sql select cos(leastsquares(c2, 1, 1)) from tba1; +sql_error select cos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select cos(interp(c2)) from tba1 every(1s) +sql select cos(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.540302306@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @-0.416146837@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @-0.989992497@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @-0.653643621@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @0.283662185@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @0.960170287@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.753902254@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @-0.145500034@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @-0.911130262@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.000000000@ then + return -1 +endi +print execute sql select cos(interp(c2)) from stba 
every(1s) group by tbname; +sql select cos(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.540302306@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @-0.416146837@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @-0.989992497@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @-0.653643621@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @0.283662185@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @0.960170287@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.753902254@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @-0.145500034@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @-0.911130262@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1.000000000@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select cos(elapsed(ts)) from tba1; +sql select cos(elapsed(ts)) from tba1; +if $data00 != @-0.999127122@ then + return -1 +endi +print execute sql select cos(rate(c2)) from tba1; +sql select cos(rate(c2)) from tba1; +if $data00 != @0.624307395@ then + return -1 +endi +print execute sql select 
cos(irate(c2)) from tba1; +sql select cos(irate(c2)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select cos(first(c2)) from tba1; +sql select cos(first(c2)) from tba1; +if $data00 != @0.540302306@ then + return -1 +endi +print execute sql select cos(last(c2)) from tba1; +sql select cos(last(c2)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select cos(last_row(c2)) from tba1; +sql select cos(last_row(c2)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select cos(top(c2, 1)) from tba1; +sql_error select cos(top(c2, 1)) from tba1; +print execute sql select cos(bottom(c2, 1)) from tba1; +sql_error select cos(bottom(c2, 1)) from tba1; +print execute sql select cos(leastsquares(c2, 1, 1)) from tba1; +sql_error select cos(leastsquares(c2, 1, 1)) from tba1; +print execute sql select cos(derivative(c2, 1s, 0)) from tba1; +sql_error select cos(derivative(c2, 1s, 0)) from tba1; +print execute sql select cos(diff(c2)) from tba1; +sql_error select cos(diff(c2)) from tba1; +print execute sql select cos(csum(c2)) from tba1; +sql_error select cos(csum(c2)) from tba1; +print execute sql select cos(mavg(c2,2)) from tba1; +sql_error select cos(mavg(c2,2)) from tba1; +print execute sql select cos(sample(c2,2)) from tba1; +sql_error select cos(sample(c2,2)) from tba1; +print execute sql select cos(_block_dist()) from tba1; +sql_error select cos(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_funcs.sim b/tests/script/general/compute/math_funcs.sim new file mode 100644 index 0000000000000000000000000000000000000000..0912595655838bd58a15d418029293fcb4185813 --- /dev/null +++ b/tests/script/general/compute/math_funcs.sim @@ -0,0 +1,130 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 6 +system sh/cfg.sh -n dnode1 -c cache -v 1 +system sh/cfg.sh 
-n dnode1 -c minRows -v 10 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect + +sql drop database if exists db +sql create database if not exists db +sql use db +sql create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tb1 using stb1 tags(1,'1',1.0); +sql create table tb2 using stb1 tags(2,'2',2.0); +sql create table tb3 using stb1 tags(3,'3',3.0); + +sql insert into tb1 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"123","1234",1,1,1,1); +sql insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); +sql insert into tb1 values ('2021-11-11 09:00:02',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL); +sql insert into tb1 values ('2021-11-11 09:00:03',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3); +sql insert into tb1 values ('2021-11-11 09:00:04',true,4,4,4,4,4,4,"456","4567",4,4,4,4); +sql insert into tb1 values ('2021-11-11 09:00:05',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807); +sql insert into tb1 values ('2021-11-11 09:00:06',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0); + +sql insert into tb2 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tb2 values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tb2 values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tb2 values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tb2 values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tb2 values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6); +sql 
insert into tb2 values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7); + +sql create table tbn (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned); + +sql insert into tbn values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tbn values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tbn values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tbn values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tbn values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tbn values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tbn values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7); + +run general/compute/math_sqrt.sim +run general/compute/math_abs.sim +run general/compute/math_asin.sim +run general/compute/math_acos.sim +run general/compute/math_atan.sim +run general/compute/math_sin.sim +run general/compute/math_cos.sim +run general/compute/math_tan.sim +run general/compute/math_pow.sim +run general/compute/math_log.sim + + +sql create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tba1 using stba tags(1,'1',1.0); + +sql insert into tba1 values ('2021-11-11 09:00:00',true, 1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tba1 values ('2021-11-11 09:00:01',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:02',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql 
insert into tba1 values ('2021-11-11 09:00:04',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:05',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:06',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:07',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:08',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:09',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:10',true, 1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tba1 values ('2021-11-11 09:00:11',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:12',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:13',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tba1 values ('2021-11-11 09:00:14',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:15',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:16',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:17',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:18',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:19',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:20',true, 
1,1,1,1,1,1,"111","1111",1,1,1,1); +sql insert into tba1 values ('2021-11-11 09:00:21',true, 2,2,2,2,2,2,"222","2222",2,2,2,2); +sql insert into tba1 values ('2021-11-11 09:00:22',true, 3,3,2,3,3,3,"333","3333",3,3,3,3); +sql insert into tba1 values ('2021-11-11 09:00:23',false,4,4,4,4,4,4,"444","4444",4,4,4,4); +sql insert into tba1 values ('2021-11-11 09:00:24',true, 5,5,5,5,5,5,"555","5555",5,5,5,5); +sql insert into tba1 values ('2021-11-11 09:00:25',true, 6,6,6,6,6,6,"666","6666",6,6,6,6); +sql insert into tba1 values ('2021-11-11 09:00:26',true, 7,7,7,7,7,7,"777","7777",7,7,7,7); +sql insert into tba1 values ('2021-11-11 09:00:27',true, 8,8,8,8,8,8,"888","8888",8,8,8,8); +sql insert into tba1 values ('2021-11-11 09:00:28',true, 9,9,9,9,9,9,"999","9999",9,9,9,9); +sql insert into tba1 values ('2021-11-11 09:00:29',true, 0,0,0,0,0,0,"000","0000",0,0,0,0); + +run general/compute/math_sqrt.sim +run general/compute/math_sqrt2.sim +run general/compute/math_abs.sim +run general/compute/math_abs2.sim +run general/compute/math_asin.sim +run general/compute/math_asin2.sim +run general/compute/math_acos.sim +run general/compute/math_acos2.sim +run general/compute/math_atan.sim +run general/compute/math_atan2.sim +run general/compute/math_sin.sim +run general/compute/math_sin2.sim +run general/compute/math_cos.sim +run general/compute/math_cos2.sim +run general/compute/math_tan.sim +run general/compute/math_tan2.sim +run general/compute/math_pow.sim +run general/compute/math_pow2.sim +run general/compute/math_log.sim +run general/compute/math_log2.sim +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/math_log.sim b/tests/script/general/compute/math_log.sim new file mode 100644 index 0000000000000000000000000000000000000000..d8bb47d69811a516bdfecfe13de3eca374c76e36 --- /dev/null +++ b/tests/script/general/compute/math_log.sim @@ -0,0 +1,818 @@ +sleep 100 +sql connect +sql use db + +print execute sql select log(c2,*) from tb1; 
+sql_error select log(c2,*) from tb1; +print execute sql select log(c2,*) from tb1; +sql_error select log(c2,*) from tb1; +print execute sql select log(c2,*) from tb1; +sql_error select log(c2,*) from tb1; +print execute sql select log(c2,*) from tb1; +sql_error select log(c2,*) from tb1; +print execute sql select log(c2,*) as a from tb1; +sql_error select log(c2,*) as a from tb1; +print execute sql select log(c2,*) + 1 as a from tb1; +sql_error select log(c2,*) + 1 as a from tb1; +print execute sql select log(c2,tb1.*) + 1 as a from tb1; +sql_error select log(c2,tb1.*) + 1 as a from tb1; +print execute sql select log(c2,*) from tb1; +sql_error select log(c2,*) from tb1; +print execute sql select log(c2,c1) from tb1; +sql_error select log(c2,c1) from tb1; +print execute sql select log(c2,c1) from tb1; +sql_error select log(c2,c1) from tb1; +print execute sql select log(c2,c1 + c2) from tb1; +sql_error select log(c2,c1 + c2) from tb1; +print execute sql select log(c2,13) from tb1; +sql select log(c2,13) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.270238154@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.540476309@ then + return -1 +endi +if $data50 != @1.888609252@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c1) from tb1; +sql_error select log(c2,c1) from tb1; +print execute sql select log(c2,c2) from tb1; +sql select log(c2,c2) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @1.000000000@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c3) from tb1; +sql select log(c2,c3) from tb1; +if $data00 != @-nan@ then + return -1 +endi 
+if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.465913680@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c4) from tb1; +sql select log(c2,c4) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.225441442@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c5) from tb1; +sql select log(c2,c5) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.110931503@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c6) from tb1; +sql select log(c2,c6) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.054599099@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c7) from tb1; +sql select log(c2,c7) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.006824887@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c8) from tb1; +sql_error select 
log(c2,c8) from tb1; +print execute sql select log(c2,c9) from tb1; +sql_error select log(c2,c9) from tb1; +print execute sql select log(c2,c10) from tb1; +sql select log(c2,c10) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.874822948@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c11) from tb1; +sql select log(c2,c11) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.436793995@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c12) from tb1; +sql select log(c2,c12) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.218396396@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c13) from tb1; +sql select log(c2,c13) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.110931503@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,12345678900000000000000000) from tb1; +sql_error select log(c2,12345678900000000000000000) from tb1; +print execute sql select distinct log(c2,123) from tb1; +sql_error select distinct log(c2,123) from 
tb1; +print execute sql select log(c2,t1) from stb1; +sql_error select log(c2,t1) from stb1; +print execute sql select log(c2,c1),avg(c3) from tb1; +sql_error select log(c2,c1),avg(c3) from tb1; +print execute sql select log(c2,c1),top(c3,1) from tb1; +sql_error select log(c2,c1),top(c3,1) from tb1; +print execute sql select log(c2,c2+c3) from tb1 session(ts, 1s); +sql_error select log(c2,c2+c3) from tb1 session(ts, 1s); +print execute sql select log(c2,c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select log(c2,c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select log(c2,c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select log(c2,c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select log(c2,c2+c3) from stb1 group by t1; +sql_error select log(c2,c2+c3) from stb1 group by t1; +print execute sql select log(c2,c2+c3) from stb1 group by ts; +sql_error select log(c2,c2+c3) from stb1 group by ts; +print execute sql select log(c2,c2+c3) from stb1 group by c1; +sql_error select log(c2,c2+c3) from stb1 group by c1; +print execute sql select log(c2,c2+c3) from stb1 group by tbname; +sql_error select log(c2,c2+c3) from stb1 group by tbname; +print execute sql select log(c2,c2+c3) from tb1 order by c2; +sql_error select log(c2,c2+c3) from tb1 order by c2; +print execute sql select log(c2,c8),log(c2,c9) from tbn; +sql_error select log(c2,c8),log(c2,c9) from tbn; +print execute sql select log(c2,ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select log(c2,ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select log(c2,a) from (select log(c2,c2) as a from tb1); +sql_error select log(c2,a) from (select log(c2,c2) as a from tb1); +print execute sql select log(c2,"abc") from tb1; +sql_error select log(c2,"abc") from tb1; +print execute sql select log(c2,c2 + c3) from tb1; +sql select log(c2,c2 + c3) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + 
return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.666666667@ then + return -1 +endi +if $data50 != @0.465740397@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,(c2 + c3)) from tb1; +sql select log(c2,(c2 + c3)) from tb1; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.666666667@ then + return -1 +endi +if $data50 != @0.465740397@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,(c2 * c3)+c4-6) from tb1; +sql select log(c2,(c2 * c3)+c4-6) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.525299070@ then + return -1 +endi +if $data50 != @0.225421132@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,11)+c2 from tb1; +sql select log(c2,11)+c2 from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2.289064826@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4.578129653@ then + return -1 +endi +if $data50 != @129.020182925@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c1)+c2 from tb1; +sql_error select log(c2,c1)+c2 from tb1; +print execute sql select log(c2,c2)+11 from tb1; +sql select log(c2,c2)+11 from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @12.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @12.000000000@ then + return -1 +endi +if $data50 != @12.000000000@ 
then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c1),c1,c2 from tb1; +sql_error select log(c2,c1),c1,c2 from tb1; +print execute sql select log(c2,c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select log(c2,c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select log(c2,c1),floor(c3) from tb1; +sql_error select log(c2,c1),floor(c3) from tb1; +print execute sql select log(c2,c1),log(c2,c2+c3) from tb1; +sql_error select log(c2,c1),log(c2,c2+c3) from tb1; +print execute sql select log(c2,c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select log(c2,c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @0.666666667@ then + return -1 +endi +if $data20 != @0.465740397@ then + return -1 +endi +if $data30 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c2) from tb1 order by ts desc; +sql select log(c2,c2) from tb1 order by ts desc; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @1.000000000@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select log(c2,c2+c3) from tb1 order by ts desc; +sql select log(c2,c2+c3) from tb1 order by ts desc; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @0.465740397@ then + return -1 +endi +if $data20 != @0.666666667@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select log(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select log(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @0.666666667@ then + return -1 +endi +if 
$data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select log(c2,c2) from stb1; +sql select log(c2,c2) from stb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @1.000000000@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @-nan@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select log(c2,c2) from stb1 order by ts desc; +sql select log(c2,c2) from stb1 order by ts desc; +if $data00 != @1.000000000@ then + if $data00 != @-nan@ then + return -1 + endi +endi +if $data20 != @1.000000000@ then + if $data20 != @1.000000000@ then + return -1 + endi +endi +if $data40 != @1.000000000@ then + if $data40 != @1.000000000@ then + return -1 + endi +endi +if $data60 != @1.000000000@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @1.000000000@ then + if $data80 != @1.000000000@ then + return -1 + endi +endi +print execute sql select log(c2,c4),t1 from stb1 order by ts desc; +sql select log(c2,c4),t1 from stb1 order by ts desc; +if $data00 != @1.000000000@ then + if $data00 != @-nan@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @1.000000000@ then + if $data20 != @0.225441442@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @1.000000000@ then + if $data40 != @1.000000000@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @1.000000000@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi 
+endi +if $data80 != @1.584962501@ then + if $data80 != @1.000000000@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select log(c2,c3),tbname from stb1; +sql select log(c2,c3),tbname from stb1; +if $data00 != @-nan@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @0.465913680@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @-nan@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select log(c2,c3),tbname from stb1 where t1 > 1; +sql select log(c2,c3),tbname from stb1 where t1 > 1; +if $data00 != @-nan@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @1.000000000@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @1.000000000@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @1.000000000@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql 
select log(c2,c8),log(c2,c9) from tbn; +sql_error select log(c2,c8),log(c2,c9) from tbn; +print execute sql select log(c2,c8),log(c2,c9) from tbn order by ts desc; +sql_error select log(c2,c8),log(c2,c9) from tbn order by ts desc; +print execute sql select log(c2,log(c2,c8)) from tbn; +sql_error select log(c2,log(c2,c8)) from tbn; +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select log(c2,a) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select log(c2,c2) from (select * from stb1); +sql select log(c2,c2) from (select * from stb1); +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @1.000000000@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @-nan@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql_error select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql_error select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql_error 
select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql_error select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +print execute sql select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql_error select log(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +print execute sql select log(c2,a) from (select log(c2,c2) as a from tb1); +sql_error select log(c2,a) from (select log(c2,c2) as a from tb1); +print execute sql select log(c2,tb1.c3),log(c2,tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql_error select log(c2,tb1.c3),log(c2,tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +print execute sql select log(c2,c3) from tb1 union all select log(c2,c3) from tb2; +sql select log(c2,c3) from tb1 union all select log(c2,c3) from tb2; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.465913680@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @-nan@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi diff --git a/tests/script/general/compute/math_log2.sim b/tests/script/general/compute/math_log2.sim new file mode 100644 index 
0000000000000000000000000000000000000000..25722ceae1796f281edaa9ea19db99c59672e7fc --- /dev/null +++ b/tests/script/general/compute/math_log2.sim @@ -0,0 +1,126 @@ +sleep 100 +sql connect +sql use db + +print execute sql select log(c2,stb1.c4),log(c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql_error select log(c2,stb1.c4),log(c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +print execute sql select log(c2,c4) as a from stb1 union all select log(c2,c5) as a from stba; +sql select log(c2,c4) as a from stb1 union all select log(c2,c5) as a from stba; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.225441442@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @-nan@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @1.584962501@ then + return -1 +endi +print execute sql select log(c2,c2) from stba; +sql select log(c2,c2) from stba; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @1.000000000@ then + return -1 +endi +if $data20 != @1.000000000@ then + return -1 +endi +if $data30 != @1.000000000@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @1.000000000@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @-nan@ then + return -1 +endi +print execute sql select log(c2,min(c2)) from tba1; +sql_error select log(c2,min(c2)) from tba1; +print execute sql select log(c2,max(c2)) from tba1; +sql_error select log(c2,max(c2)) from tba1; +print execute sql select log(c2,count(c2)) from tba1; +sql_error select log(c2,count(c2)) from tba1; +print execute sql 
select log(c2,sum(c2)) from tba1; +sql_error select log(c2,sum(c2)) from tba1; +print execute sql select log(c2,avg(c2)) from tba1; +sql_error select log(c2,avg(c2)) from tba1; +print execute sql select log(c2,percentile(c2, 10)) from tba1; +sql_error select log(c2,percentile(c2, 10)) from tba1; +print execute sql select log(c2,apercentile(c2, 10)) from tba1; +sql_error select log(c2,apercentile(c2, 10)) from tba1; +print execute sql select log(c2,stddev(c2)) from tba1; +sql_error select log(c2,stddev(c2)) from tba1; +print execute sql select log(c2,spread(c2)) from tba1; +sql_error select log(c2,spread(c2)) from tba1; +print execute sql select log(c2,twa(c2)) from tba1; +sql_error select log(c2,twa(c2)) from tba1; +print execute sql select log(c2,leastsquares(c2, 1, 1)) from tba1; +sql_error select log(c2,leastsquares(c2, 1, 1)) from tba1; +print execute sql select log(c2,interp(c2)) from tba1 every(1s) +sql_error select log(c2,interp(c2)) from tba1 every(1s) +print execute sql select log(c2,interp(c2)) from stba every(1s) group by tbname; +sql_error select log(c2,interp(c2)) from stba every(1s) group by tbname; +print execute sql select log(c2,elapsed(ts)) from tba1; +sql_error select log(c2,elapsed(ts)) from tba1; +print execute sql select log(c2,rate(c2)) from tba1; +sql_error select log(c2,rate(c2)) from tba1; +print execute sql select log(c2,irate(c2)) from tba1; +sql_error select log(c2,irate(c2)) from tba1; +print execute sql select log(c2,first(c2)) from tba1; +sql_error select log(c2,first(c2)) from tba1; +print execute sql select log(c2,last(c2)) from tba1; +sql_error select log(c2,last(c2)) from tba1; +print execute sql select log(c2,last_row(c2)) from tba1; +sql_error select log(c2,last_row(c2)) from tba1; +print execute sql select log(c2,top(c2, 1)) from tba1; +sql_error select log(c2,top(c2, 1)) from tba1; +print execute sql select log(c2,bottom(c2, 1)) from tba1; +sql_error select log(c2,bottom(c2, 1)) from tba1; +print execute sql select 
log(c2,leastsquares(c2, 1, 1)) from tba1; +sql_error select log(c2,leastsquares(c2, 1, 1)) from tba1; +print execute sql select log(c2,derivative(c2, 1s, 0)) from tba1; +sql_error select log(c2,derivative(c2, 1s, 0)) from tba1; +print execute sql select log(c2,diff(c2)) from tba1; +sql_error select log(c2,diff(c2)) from tba1; +print execute sql select log(c2,csum(c2)) from tba1; +sql_error select log(c2,csum(c2)) from tba1; +print execute sql select log(c2,mavg(c2,2)) from tba1; +sql_error select log(c2,mavg(c2,2)) from tba1; +print execute sql select log(c2,sample(c2,2)) from tba1; +sql_error select log(c2,sample(c2,2)) from tba1; +print execute sql select log(c2,_block_dist()) from tba1; +sql_error select log(c2,_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_pow.sim b/tests/script/general/compute/math_pow.sim new file mode 100644 index 0000000000000000000000000000000000000000..d3725e650b5840d2044f60f2b890223039c73fb1 --- /dev/null +++ b/tests/script/general/compute/math_pow.sim @@ -0,0 +1,818 @@ +sleep 100 +sql connect +sql use db + +print execute sql select pow(c2,*) from tb1; +sql_error select pow(c2,*) from tb1; +print execute sql select pow(c2,*) from tb1; +sql_error select pow(c2,*) from tb1; +print execute sql select pow(c2,*) from tb1; +sql_error select pow(c2,*) from tb1; +print execute sql select pow(c2,*) from tb1; +sql_error select pow(c2,*) from tb1; +print execute sql select pow(c2,*) as a from tb1; +sql_error select pow(c2,*) as a from tb1; +print execute sql select pow(c2,*) + 1 as a from tb1; +sql_error select pow(c2,*) + 1 as a from tb1; +print execute sql select pow(c2,tb1.*) + 1 as a from tb1; +sql_error select pow(c2,tb1.*) + 1 as a from tb1; +print execute sql select pow(c2,*) from tb1; +sql_error select pow(c2,*) from tb1; +print execute sql select pow(c2,c1) from tb1; +sql_error select pow(c2,c1) from tb1; +print execute sql select pow(c2,c1) from tb1; +sql_error select pow(c2,c1) from tb1; +print execute sql 
select pow(c2,c1 + c2) from tb1; +sql_error select pow(c2,c1 + c2) from tb1; +print execute sql select pow(c2,13) from tb1; +sql select pow(c2,13) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @8192.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @67108864.000000000@ then + return -1 +endi +if $data50 != @2235879388560037013354446848.000000000@ then + return -1 +endi +if $data60 != @-2235879388560037013354446848.000000000@ then + return -1 +endi +print execute sql select pow(c2,c1) from tb1; +sql_error select pow(c2,c1) from tb1; +print execute sql select pow(c2,c2) from tb1; +sql select pow(c2,c2) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c3) from tb1; +sql select pow(c2,c3) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c4) from tb1; +sql select pow(c2,c4) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 
+endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c5) from tb1; +sql select pow(c2,c5) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c6) from tb1; +sql select pow(c2,c6) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c7) from tb1; +sql select pow(c2,c7) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c8) from tb1; +sql_error select pow(c2,c8) from tb1; +print execute sql select pow(c2,c9) from tb1; +sql_error select pow(c2,c9) from tb1; +print execute sql select pow(c2,c10) from tb1; +sql select pow(c2,c10) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 
+endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,c11) from tb1; +sql select pow(c2,c11) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,c12) from tb1; +sql select pow(c2,c12) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,c13) from tb1; +sql select pow(c2,c13) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,12345678900000000000000000) from tb1; +sql_error select pow(c2,12345678900000000000000000) from tb1; +print execute sql select distinct pow(c2,123) from tb1; +sql_error select distinct pow(c2,123) from tb1; +print execute sql select pow(c2,t1) from stb1; +sql_error select pow(c2,t1) from stb1; +print execute sql select pow(c2,c1),avg(c3) from tb1; +sql_error select pow(c2,c1),avg(c3) from tb1; +print execute sql select pow(c2,c1),top(c3,1) from tb1; +sql_error select pow(c2,c1),top(c3,1) from tb1; +print execute sql select pow(c2,c2+c3) from tb1 session(ts, 1s); +sql_error select pow(c2,c2+c3) from tb1 session(ts, 1s); +print execute 
sql select pow(c2,c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select pow(c2,c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select pow(c2,c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select pow(c2,c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select pow(c2,c2+c3) from stb1 group by t1; +sql_error select pow(c2,c2+c3) from stb1 group by t1; +print execute sql select pow(c2,c2+c3) from stb1 group by ts; +sql_error select pow(c2,c2+c3) from stb1 group by ts; +print execute sql select pow(c2,c2+c3) from stb1 group by c1; +sql_error select pow(c2,c2+c3) from stb1 group by c1; +print execute sql select pow(c2,c2+c3) from stb1 group by tbname; +sql_error select pow(c2,c2+c3) from stb1 group by tbname; +print execute sql select pow(c2,c2+c3) from tb1 order by c2; +sql_error select pow(c2,c2+c3) from tb1 order by c2; +print execute sql select pow(c2,c8),pow(c2,c9) from tbn; +sql_error select pow(c2,c8),pow(c2,c9) from tbn; +print execute sql select pow(c2,ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select pow(c2,ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select pow(c2,a) from (select pow(c2,c2) as a from tb1); +sql_error select pow(c2,a) from (select pow(c2,c2) as a from tb1); +print execute sql select pow(c2,"abc") from tb1; +sql_error select pow(c2,"abc") from tb1; +print execute sql select pow(c2,c2 + c3) from tb1; +sql select pow(c2,c2 + c3) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @65536.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,(c2 + c3)) from tb1; +sql select pow(c2,(c2 + c3)) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 
+endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @65536.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,(c2 * c3)+c4-6) from tb1; +sql select pow(c2,(c2 * c3)+c4-6) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @268435456.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,11)+c2 from tb1; +sql select pow(c2,11)+c2 from tb1; +if $data00 != @2.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2050.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4194308.000000000@ then + return -1 +endi +if $data50 != @138624799340320985710592.000000000@ then + return -1 +endi +if $data60 != @-138624799340320985710592.000000000@ then + return -1 +endi +print execute sql select pow(c2,c1)+c2 from tb1; +sql_error select pow(c2,c1)+c2 from tb1; +print execute sql select pow(c2,c2)+11 from tb1; +sql select pow(c2,c2)+11 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @15.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @267.000000000@ then + return -1 +endi +if $data50 != @1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 +endi +if $data60 != @11.000000000@ then + return -1 +endi +print 
execute sql select pow(c2,c1),c1,c2 from tb1; +sql_error select pow(c2,c1),c1,c2 from tb1; +print execute sql select pow(c2,c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select pow(c2,c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select pow(c2,c1),floor(c3) from tb1; +sql_error select pow(c2,c1),floor(c3) from tb1; +print execute sql select pow(c2,c1),pow(c2,c2+c3) from tb1; +sql_error select pow(c2,c1),pow(c2,c2+c3) from tb1; +print execute sql select pow(c2,c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select pow(c2,c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @65536.000000000@ then + return -1 +endi +if $data20 != @inf@ then + return -1 +endi +if $data30 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c2,c2) from tb1 order by ts desc; +sql select pow(c2,c2) from tb1 order by ts desc; +if $data00 != @-0.000000000@ then + return -1 +endi +if $data10 != @1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 +endi +if $data20 != @256.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4.000000000@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,c2+c3) from tb1 order by ts desc; +sql select pow(c2,c2+c3) from tb1 order by ts desc; +if $data00 != @0.000000000@ then + return -1 +endi +if $data10 != @inf@ then + return -1 +endi +if $data20 != @65536.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 
+endi +print execute sql select pow(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select pow(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @65536.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select pow(c2,c2) from stb1; +sql select pow(c2,c2) from stb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @4.000000000@ then + return -1 +endi +if $data90 != @27.000000000@ then + return -1 +endi +print execute sql select pow(c2,c2) from stb1 order by ts desc; +sql select pow(c2,c2) from stb1 order by ts desc; +if $data00 != @823543.000000000@ then + if $data00 != @-0.000000000@ then + return -1 + endi +endi +if $data20 != @46656.000000000@ then + if $data20 != @1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 + endi +endi +if $data40 != @3125.000000000@ then + if $data40 != @256.000000000@ then + return -1 + endi +endi +if $data60 != @256.000000000@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @27.000000000@ then + if $data80 != @4.000000000@ 
then + return -1 + endi +endi +print execute sql select pow(c2,c4),t1 from stb1 order by ts desc; +sql select pow(c2,c4),t1 from stb1 order by ts desc; +if $data00 != @823543.000000000@ then + if $data00 != @-0.000000000@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @46656.000000000@ then + if $data20 != @inf@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @3125.000000000@ then + if $data40 != @256.000000000@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @256.000000000@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @9.000000000@ then + if $data80 != @4.000000000@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select pow(c2,c3),tbname from stb1; +sql select pow(c2,c3),tbname from stb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @4.000000000@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @27.000000000@ then + return -1 +endi +if $data91 != 
@tb2@ then + return -1 +endi +print execute sql select pow(c2,c3),tbname from stb1 where t1 > 1; +sql select pow(c2,c3),tbname from stb1 where t1 > 1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @4.000000000@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @27.000000000@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @256.000000000@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @3125.000000000@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @46656.000000000@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @823543.000000000@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select pow(c2,c8),pow(c2,c9) from tbn; +sql_error select pow(c2,c8),pow(c2,c9) from tbn; +print execute sql select pow(c2,c8),pow(c2,c9) from tbn order by ts desc; +sql_error select pow(c2,c8),pow(c2,c9) from tbn order by ts desc; +print execute sql select pow(c2,pow(c2,c8)) from tbn; +sql_error select pow(c2,pow(c2,c8)) from tbn; +print execute sql select pow(c2,a) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select pow(c2,c2) from (select * from stb1); +sql select pow(c2,c2) from (select * from stb1); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != 
@1524307411995722675302034438441039929638133977529255166488930956750631981496706568536830801887788243768717590100078636207586147400582763177366354597171395424352865739982333218252608952069997640848048552956054564019622410252548018541775390307932677684494157475792027648.000000000@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @4.000000000@ then + return -1 +endi +if $data90 != @27.000000000@ then + return -1 +endi +print execute sql select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +print execute sql select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +print execute sql select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +print execute sql select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +print execute sql select pow(c2,a) from (select 
avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql_error select pow(c2,a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +print execute sql select pow(c2,a) from (select pow(c2,c2) as a from tb1); +sql_error select pow(c2,a) from (select pow(c2,c2) as a from tb1); +print execute sql select pow(c2,tb1.c3),pow(c2,tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql_error select pow(c2,tb1.c3),pow(c2,tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +print execute sql select pow(c2,c3) from tb1 union all select pow(c2,c3) from tb2; +sql select pow(c2,c3) from tb1 union all select pow(c2,c3) from tb2; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @4.000000000@ then + return -1 +endi +if $data90 != @27.000000000@ then + return -1 +endi diff --git a/tests/script/general/compute/math_pow2.sim b/tests/script/general/compute/math_pow2.sim new file mode 100644 index 0000000000000000000000000000000000000000..416f989f1288ea1f21ee792b63fb75ee46510d2b --- /dev/null +++ b/tests/script/general/compute/math_pow2.sim @@ -0,0 +1,126 @@ +sleep 100 +sql connect +sql use db + +print execute sql select pow(c2,stb1.c4),pow(c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql_error select pow(c2,stb1.c4),pow(c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +print execute sql select pow(c2,c4) as a from stb1 union all select pow(c2,c5) as a from stba; +sql select pow(c2,c4) as a from stb1 union all select pow(c2,c5) as a from 
stba; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @256.000000000@ then + return -1 +endi +if $data50 != @inf@ then + return -1 +endi +if $data60 != @-0.000000000@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @4.000000000@ then + return -1 +endi +if $data90 != @9.000000000@ then + return -1 +endi +print execute sql select pow(c2,c2) from stba; +sql select pow(c2,c2) from stba; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @4.000000000@ then + return -1 +endi +if $data20 != @27.000000000@ then + return -1 +endi +if $data30 != @256.000000000@ then + return -1 +endi +if $data40 != @3125.000000000@ then + return -1 +endi +if $data50 != @46656.000000000@ then + return -1 +endi +if $data60 != @823543.000000000@ then + return -1 +endi +if $data70 != @16777216.000000000@ then + return -1 +endi +if $data80 != @387420489.000000000@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select pow(c2,min(c2)) from tba1; +sql_error select pow(c2,min(c2)) from tba1; +print execute sql select pow(c2,max(c2)) from tba1; +sql_error select pow(c2,max(c2)) from tba1; +print execute sql select pow(c2,count(c2)) from tba1; +sql_error select pow(c2,count(c2)) from tba1; +print execute sql select pow(c2,sum(c2)) from tba1; +sql_error select pow(c2,sum(c2)) from tba1; +print execute sql select pow(c2,avg(c2)) from tba1; +sql_error select pow(c2,avg(c2)) from tba1; +print execute sql select pow(c2,percentile(c2, 10)) from tba1; +sql_error select pow(c2,percentile(c2, 10)) from tba1; +print execute sql select pow(c2,apercentile(c2, 10)) from tba1; +sql_error select pow(c2,apercentile(c2, 10)) from tba1; +print execute sql select pow(c2,stddev(c2)) from tba1; +sql_error select pow(c2,stddev(c2)) from tba1; +print execute sql 
select pow(c2,spread(c2)) from tba1; +sql_error select pow(c2,spread(c2)) from tba1; +print execute sql select pow(c2,twa(c2)) from tba1; +sql_error select pow(c2,twa(c2)) from tba1; +print execute sql select pow(c2,leastsquares(c2, 1, 1)) from tba1; +sql_error select pow(c2,leastsquares(c2, 1, 1)) from tba1; +print execute sql select pow(c2,interp(c2)) from tba1 every(1s) +sql_error select pow(c2,interp(c2)) from tba1 every(1s) +print execute sql select pow(c2,interp(c2)) from stba every(1s) group by tbname; +sql_error select pow(c2,interp(c2)) from stba every(1s) group by tbname; +print execute sql select pow(c2,elapsed(ts)) from tba1; +sql_error select pow(c2,elapsed(ts)) from tba1; +print execute sql select pow(c2,rate(c2)) from tba1; +sql_error select pow(c2,rate(c2)) from tba1; +print execute sql select pow(c2,irate(c2)) from tba1; +sql_error select pow(c2,irate(c2)) from tba1; +print execute sql select pow(c2,first(c2)) from tba1; +sql_error select pow(c2,first(c2)) from tba1; +print execute sql select pow(c2,last(c2)) from tba1; +sql_error select pow(c2,last(c2)) from tba1; +print execute sql select pow(c2,last_row(c2)) from tba1; +sql_error select pow(c2,last_row(c2)) from tba1; +print execute sql select pow(c2,top(c2, 1)) from tba1; +sql_error select pow(c2,top(c2, 1)) from tba1; +print execute sql select pow(c2,bottom(c2, 1)) from tba1; +sql_error select pow(c2,bottom(c2, 1)) from tba1; +print execute sql select pow(c2,leastsquares(c2, 1, 1)) from tba1; +sql_error select pow(c2,leastsquares(c2, 1, 1)) from tba1; +print execute sql select pow(c2,derivative(c2, 1s, 0)) from tba1; +sql_error select pow(c2,derivative(c2, 1s, 0)) from tba1; +print execute sql select pow(c2,diff(c2)) from tba1; +sql_error select pow(c2,diff(c2)) from tba1; +print execute sql select pow(c2,csum(c2)) from tba1; +sql_error select pow(c2,csum(c2)) from tba1; +print execute sql select pow(c2,mavg(c2,2)) from tba1; +sql_error select pow(c2,mavg(c2,2)) from tba1; +print execute sql 
select pow(c2,sample(c2,2)) from tba1; +sql_error select pow(c2,sample(c2,2)) from tba1; +print execute sql select pow(c2,_block_dist()) from tba1; +sql_error select pow(c2,_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_sin.sim b/tests/script/general/compute/math_sin.sim new file mode 100644 index 0000000000000000000000000000000000000000..d014324fa110a2d2267f15a0c26cab4e15c6feb1 --- /dev/null +++ b/tests/script/general/compute/math_sin.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select sin(*) from tb1; +sql_error select sin(*) from tb1; +print execute sql select sin(*) from tb1; +sql_error select sin(*) from tb1; +print execute sql select sin(*) from tb1; +sql_error select sin(*) from tb1; +print execute sql select sin(*) from tb1; +sql_error select sin(*) from tb1; +print execute sql select sin(*) as a from tb1; +sql_error select sin(*) as a from tb1; +print execute sql select sin(*) + 1 as a from tb1; +sql_error select sin(*) + 1 as a from tb1; +print execute sql select sin(tb1.*) + 1 as a from tb1; +sql_error select sin(tb1.*) + 1 as a from tb1; +print execute sql select sin(*) from tb1; +sql_error select sin(*) from tb1; +print execute sql select sin(c1) from tb1; +sql_error select sin(c1) from tb1; +print execute sql select sin(c1) from tb1; +sql_error select sin(c1) from tb1; +print execute sql select sin(c1 + c2) from tb1; +sql_error select sin(c1 + c2) from tb1; +print execute sql select sin(13) from tb1; +sql select sin(13) from tb1; +if $data00 != @0.420167037@ then + return -1 +endi +if $data10 != @0.420167037@ then + return -1 +endi +if $data20 != @0.420167037@ then + return -1 +endi +if $data30 != @0.420167037@ then + return -1 +endi +if $data40 != @0.420167037@ then + return -1 +endi +if $data50 != @0.420167037@ then + return -1 +endi +if $data60 != @0.420167037@ then + return -1 +endi +print execute sql select sin(c1) from tb1; +sql_error select sin(c1) from tb1; +print execute sql select 
sin(c2) from tb1; +sql select sin(c2) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.972630067@ then + return -1 +endi +if $data60 != @-0.972630067@ then + return -1 +endi +print execute sql select sin(c3) from tb1; +sql select sin(c3) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.187506554@ then + return -1 +endi +if $data60 != @-0.187506554@ then + return -1 +endi +print execute sql select sin(c4) from tb1; +sql select sin(c4) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @-0.724916555@ then + return -1 +endi +if $data60 != @0.724916555@ then + return -1 +endi +print execute sql select sin(c5) from tb1; +sql select sin(c5) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.999930377@ then + return -1 +endi +if $data60 != @-0.999930377@ then + return -1 +endi +print execute sql select sin(c6) from tb1; +sql select sin(c6) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi 
+if $data50 != @-0.521876523@ then + return -1 +endi +if $data60 != @0.521876523@ then + return -1 +endi +print execute sql select sin(c7) from tb1; +sql select sin(c7) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.872829297@ then + return -1 +endi +if $data60 != @-0.872829297@ then + return -1 +endi +print execute sql select sin(c8) from tb1; +sql_error select sin(c8) from tb1; +print execute sql select sin(c9) from tb1; +sql_error select sin(c9) from tb1; +print execute sql select sin(c10) from tb1; +sql select sin(c10) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.451998898@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sin(c11) from tb1; +sql select sin(c11) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.368361632@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sin(c12) from tb1; +sql select sin(c12) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.998698243@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select 
sin(c13) from tb1; +sql select sin(c13) from tb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.999930377@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sin(12345678900000000000000000) from tb1; +sql_error select sin(12345678900000000000000000) from tb1; +print execute sql select distinct sin(123) from tb1; +sql_error select distinct sin(123) from tb1; +print execute sql select sin(t1) from stb1; +sql_error select sin(t1) from stb1; +print execute sql select sin(c1),avg(c3) from tb1; +sql_error select sin(c1),avg(c3) from tb1; +print execute sql select sin(c1),top(c3,1) from tb1; +sql_error select sin(c1),top(c3,1) from tb1; +print execute sql select sin(c2+c3) from tb1 session(ts, 1s); +sql_error select sin(c2+c3) from tb1 session(ts, 1s); +print execute sql select sin(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select sin(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select sin(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select sin(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select sin(c2+c3) from stb1 group by t1; +sql_error select sin(c2+c3) from stb1 group by t1; +print execute sql select sin(c2+c3) from stb1 group by ts; +sql_error select sin(c2+c3) from stb1 group by ts; +print execute sql select sin(c2+c3) from stb1 group by c1; +sql_error select sin(c2+c3) from stb1 group by c1; +print execute sql select sin(c2+c3) from stb1 group by tbname; +sql_error select sin(c2+c3) from stb1 group by tbname; +print execute sql select sin(c2+c3) from tb1 order by c2; +sql_error select sin(c2+c3) from tb1 order by c2; +print execute sql select sin(c8),sin(c9) from tbn; +sql_error select sin(c8),sin(c9) from tbn; +print execute sql select sin(ts) 
from (select avg(c2) as a from stb1 interval(1s)); +sql_error select sin(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select sin(a) from (select sin(c2) as a from tb1); +sql select sin(a) from (select sin(c2) as a from tb1); +if $data00 != @0.745624142@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.789072344@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.686600261@ then + return -1 +endi +if $data50 != @0.826369634@ then + return -1 +endi +if $data60 != @-0.826369634@ then + return -1 +endi +print execute sql select sin("abc") from tb1; +sql_error select sin("abc") from tb1; +print execute sql select sin(c2 + c3) from tb1; +sql select sin(c2 + c3) from tb1; +if $data00 != @0.909297427@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.989358247@ then + return -1 +endi +if $data50 != @0.998947724@ then + return -1 +endi +if $data60 != @-0.998947724@ then + return -1 +endi +print execute sql select sin((c2 + c3)) from tb1; +sql select sin((c2 + c3)) from tb1; +if $data00 != @0.909297427@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.989358247@ then + return -1 +endi +if $data50 != @0.998947724@ then + return -1 +endi +if $data60 != @-0.998947724@ then + return -1 +endi +print execute sql select sin((c2 * c3)+c4-6) from tb1; +sql select sin((c2 * c3)+c4-6) from tb1; +if $data00 != @0.756802495@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.990607356@ then + return -1 +endi +if $data50 != @0.083417203@ then + return -1 +endi +if $data60 != @0.990962545@ then + return -1 +endi +print 
execute sql select sin(11)+c2 from tb1; +sql select sin(11)+c2 from tb1; +if $data00 != @0.000009793@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.000009793@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @3.000009793@ then + return -1 +endi +if $data50 != @126.000009793@ then + return -1 +endi +if $data60 != @-127.999990207@ then + return -1 +endi +print execute sql select sin(c1)+c2 from tb1; +sql_error select sin(c1)+c2 from tb1; +print execute sql select sin(c2)+11 from tb1; +sql select sin(c2)+11 from tb1; +if $data00 != @11.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @11.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @10.243197505@ then + return -1 +endi +if $data50 != @11.972630067@ then + return -1 +endi +if $data60 != @10.027369933@ then + return -1 +endi +print execute sql select sin(c1),c1,c2 from tb1; +sql_error select sin(c1),c1,c2 from tb1; +print execute sql select sin(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select sin(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select sin(c1),floor(c3) from tb1; +sql_error select sin(c1),floor(c3) from tb1; +print execute sql select sin(c1),sin(c2+c3) from tb1; +sql_error select sin(c1),sin(c2+c3) from tb1; +print execute sql select sin(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select sin(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @0.909297427@ then + return -1 +endi +if $data10 != @0.989358247@ then + return -1 +endi +if $data20 != @0.998947724@ then + return -1 +endi +if $data30 != @-0.998947724@ then + return -1 +endi +print execute sql select sin(c2) from tb1 order by ts desc; +sql select sin(c2) from tb1 order by ts desc; +if $data00 != @-0.972630067@ then + return -1 +endi +if $data10 != @0.972630067@ then + return -1 +endi +if $data20 != @-0.756802495@ then + return -1 +endi 
+if $data30 != @NULL@ then + return -1 +endi +if $data40 != @0.909297427@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.841470985@ then + return -1 +endi +print execute sql select sin(c2+c3) from tb1 order by ts desc; +sql select sin(c2+c3) from tb1 order by ts desc; +if $data00 != @-0.998947724@ then + return -1 +endi +if $data10 != @0.998947724@ then + return -1 +endi +if $data20 != @0.989358247@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @0.909297427@ then + return -1 +endi +print execute sql select sin(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select sin(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @0.989358247@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select sin(c2) from stb1; +sql select sin(c2) from stb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.972630067@ then + return -1 +endi +if $data60 != @-0.972630067@ then + return -1 +endi +if $data70 != @0.841470985@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data90 != @0.141120008@ then + return -1 +endi +print execute sql select sin(c2) from stb1 order by ts desc; +sql select sin(c2) from stb1 order by ts desc; +if $data00 != @0.656986599@ then + if $data00 != @-0.972630067@ then + return -1 + endi +endi +if $data20 != @-0.279415498@ then + if $data20 != @0.972630067@ then + return -1 + endi +endi +if $data40 != @-0.958924275@ then + if $data40 != @-0.756802495@ then + return -1 + endi +endi +if $data60 != @-0.756802495@ then + if $data60 != @NULL@ then + return -1 + 
endi +endi +if $data80 != @0.141120008@ then + if $data80 != @0.909297427@ then + return -1 + endi +endi +print execute sql select sin(c4),t1 from stb1 order by ts desc; +sql select sin(c4),t1 from stb1 order by ts desc; +if $data00 != @0.656986599@ then + if $data00 != @0.724916555@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @-0.279415498@ then + if $data20 != @-0.724916555@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @-0.958924275@ then + if $data40 != @-0.756802495@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @-0.756802495@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @0.909297427@ then + if $data80 != @0.909297427@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select sin(c3),tbname from stb1; +sql select sin(c3),tbname from stb1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @0.187506554@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @-0.187506554@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @0.841470985@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 
+endi +if $data90 != @0.141120008@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select sin(c3),tbname from stb1 where t1 > 1; +sql select sin(c3),tbname from stb1 where t1 > 1; +if $data00 != @0.841470985@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @0.141120008@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @-0.756802495@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @-0.958924275@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @-0.279415498@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @0.656986599@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select sin(c8),sin(c9) from tbn; +sql_error select sin(c8),sin(c9) from tbn; +print execute sql select sin(c8),sin(c9) from tbn order by ts desc; +sql_error select sin(c8),sin(c9) from tbn order by ts desc; +print execute sql select sin(sin(c8)) from tbn; +sql_error select sin(sin(c8)) from tbn; +print execute sql select sin(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select sin(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data20 != @0.598472144@ then + return -1 +endi +if $data30 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.977530118@ then + return -1 +endi +if $data50 != @-0.502557350@ then + return -1 +endi +if $data60 != @0.304810621@ then + return -1 +endi +print execute sql select sin(c2) from (select * from stb1); +sql select sin(c2) from (select * from stb1); +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 
!= @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.972630067@ then + return -1 +endi +if $data60 != @-0.972630067@ then + return -1 +endi +if $data70 != @0.841470985@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data90 != @0.141120008@ then + return -1 +endi +print execute sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data20 != @0.598472144@ then + return -1 +endi +if $data30 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.977530118@ then + return -1 +endi +if $data50 != @-0.502557350@ then + return -1 +endi +if $data60 != @0.304810621@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data20 != @0.598472144@ then + return -1 +endi +if $data30 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.977530118@ then + return -1 +endi +if $data50 != @-0.502557350@ then + return -1 +endi +if $data60 != @0.304810621@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ 
then + return -1 +endi +print execute sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.304810621@ then + return -1 +endi +if $data40 != @-0.502557350@ then + return -1 +endi +if $data50 != @-0.977530118@ then + return -1 +endi +if $data60 != @-0.756802495@ then + return -1 +endi +if $data70 != @0.598472144@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data90 != @0.841470985@ then + return -1 +endi +print execute sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.502557350@ then + return -1 +endi +if $data40 != @-0.977530118@ then + return -1 +endi +if $data50 != @-0.756802495@ then + return -1 +endi +if $data60 != @0.598472144@ then + return -1 +endi +if $data70 != @0.909297427@ then + return -1 +endi +if $data80 != @0.841470985@ then + return -1 +endi +if $data90 != @0.304810621@ then + return -1 +endi +print execute sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 
09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @0.304810621@ then + return -1 +endi +if $data10 != @0.841470985@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @0.598472144@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @-0.977530118@ then + return -1 +endi +if $data60 != @-0.502557350@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select sin(a) from (select sin(c2) as a from tb1); +sql select sin(a) from (select sin(c2) as a from tb1); +if $data00 != @0.745624142@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.789072344@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.686600261@ then + return -1 +endi +if $data50 != @0.826369634@ then + return -1 +endi +if $data60 != @-0.826369634@ then + return -1 +endi +print execute sql select sin(tb1.c3),sin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select sin(tb1.c3),sin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @0.841470985@ then + return -1 +endi +if $data01 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @0.909297427@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @0.141120008@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data31 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data41 != @-0.958924275@ then + return -1 +endi +if $data50 != @0.187506554@ then + return -1 +endi +if $data51 != @-0.279415498@ then + return -1 +endi +if $data60 != @-0.187506554@ then + return -1 +endi +if $data61 != @0.656986599@ then + return -1 +endi +print execute sql select sin(c3) from tb1 union all select 
sin(c3) from tb2; +sql select sin(c3) from tb1 union all select sin(c3) from tb2; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.141120008@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @0.187506554@ then + return -1 +endi +if $data60 != @-0.187506554@ then + return -1 +endi +if $data70 != @0.841470985@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data90 != @0.141120008@ then + return -1 +endi diff --git a/tests/script/general/compute/math_sin2.sim b/tests/script/general/compute/math_sin2.sim new file mode 100644 index 0000000000000000000000000000000000000000..f19fa3c835fcd117260a845109cd6676d99647d8 --- /dev/null +++ b/tests/script/general/compute/math_sin2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select sin(stb1.c4),sin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select sin(stb1.c4),sin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @0.841470985@ then + return -1 +endi +if $data01 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @0.909297427@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data21 != @0.141120008@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data41 != @-0.958924275@ then + return -1 +endi +if $data50 != @-0.724916555@ then + return -1 +endi +if $data51 != @-0.279415498@ then + return -1 +endi +if $data60 != @0.724916555@ then + return -1 +endi +if $data61 != @0.656986599@ then + return -1 +endi +print execute sql select sin(c4) as a from stb1 union all select sin(c5) as a from stba; +sql select sin(c4) as a from stb1 union all select sin(c5) as a 
from stba; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @0.909297427@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-0.756802495@ then + return -1 +endi +if $data50 != @-0.724916555@ then + return -1 +endi +if $data60 != @0.724916555@ then + return -1 +endi +if $data70 != @0.841470985@ then + return -1 +endi +if $data80 != @0.909297427@ then + return -1 +endi +if $data90 != @0.909297427@ then + return -1 +endi +print execute sql select sin(c2) from stba; +sql select sin(c2) from stba; +if $data00 != @0.841470985@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data20 != @0.141120008@ then + return -1 +endi +if $data30 != @-0.756802495@ then + return -1 +endi +if $data40 != @-0.958924275@ then + return -1 +endi +if $data50 != @-0.279415498@ then + return -1 +endi +if $data60 != @0.656986599@ then + return -1 +endi +if $data70 != @0.989358247@ then + return -1 +endi +if $data80 != @0.412118485@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select sin(min(c2)) from tba1; +sql select sin(min(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sin(max(c2)) from tba1; +sql select sin(max(c2)) from tba1; +if $data00 != @0.412118485@ then + return -1 +endi +print execute sql select sin(count(c2)) from tba1; +sql select sin(count(c2)) from tba1; +if $data00 != @-0.988031624@ then + return -1 +endi +print execute sql select sin(sum(c2)) from tba1; +sql select sin(sum(c2)) from tba1; +if $data00 != @0.088368686@ then + return -1 +endi +print execute sql select sin(avg(c2)) from tba1; +sql select sin(avg(c2)) from tba1; +if $data00 != @-0.977530118@ then + return -1 +endi +print execute sql select sin(percentile(c2, 10)) from tba1; +sql select sin(percentile(c2, 10)) from tba1; +if $data00 != @0.783326910@ then + return -1 +endi +print execute sql select 
sin(apercentile(c2, 10)) from tba1; +sql select sin(apercentile(c2, 10)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sin(stddev(c2)) from tba1; +sql select sin(stddev(c2)) from tba1; +if $data00 != @0.266067654@ then + return -1 +endi +print execute sql select sin(spread(c2)) from tba1; +sql select sin(spread(c2)) from tba1; +if $data00 != @0.412118485@ then + return -1 +endi +print execute sql select sin(twa(c2)) from tba1; +sql select sin(twa(c2)) from tba1; +if $data00 != @-0.997229288@ then + return -1 +endi +print execute sql select sin(leastsquares(c2, 1, 1)) from tba1; +sql_error select sin(leastsquares(c2, 1, 1)) from tba1; +print execute sql select sin(interp(c2)) from tba1 every(1s) +sql select sin(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.841470985@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @0.909297427@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @0.141120008@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @-0.756802495@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @-0.958924275@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @-0.279415498@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.656986599@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @0.989358247@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @0.412118485@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +print execute sql select sin(interp(c2)) from stba every(1s) 
group by tbname; +sql select sin(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @0.841470985@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @0.909297427@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @0.141120008@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @-0.756802495@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @-0.958924275@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @-0.279415498@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.656986599@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @0.989358247@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @0.412118485@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select sin(elapsed(ts)) from tba1; +sql select sin(elapsed(ts)) from tba1; +if $data00 != @0.041773129@ then + return -1 +endi +print execute sql select sin(rate(c2)) from tba1; +sql select sin(rate(c2)) from tba1; +if $data00 != @0.781178774@ then + return -1 +endi +print execute sql select 
sin(irate(c2)) from tba1; +sql select sin(irate(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sin(first(c2)) from tba1; +sql select sin(first(c2)) from tba1; +if $data00 != @0.841470985@ then + return -1 +endi +print execute sql select sin(last(c2)) from tba1; +sql select sin(last(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sin(last_row(c2)) from tba1; +sql select sin(last_row(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sin(top(c2, 1)) from tba1; +sql_error select sin(top(c2, 1)) from tba1; +print execute sql select sin(bottom(c2, 1)) from tba1; +sql_error select sin(bottom(c2, 1)) from tba1; +print execute sql select sin(leastsquares(c2, 1, 1)) from tba1; +sql_error select sin(leastsquares(c2, 1, 1)) from tba1; +print execute sql select sin(derivative(c2, 1s, 0)) from tba1; +sql_error select sin(derivative(c2, 1s, 0)) from tba1; +print execute sql select sin(diff(c2)) from tba1; +sql_error select sin(diff(c2)) from tba1; +print execute sql select sin(csum(c2)) from tba1; +sql_error select sin(csum(c2)) from tba1; +print execute sql select sin(mavg(c2,2)) from tba1; +sql_error select sin(mavg(c2,2)) from tba1; +print execute sql select sin(sample(c2,2)) from tba1; +sql_error select sin(sample(c2,2)) from tba1; +print execute sql select sin(_block_dist()) from tba1; +sql_error select sin(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_sqrt.sim b/tests/script/general/compute/math_sqrt.sim new file mode 100644 index 0000000000000000000000000000000000000000..0174bfd617b043197a6e97210bbabf02b959deea --- /dev/null +++ b/tests/script/general/compute/math_sqrt.sim @@ -0,0 +1,1076 @@ + +sleep 100 +sql connect + +sql use db; + +print execute sql select sqrt(*) from tb1; +sql_error select sqrt(*) from tb1; +print execute sql select sqrt(*) from tb1; +sql_error select sqrt(*) from tb1; +print execute sql 
select sqrt(*) from tb1; +sql_error select sqrt(*) from tb1; +print execute sql select sqrt(*) from tb1; +sql_error select sqrt(*) from tb1; +print execute sql select sqrt(*) as a from tb1; +sql_error select sqrt(*) as a from tb1; +print execute sql select sqrt(*) + 1 as a from tb1; +sql_error select sqrt(*) + 1 as a from tb1; +print execute sql select sqrt(tb1.*) + 1 as a from tb1; +sql_error select sqrt(tb1.*) + 1 as a from tb1; +print execute sql select sqrt(*) from tb1; +sql_error select sqrt(*) from tb1; +print execute sql select sqrt(c1) from tb1; +sql_error select sqrt(c1) from tb1; +print execute sql select sqrt(c1) from tb1; +sql_error select sqrt(c1) from tb1; +print execute sql select sqrt(c1 + c2) from tb1; +sql_error select sqrt(c1 + c2) from tb1; +print execute sql select sqrt(13) from tb1; +sql select sqrt(13) from tb1; +print $data00 +if $data00 != @3.605551275@ then + return -1 +endi +if $data10 != @3.605551275@ then + return -1 +endi +if $data20 != @3.605551275@ then + return -1 +endi +if $data30 != @3.605551275@ then + return -1 +endi +if $data40 != @3.605551275@ then + return -1 +endi +if $data50 != @3.605551275@ then + return -1 +endi +if $data60 != @3.605551275@ then + return -1 +endi +print execute sql select sqrt(c1) from tb1; +sql_error select sqrt(c1) from tb1; +print execute sql select sqrt(c2) from tb1; +sql select sqrt(c2) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @11.269427670@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c3) from tb1; +sql select sqrt(c3) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if 
$data40 != @2.000000000@ then + return -1 +endi +if $data50 != @181.016573827@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c4) from tb1; +sql select sqrt(c4) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @46340.950001052@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c5) from tb1; +sql select sqrt(c5) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @3037000499.976049900@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c6) from tb1; +sql select sqrt(c6) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @18446743523953729536.000000000@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c7) from tb1; +sql select sqrt(c7) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @13407796239501851766774967791213869355880985313973964850809468725636027220114995122003349472475914321949847847995869038367123793747113409741287235182919680.000000000@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print 
execute sql select sqrt(c8) from tb1; +sql_error select sqrt(c8) from tb1; +print execute sql select sqrt(c9) from tb1; +sql_error select sqrt(c9) from tb1; +print execute sql select sqrt(c10) from tb1; +sql select sqrt(c10) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @15.937377451@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(c11) from tb1; +sql select sqrt(c11) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @255.996093720@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(c12) from tb1; +sql select sqrt(c12) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @65535.999984741@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(c13) from tb1; +sql select sqrt(c13) from tb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @3037000499.976049900@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(12345678900000000000000000) from tb1; +sql_error select sqrt(12345678900000000000000000) from tb1; 
+print execute sql select distinct sqrt(123) from tb1; +sql_error select distinct sqrt(123) from tb1; +print execute sql select sqrt(t1) from stb1; +sql_error select sqrt(t1) from stb1; +print execute sql select sqrt(c1),avg(c3) from tb1; +sql_error select sqrt(c1),avg(c3) from tb1; +print execute sql select sqrt(c1),top(c3,1) from tb1; +sql_error select sqrt(c1),top(c3,1) from tb1; +print execute sql select sqrt(c2+c3) from tb1 session(ts, 1s); +sql_error select sqrt(c2+c3) from tb1 session(ts, 1s); +print execute sql select sqrt(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select sqrt(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select sqrt(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select sqrt(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select sqrt(c2+c3) from stb1 group by t1; +sql_error select sqrt(c2+c3) from stb1 group by t1; +print execute sql select sqrt(c2+c3) from stb1 group by ts; +sql_error select sqrt(c2+c3) from stb1 group by ts; +print execute sql select sqrt(c2+c3) from stb1 group by c1; +sql_error select sqrt(c2+c3) from stb1 group by c1; +print execute sql select sqrt(c2+c3) from stb1 group by tbname; +sql_error select sqrt(c2+c3) from stb1 group by tbname; +print execute sql select sqrt(c2+c3) from tb1 order by c2; +sql_error select sqrt(c2+c3) from tb1 order by c2; +print execute sql select sqrt(c8),sqrt(c9) from tbn; +sql_error select sqrt(c8),sqrt(c9) from tbn; +print execute sql select sqrt(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select sqrt(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select sqrt(a) from (select sqrt(c2) as a from tb1); +sql select sqrt(a) from (select sqrt(c2) as a from tb1); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.189207115@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @1.414213562@ then + return -1 +endi 
+if $data50 != @3.356996823@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt("abc") from tb1; +sql_error select sqrt("abc") from tb1; +print execute sql select sqrt(c2 + c3) from tb1; +sql select sqrt(c2 + c3) from tb1; +if $data00 != @1.414213562@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.828427125@ then + return -1 +endi +if $data50 != @181.367031183@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt((c2 + c3)) from tb1; +sql select sqrt((c2 + c3)) from tb1; +if $data00 != @1.414213562@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.828427125@ then + return -1 +endi +if $data50 != @181.367031183@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt((c2 * c3)+c4-6) from tb1; +sql select sqrt((c2 * c3)+c4-6) from tb1; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @3.741657387@ then + return -1 +endi +if $data50 != @46385.828115924@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(11)+c2 from tb1; +sql select sqrt(11)+c2 from tb1; +if $data00 != @4.316624790@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @5.316624790@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @7.316624790@ then + return -1 +endi +if $data50 != @130.316624790@ then + return -1 +endi +if $data60 != @-123.683375210@ then + return -1 +endi +print execute sql select sqrt(c1)+c2 from tb1; +sql_error select sqrt(c1)+c2 from tb1; +print execute sql select 
sqrt(c2)+11 from tb1; +sql select sqrt(c2)+11 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @12.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @13.000000000@ then + return -1 +endi +if $data50 != @22.269427670@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c1),c1,c2 from tb1; +sql_error select sqrt(c1),c1,c2 from tb1; +print execute sql select sqrt(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select sqrt(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select sqrt(c1),floor(c3) from tb1; +sql_error select sqrt(c1),floor(c3) from tb1; +print execute sql select sqrt(c1),sqrt(c2+c3) from tb1; +sql_error select sqrt(c1),sqrt(c2+c3) from tb1; +print execute sql select sqrt(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select sqrt(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @1.414213562@ then + return -1 +endi +if $data10 != @2.828427125@ then + return -1 +endi +if $data20 != @181.367031183@ then + return -1 +endi +if $data30 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c2) from tb1 order by ts desc; +sql select sqrt(c2) from tb1 order by ts desc; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @11.269427670@ then + return -1 +endi +if $data20 != @2.000000000@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @1.414213562@ then + return -1 +endi +if $data50 != NULL then + return -1 +endi +if $data60 != @1.000000000@ then + return -1 +endi +print execute sql select sqrt(c2+c3) from tb1 order by ts desc; +sql select sqrt(c2+c3) from tb1 order by ts desc; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @181.367031183@ then + return -1 +endi +if $data20 != @2.828427125@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != NULL then + return -1 +endi +if 
$data50 != NULL then + return -1 +endi +if $data60 != @1.414213562@ then + return -1 +endi +print execute sql select sqrt(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select sqrt(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @2.828427125@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +print execute sql select sqrt(c2) from stb1; +sql select sqrt(c2) from stb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @11.269427670@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data90 != @1.732050808@ then + return -1 +endi +print execute sql select sqrt(c2) from stb1 order by ts desc; +sql select sqrt(c2) from stb1 order by ts desc; +if $data00 != @2.645751311@ then +if $data00 != @-nan@ then + return -1 +endi +endi +if $data20 != @2.449489743@ then +if $data20 != @11.269427670@ then + return -1 +endi +endi +if $data40 != @2.236067977@ then +if $data40 != @2.000000000@ then + return -1 +endi +endi +if $data60 != @2.000000000@ then +if $data60 != NULL then + return -1 +endi +endi +if $data80 != @1.732050808@ then +if $data80 != @1.414213562@ then + return -1 +endi +endi +print execute sql select sqrt(c4),t1,c4 from stb1 order by ts desc; +sql select sqrt(c4),t1,c4 from stb1 order by ts desc; +if $data00 != @2.645751311@ then +if $data00 != @-nan@ then + return -1 +endi +endi +if $data01 != @2@ then +if $data01 != @1@ then + return -1 +endi +endi +if $data20 != @2.449489743@ then +if $data20 != @46340.950001052@ then + return -1 +endi +endi +if $data21 != @2@ then +if $data21 != @1@ then + return -1 +endi +endi +if $data40 != @2.236067977@ then +if 
$data40 != @2.000000000@ then + return -1 +endi +endi +if $data41 != @2@ then +if $data41 != @1@ then + return -1 +endi +endi +if $data60 != @2.000000000@ then +if $data60 != NULL then + return -1 +endi +endi +if $data61 != @2@ then +if $data61 != @1@ then + return -1 +endi +endi +if $data80 != @1.414213562@ then +if $data80 != @1.414213562@ then + return -1 +endi +endi +if $data81 != @2@ then +if $data81 != @1@ then + return -1 +endi +endi +print execute sql select sqrt(c3),tbname from stb1; +sql select sqrt(c3),tbname from stb1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @181.016573827@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @1.732050808@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select sqrt(c3),tbname from stb1 where t1 > 1; +sql select sqrt(c3),tbname from stb1 where t1 > 1; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @1.414213562@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @1.732050808@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @2.000000000@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != 
@2.236067977@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @2.449489743@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @2.645751311@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select sqrt(c8),sqrt(c9) from tbn; +sql_error select sqrt(c8),sqrt(c9) from tbn; +print execute sql select sqrt(c8),sqrt(c9) from tbn order by ts desc; +sql_error select sqrt(c8),sqrt(c9) from tbn order by ts desc; +print execute sql select sqrt(sqrt(c8)) from tbn; +sql_error select sqrt(sqrt(c8)) from tbn; +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select sqrt(a) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @1.414213562@ then + return -1 +endi +if $data20 != @1.581138830@ then + return -1 +endi +if $data30 != @2.000000000@ then + return -1 +endi +if $data40 != @2.121320344@ then + return -1 +endi +if $data50 != @8.154753215@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(c2) from (select * from stb1); +sql select sqrt(c2) from (select * from stb1); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @11.269427670@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data90 != @1.732050808@ then + return -1 +endi +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= 
'2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @1.414213562@ then + return -1 +endi +if $data20 != @1.581138830@ then + return -1 +endi +if $data30 != @2.000000000@ then + return -1 +endi +if $data40 != @2.121320344@ then + return -1 +endi +if $data50 != @8.154753215@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @1.414213562@ then + return -1 +endi +if $data20 != @1.581138830@ then + return -1 +endi +if $data30 != @2.000000000@ then + return -1 +endi +if $data40 != @2.121320344@ then + return -1 +endi +if $data50 != @8.154753215@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @-nan@ then + return -1 +endi +if $data40 != @8.154753215@ then + return -1 +endi +if $data50 != @2.121320344@ then + 
return -1 +endi +if $data60 != @2.000000000@ then + return -1 +endi +if $data70 != @1.581138830@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data90 != @1.000000000@ then + return -1 +endi +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != NULL then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @8.154753215@ then + return -1 +endi +if $data40 != @2.121320344@ then + return -1 +endi +if $data50 != @2.000000000@ then + return -1 +endi +if $data60 != @1.581138830@ then + return -1 +endi +if $data70 != @1.414213562@ then + return -1 +endi +if $data80 != @1.000000000@ then + return -1 +endi +if $data90 != @-nan@ then + return -1 +endi +print execute sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @1.000000000@ then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != @1.581138830@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @2.121320344@ then + return -1 +endi +if $data60 != @8.154753215@ then + return -1 +endi +if $data70 != NULL then + return -1 +endi +if $data80 != NULL then + return -1 +endi +if $data90 != NULL then + return -1 +endi +print execute sql select sqrt(a) from (select sqrt(c2) as a from tb1); +sql select sqrt(a) from 
(select sqrt(c2) as a from tb1); +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.189207115@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @1.414213562@ then + return -1 +endi +if $data50 != @3.356996823@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +print execute sql select sqrt(tb1.c3),sqrt(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select sqrt(tb1.c3),sqrt(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @1.414213562@ then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data21 != @1.732050808@ then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data31 != @2.000000000@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data41 != @2.236067977@ then + return -1 +endi +if $data50 != @181.016573827@ then + return -1 +endi +if $data51 != @2.449489743@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data61 != @2.645751311@ then + return -1 +endi +print execute sql select sqrt(c3) from tb1 union all select sqrt(c3) from tb2; +sql select sqrt(c3) from tb1 union all select sqrt(c3) from tb2; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != NULL then + return -1 +endi +if $data30 != @1.732050808@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @181.016573827@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data90 != @1.732050808@ then + return -1 +endi diff --git a/tests/script/general/compute/math_sqrt2.sim b/tests/script/general/compute/math_sqrt2.sim new file mode 
100644 index 0000000000000000000000000000000000000000..8b0958881e7028368b486114b40f55001685d024 --- /dev/null +++ b/tests/script/general/compute/math_sqrt2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db; + +print execute sql select sqrt(stb1.c4),sqrt(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select sqrt(stb1.c4),sqrt(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1.000000000@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data11 != @1.414213562@ then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data21 != @1.732050808@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data31 != @2.000000000@ then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data41 != @2.236067977@ then + return -1 +endi +if $data50 != @46340.950001052@ then + return -1 +endi +if $data51 != @2.449489743@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data61 != @2.645751311@ then + return -1 +endi +print execute sql select sqrt(c4) as a from stb1 union all select sqrt(c5) as a from stba; +sql select sqrt(c4) as a from stb1 union all select sqrt(c5) as a from stba; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != NULL then + return -1 +endi +if $data20 != @1.414213562@ then + return -1 +endi +if $data30 != NULL then + return -1 +endi +if $data40 != @2.000000000@ then + return -1 +endi +if $data50 != @46340.950001052@ then + return -1 +endi +if $data60 != @-nan@ then + return -1 +endi +if $data70 != @1.000000000@ then + return -1 +endi +if $data80 != @1.414213562@ then + return -1 +endi +if $data90 != @1.414213562@ then + return -1 +endi +print execute sql select sqrt(c2) from stba; +sql select sqrt(c2) from stba; +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @1.414213562@ then + return -1 +endi +if $data20 != 
@1.732050808@ then + return -1 +endi +if $data30 != @2.000000000@ then + return -1 +endi +if $data40 != @2.236067977@ then + return -1 +endi +if $data50 != @2.449489743@ then + return -1 +endi +if $data60 != @2.645751311@ then + return -1 +endi +if $data70 != @2.828427125@ then + return -1 +endi +if $data80 != @3.000000000@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(min(c2)) from tba1; +sql select sqrt(min(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(max(c2)) from tba1; +sql select sqrt(max(c2)) from tba1; +if $data00 != @3.000000000@ then + return -1 +endi +print execute sql select sqrt(count(c2)) from tba1; +sql select sqrt(count(c2)) from tba1; +if $data00 != @5.477225575@ then + return -1 +endi +print execute sql select sqrt(sum(c2)) from tba1; +sql select sqrt(sum(c2)) from tba1; +if $data00 != @11.618950039@ then + return -1 +endi +print execute sql select sqrt(avg(c2)) from tba1; +sql select sqrt(avg(c2)) from tba1; +if $data00 != @2.121320344@ then + return -1 +endi +print execute sql select sqrt(percentile(c2, 10)) from tba1; +sql select sqrt(percentile(c2, 10)) from tba1; +if $data00 != @0.948683298@ then + return -1 +endi +print execute sql select sqrt(apercentile(c2, 10)) from tba1; +sql select sqrt(apercentile(c2, 10)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(stddev(c2)) from tba1; +sql select sqrt(stddev(c2)) from tba1; +if $data00 != @1.694780612@ then + return -1 +endi +print execute sql select sqrt(spread(c2)) from tba1; +sql select sqrt(spread(c2)) from tba1; +if $data00 != @3.000000000@ then + return -1 +endi +print execute sql select sqrt(twa(c2)) from tba1; +sql select sqrt(twa(c2)) from tba1; +if $data00 != @2.153585623@ then + return -1 +endi +print execute sql select sqrt(leastsquares(c2, 1, 1)) from tba1; +sql_error select sqrt(leastsquares(c2, 1, 1)) from tba1; +print 
execute sql select sqrt(interp(c2)) from tba1 every(1s) +sql select sqrt(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @1.414213562@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @1.732050808@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @2.000000000@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @2.236067977@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @2.449489743@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @2.645751311@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @2.828427125@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @3.000000000@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(interp(c2)) from stba every(1s) group by tbname; +sql select sqrt(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @1.414213562@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @1.732050808@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @2.000000000@ then + return -1 +endi +if $data32 != 
@tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @2.236067977@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @2.449489743@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @2.645751311@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @2.828427125@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @3.000000000@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select sqrt(elapsed(ts)) from tba1; +sql select sqrt(elapsed(ts)) from tba1; +if $data00 != @170.293863659@ then + return -1 +endi +print execute sql select sqrt(rate(c2)) from tba1; +sql select sqrt(rate(c2)) from tba1; +if $data00 != @0.946864153@ then + return -1 +endi +print execute sql select sqrt(irate(c2)) from tba1; +sql select sqrt(irate(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(first(c2)) from tba1; +sql select sqrt(first(c2)) from tba1; +if $data00 != @1.000000000@ then + return -1 +endi +print execute sql select sqrt(last(c2)) from tba1; +sql select sqrt(last(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(last_row(c2)) from tba1; +sql select sqrt(last_row(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select sqrt(top(c2, 1)) from tba1; +sql_error select sqrt(top(c2, 1)) from tba1; +print execute sql select sqrt(bottom(c2, 
1)) from tba1; +sql_error select sqrt(bottom(c2, 1)) from tba1; +print execute sql select sqrt(leastsquares(c2, 1, 1)) from tba1; +sql_error select sqrt(leastsquares(c2, 1, 1)) from tba1; +print execute sql select sqrt(derivative(c2, 1s, 0)) from tba1; +sql_error select sqrt(derivative(c2, 1s, 0)) from tba1; +print execute sql select sqrt(diff(c2)) from tba1; +sql_error select sqrt(diff(c2)) from tba1; +print execute sql select sqrt(csum(c2)) from tba1; +sql_error select sqrt(csum(c2)) from tba1; +print execute sql select sqrt(mavg(c2,2)) from tba1; +sql_error select sqrt(mavg(c2,2)) from tba1; +print execute sql select sqrt(sample(c2,2)) from tba1; +sql_error select sqrt(sample(c2,2)) from tba1; +print execute sql select sqrt(_block_dist()) from tba1; +sql_error select sqrt(_block_dist()) from tba1; diff --git a/tests/script/general/compute/math_tan.sim b/tests/script/general/compute/math_tan.sim new file mode 100644 index 0000000000000000000000000000000000000000..d6f3a99230a716ef750e38c7f8e491e15dafc2fc --- /dev/null +++ b/tests/script/general/compute/math_tan.sim @@ -0,0 +1,1073 @@ +sleep 100 +sql connect +sql use db + +print execute sql select tan(*) from tb1; +sql_error select tan(*) from tb1; +print execute sql select tan(*) from tb1; +sql_error select tan(*) from tb1; +print execute sql select tan(*) from tb1; +sql_error select tan(*) from tb1; +print execute sql select tan(*) from tb1; +sql_error select tan(*) from tb1; +print execute sql select tan(*) as a from tb1; +sql_error select tan(*) as a from tb1; +print execute sql select tan(*) + 1 as a from tb1; +sql_error select tan(*) + 1 as a from tb1; +print execute sql select tan(tb1.*) + 1 as a from tb1; +sql_error select tan(tb1.*) + 1 as a from tb1; +print execute sql select tan(*) from tb1; +sql_error select tan(*) from tb1; +print execute sql select tan(c1) from tb1; +sql_error select tan(c1) from tb1; +print execute sql select tan(c1) from tb1; +sql_error select tan(c1) from tb1; +print execute sql 
select tan(c1 + c2) from tb1; +sql_error select tan(c1 + c2) from tb1; +print execute sql select tan(13) from tb1; +sql select tan(13) from tb1; +if $data00 != @0.463021133@ then + return -1 +endi +if $data10 != @0.463021133@ then + return -1 +endi +if $data20 != @0.463021133@ then + return -1 +endi +if $data30 != @0.463021133@ then + return -1 +endi +if $data40 != @0.463021133@ then + return -1 +endi +if $data50 != @0.463021133@ then + return -1 +endi +if $data60 != @0.463021133@ then + return -1 +endi +print execute sql select tan(c1) from tb1; +sql_error select tan(c1) from tb1; +print execute sql select tan(c2) from tb1; +sql select tan(c2) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @4.185891832@ then + return -1 +endi +if $data60 != @-4.185891832@ then + return -1 +endi +print execute sql select tan(c3) from tb1; +sql select tan(c3) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @0.190892344@ then + return -1 +endi +if $data60 != @-0.190892344@ then + return -1 +endi +print execute sql select tan(c4) from tb1; +sql select tan(c4) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @1.052377964@ then + return -1 +endi +if $data60 != @-1.052377964@ then + return -1 +endi +print execute sql select tan(c5) from tb1; +sql select tan(c5) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if 
$data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @84.739312969@ then + return -1 +endi +if $data60 != @-84.739312969@ then + return -1 +endi +print execute sql select tan(c6) from tb1; +sql select tan(c6) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @-0.611797950@ then + return -1 +endi +if $data60 != @0.611797950@ then + return -1 +endi +print execute sql select tan(c7) from tb1; +sql select tan(c7) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @1.788490683@ then + return -1 +endi +if $data60 != @-1.788490683@ then + return -1 +endi +print execute sql select tan(c8) from tb1; +sql_error select tan(c8) from tb1; +print execute sql select tan(c9) from tb1; +sql_error select tan(c9) from tb1; +print execute sql select tan(c10) from tb1; +sql select tan(c10) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @-0.506714715@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select tan(c11) from tb1; +sql select tan(c11) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return 
-1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @0.396223010@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select tan(c12) from tb1; +sql select tan(c12) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @-19.579238092@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select tan(c13) from tb1; +sql select tan(c13) from tb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @84.739312969@ then + return -1 +endi +if $data60 != @0.000000000@ then + return -1 +endi +print execute sql select tan(12345678900000000000000000) from tb1; +sql_error select tan(12345678900000000000000000) from tb1; +print execute sql select distinct tan(123) from tb1; +sql_error select distinct tan(123) from tb1; +print execute sql select tan(t1) from stb1; +sql_error select tan(t1) from stb1; +print execute sql select tan(c1),avg(c3) from tb1; +sql_error select tan(c1),avg(c3) from tb1; +print execute sql select tan(c1),top(c3,1) from tb1; +sql_error select tan(c1),top(c3,1) from tb1; +print execute sql select tan(c2+c3) from tb1 session(ts, 1s); +sql_error select tan(c2+c3) from tb1 session(ts, 1s); +print execute sql select tan(c2+c3) from tb1 STATE_WINDOW(c1); +sql_error select tan(c2+c3) from tb1 STATE_WINDOW(c1); +print execute sql select tan(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select tan(c2+c3) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select tan(c2+c3) from stb1 group by t1; +sql_error 
select tan(c2+c3) from stb1 group by t1; +print execute sql select tan(c2+c3) from stb1 group by ts; +sql_error select tan(c2+c3) from stb1 group by ts; +print execute sql select tan(c2+c3) from stb1 group by c1; +sql_error select tan(c2+c3) from stb1 group by c1; +print execute sql select tan(c2+c3) from stb1 group by tbname; +sql_error select tan(c2+c3) from stb1 group by tbname; +print execute sql select tan(c2+c3) from tb1 order by c2; +sql_error select tan(c2+c3) from tb1 order by c2; +print execute sql select tan(c8),tan(c9) from tbn; +sql_error select tan(c8),tan(c9) from tbn; +print execute sql select tan(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select tan(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select tan(a) from (select tan(c2) as a from tb1); +sql select tan(a) from (select tan(c2) as a from tb1); +if $data00 != @74.685933399@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.417928576@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @2.282204450@ then + return -1 +endi +if $data50 != @1.720515194@ then + return -1 +endi +if $data60 != @-1.720515194@ then + return -1 +endi +print execute sql select tan("abc") from tb1; +sql_error select tan("abc") from tb1; +print execute sql select tan(c2 + c3) from tb1; +sql select tan(c2 + c3) from tb1; +if $data00 != @-2.185039863@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-6.799711455@ then + return -1 +endi +if $data50 != @21.780973629@ then + return -1 +endi +if $data60 != @-21.780973629@ then + return -1 +endi +print execute sql select tan((c2 + c3)) from tb1; +sql select tan((c2 + c3)) from tb1; +if $data00 != @-2.185039863@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != 
@NULL@ then + return -1 +endi +if $data40 != @-6.799711455@ then + return -1 +endi +if $data50 != @21.780973629@ then + return -1 +endi +if $data60 != @-21.780973629@ then + return -1 +endi +print execute sql select tan((c2 * c3)+c4-6) from tb1; +sql select tan((c2 * c3)+c4-6) from tb1; +if $data00 != @-1.157821282@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @7.244606616@ then + return -1 +endi +if $data50 != @-0.083708953@ then + return -1 +endi +if $data60 != @7.387587308@ then + return -1 +endi +print execute sql select tan(11)+c2 from tb1; +sql select tan(11)+c2 from tb1; +if $data00 != @-224.950846454@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-223.950846454@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-221.950846454@ then + return -1 +endi +if $data50 != @-98.950846454@ then + return -1 +endi +if $data60 != @-352.950846454@ then + return -1 +endi +print execute sql select tan(c1)+c2 from tb1; +sql_error select tan(c1)+c2 from tb1; +print execute sql select tan(c2)+11 from tb1; +sql select tan(c2)+11 from tb1; +if $data00 != @12.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @8.814960137@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @12.157821282@ then + return -1 +endi +if $data50 != @15.185891832@ then + return -1 +endi +if $data60 != @6.814108168@ then + return -1 +endi +print execute sql select tan(c1),c1,c2 from tb1; +sql_error select tan(c1),c1,c2 from tb1; +print execute sql select tan(c1),t1,ts,tbname,_C0,_c0 from tb1; +sql_error select tan(c1),t1,ts,tbname,_C0,_c0 from tb1; +print execute sql select tan(c1),floor(c3) from tb1; +sql_error select tan(c1),floor(c3) from tb1; +print execute sql select tan(c1),tan(c2+c3) from tb1; +sql_error select 
tan(c1),tan(c2+c3) from tb1; +print execute sql select tan(c2+c3) from tb1 where c2 is not null and c3 is not null; +sql select tan(c2+c3) from tb1 where c2 is not null and c3 is not null; +if $data00 != @-2.185039863@ then + return -1 +endi +if $data10 != @-6.799711455@ then + return -1 +endi +if $data20 != @21.780973629@ then + return -1 +endi +if $data30 != @-21.780973629@ then + return -1 +endi +print execute sql select tan(c2) from tb1 order by ts desc; +sql select tan(c2) from tb1 order by ts desc; +if $data00 != @-4.185891832@ then + return -1 +endi +if $data10 != @4.185891832@ then + return -1 +endi +if $data20 != @1.157821282@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @-2.185039863@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1.557407725@ then + return -1 +endi +print execute sql select tan(c2+c3) from tb1 order by ts desc; +sql select tan(c2+c3) from tb1 order by ts desc; +if $data00 != @-21.780973629@ then + return -1 +endi +if $data10 != @21.780973629@ then + return -1 +endi +if $data20 != @-6.799711455@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @-2.185039863@ then + return -1 +endi +print execute sql select tan(c2+c3) from tb1 order by ts desc limit 3 offset 2; +sql select tan(c2+c3) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @-6.799711455@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select tan(c2) from stb1; +sql select tan(c2) from stb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @4.185891832@ then + return -1 +endi +if 
$data60 != @-4.185891832@ then + return -1 +endi +if $data70 != @1.557407725@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data90 != @-0.142546543@ then + return -1 +endi +print execute sql select tan(c2) from stb1 order by ts desc; +sql select tan(c2) from stb1 order by ts desc; +if $data00 != @0.871447983@ then + if $data00 != @-4.185891832@ then + return -1 + endi +endi +if $data20 != @-0.291006191@ then + if $data20 != @4.185891832@ then + return -1 + endi +endi +if $data40 != @-3.380515006@ then + if $data40 != @1.157821282@ then + return -1 + endi +endi +if $data60 != @1.157821282@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @-0.142546543@ then + if $data80 != @-2.185039863@ then + return -1 + endi +endi +print execute sql select tan(c4),t1 from stb1 order by ts desc; +sql select tan(c4),t1 from stb1 order by ts desc; +if $data00 != @0.871447983@ then + if $data00 != @-1.052377964@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @-0.291006191@ then + if $data20 != @1.052377964@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @-3.380515006@ then + if $data40 != @1.157821282@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @1.157821282@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @-2.185039863@ then + if $data80 != @-2.185039863@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select tan(c3),tbname from stb1; +sql select tan(c3),tbname from stb1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + 
return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @0.190892344@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @-0.190892344@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1.557407725@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @-0.142546543@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select tan(c3),tbname from stb1 where t1 > 1; +sql select tan(c3),tbname from stb1 where t1 > 1; +if $data00 != @1.557407725@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @-2.185039863@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @-0.142546543@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @1.157821282@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @-3.380515006@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @-0.291006191@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @0.871447983@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select tan(c8),tan(c9) from tbn; +sql_error select tan(c8),tan(c9) from tbn; +print execute sql select tan(c8),tan(c9) from tbn order by ts desc; +sql_error select tan(c8),tan(c9) from tbn order by ts desc; +print execute sql select tan(tan(c8)) from tbn; +sql_error select tan(tan(c8)) from tbn; +print execute sql select tan(a) from (select avg(c2) as a from stb1 interval(1s)); +sql select tan(a) from 
(select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @-2.185039863@ then + return -1 +endi +if $data20 != @-0.747022297@ then + return -1 +endi +if $data30 != @1.157821282@ then + return -1 +endi +if $data40 != @4.637332055@ then + return -1 +endi +if $data50 != @0.581297682@ then + return -1 +endi +if $data60 != @-0.320040389@ then + return -1 +endi +print execute sql select tan(c2) from (select * from stb1); +sql select tan(c2) from (select * from stb1); +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @4.185891832@ then + return -1 +endi +if $data60 != @-4.185891832@ then + return -1 +endi +if $data70 != @1.557407725@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data90 != @-0.142546543@ then + return -1 +endi +print execute sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @-2.185039863@ then + return -1 +endi +if $data20 != @-0.747022297@ then + return -1 +endi +if $data30 != @1.157821282@ then + return -1 +endi +if $data40 != @4.637332055@ then + return -1 +endi +if $data50 != @0.581297682@ then + return -1 +endi +if $data60 != @-0.320040389@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' 
interval(1s) fill(null)) order by ts; +sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @-2.185039863@ then + return -1 +endi +if $data20 != @-0.747022297@ then + return -1 +endi +if $data30 != @1.157821282@ then + return -1 +endi +if $data40 != @4.637332055@ then + return -1 +endi +if $data50 != @0.581297682@ then + return -1 +endi +if $data60 != @-0.320040389@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.320040389@ then + return -1 +endi +if $data40 != @0.581297682@ then + return -1 +endi +if $data50 != @4.637332055@ then + return -1 +endi +if $data60 != @1.157821282@ then + return -1 +endi +if $data70 != @-0.747022297@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data90 != @1.557407725@ then + return -1 +endi +print execute sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + 
return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0.581297682@ then + return -1 +endi +if $data40 != @4.637332055@ then + return -1 +endi +if $data50 != @1.157821282@ then + return -1 +endi +if $data60 != @-0.747022297@ then + return -1 +endi +if $data70 != @-2.185039863@ then + return -1 +endi +if $data80 != @1.557407725@ then + return -1 +endi +if $data90 != @-0.320040389@ then + return -1 +endi +print execute sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @-0.320040389@ then + return -1 +endi +if $data10 != @1.557407725@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @-0.747022297@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @4.637332055@ then + return -1 +endi +if $data60 != @0.581297682@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select tan(a) from (select tan(c2) as a from tb1); +sql select tan(a) from (select tan(c2) as a from tb1); +if $data00 != @74.685933399@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1.417928576@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @2.282204450@ then + return -1 +endi +if $data50 != @1.720515194@ then + return -1 +endi +if $data60 != @-1.720515194@ then + return -1 +endi +print execute sql select tan(tb1.c3),tan(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +sql select tan(tb1.c3),tan(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1.557407725@ then + return -1 +endi +if $data01 != @1.557407725@ then + 
return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @-2.185039863@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @-0.142546543@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data31 != @1.157821282@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data41 != @-3.380515006@ then + return -1 +endi +if $data50 != @0.190892344@ then + return -1 +endi +if $data51 != @-0.291006191@ then + return -1 +endi +if $data60 != @-0.190892344@ then + return -1 +endi +if $data61 != @0.871447983@ then + return -1 +endi +print execute sql select tan(c3) from tb1 union all select tan(c3) from tb2; +sql select tan(c3) from tb1 union all select tan(c3) from tb2; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-0.142546543@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @0.190892344@ then + return -1 +endi +if $data60 != @-0.190892344@ then + return -1 +endi +if $data70 != @1.557407725@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data90 != @-0.142546543@ then + return -1 +endi diff --git a/tests/script/general/compute/math_tan2.sim b/tests/script/general/compute/math_tan2.sim new file mode 100644 index 0000000000000000000000000000000000000000..23e4803c6d9c6e8d09eed5bbaac8f3df44287d00 --- /dev/null +++ b/tests/script/general/compute/math_tan2.sim @@ -0,0 +1,366 @@ +sleep 100 +sql connect +sql use db + +print execute sql select tan(stb1.c4),tan(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select tan(stb1.c4),tan(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1.557407725@ then + return -1 +endi +if $data01 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @-2.185039863@ 
then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data21 != @-0.142546543@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @1.157821282@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data41 != @-3.380515006@ then + return -1 +endi +if $data50 != @1.052377964@ then + return -1 +endi +if $data51 != @-0.291006191@ then + return -1 +endi +if $data60 != @-1.052377964@ then + return -1 +endi +if $data61 != @0.871447983@ then + return -1 +endi +print execute sql select tan(c4) as a from stb1 union all select tan(c5) as a from stba; +sql select tan(c4) as a from stb1 union all select tan(c5) as a from stba; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @-2.185039863@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1.157821282@ then + return -1 +endi +if $data50 != @1.052377964@ then + return -1 +endi +if $data60 != @-1.052377964@ then + return -1 +endi +if $data70 != @1.557407725@ then + return -1 +endi +if $data80 != @-2.185039863@ then + return -1 +endi +if $data90 != @-2.185039863@ then + return -1 +endi +print execute sql select tan(c2) from stba; +sql select tan(c2) from stba; +if $data00 != @1.557407725@ then + return -1 +endi +if $data10 != @-2.185039863@ then + return -1 +endi +if $data20 != @-0.142546543@ then + return -1 +endi +if $data30 != @1.157821282@ then + return -1 +endi +if $data40 != @-3.380515006@ then + return -1 +endi +if $data50 != @-0.291006191@ then + return -1 +endi +if $data60 != @0.871447983@ then + return -1 +endi +if $data70 != @-6.799711455@ then + return -1 +endi +if $data80 != @-0.452315659@ then + return -1 +endi +if $data90 != @0.000000000@ then + return -1 +endi +print execute sql select tan(min(c2)) from tba1; +sql select tan(min(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select tan(max(c2)) 
from tba1; +sql select tan(max(c2)) from tba1; +if $data00 != @-0.452315659@ then + return -1 +endi +print execute sql select tan(count(c2)) from tba1; +sql select tan(count(c2)) from tba1; +if $data00 != @-6.405331197@ then + return -1 +endi +print execute sql select tan(sum(c2)) from tba1; +sql select tan(sum(c2)) from tba1; +if $data00 != @-0.088715757@ then + return -1 +endi +print execute sql select tan(avg(c2)) from tba1; +sql select tan(avg(c2)) from tba1; +if $data00 != @4.637332055@ then + return -1 +endi +print execute sql select tan(percentile(c2, 10)) from tba1; +sql select tan(percentile(c2, 10)) from tba1; +if $data00 != @1.260158218@ then + return -1 +endi +print execute sql select tan(apercentile(c2, 10)) from tba1; +sql select tan(apercentile(c2, 10)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select tan(stddev(c2)) from tba1; +sql select tan(stddev(c2)) from tba1; +if $data00 != @-0.276016859@ then + return -1 +endi +print execute sql select tan(spread(c2)) from tba1; +sql select tan(spread(c2)) from tba1; +if $data00 != @-0.452315659@ then + return -1 +endi +print execute sql select tan(twa(c2)) from tba1; +sql select tan(twa(c2)) from tba1; +if $data00 != @13.405571552@ then + return -1 +endi +print execute sql select tan(leastsquares(c2, 1, 1)) from tba1; +sql_error select tan(leastsquares(c2, 1, 1)) from tba1; +print execute sql select tan(interp(c2)) from tba1 every(1s) +sql select tan(interp(c2)) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1.557407725@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @-2.185039863@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @-0.142546543@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @1.157821282@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ 
then + return -1 +endi +if $data41 != @-3.380515006@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @-0.291006191@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.871447983@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @-6.799711455@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @-0.452315659@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +print execute sql select tan(interp(c2)) from stba every(1s) group by tbname; +sql select tan(interp(c2)) from stba every(1s) group by tbname; +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1.557407725@ then + return -1 +endi +if $data02 != @tba1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @-2.185039863@ then + return -1 +endi +if $data12 != @tba1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @-0.142546543@ then + return -1 +endi +if $data22 != @tba1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @1.157821282@ then + return -1 +endi +if $data32 != @tba1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @-3.380515006@ then + return -1 +endi +if $data42 != @tba1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @-0.291006191@ then + return -1 +endi +if $data52 != @tba1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @0.871447983@ then + return -1 +endi +if $data62 != @tba1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != 
@-6.799711455@ then + return -1 +endi +if $data72 != @tba1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @-0.452315659@ then + return -1 +endi +if $data82 != @tba1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @0.000000000@ then + return -1 +endi +if $data92 != @tba1@ then + return -1 +endi +print execute sql select tan(elapsed(ts)) from tba1; +sql select tan(elapsed(ts)) from tba1; +if $data00 != @-0.041809624@ then + return -1 +endi +print execute sql select tan(rate(c2)) from tba1; +sql select tan(rate(c2)) from tba1; +if $data00 != @1.251272660@ then + return -1 +endi +print execute sql select tan(irate(c2)) from tba1; +sql select tan(irate(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select tan(first(c2)) from tba1; +sql select tan(first(c2)) from tba1; +if $data00 != @1.557407725@ then + return -1 +endi +print execute sql select tan(last(c2)) from tba1; +sql select tan(last(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select tan(last_row(c2)) from tba1; +sql select tan(last_row(c2)) from tba1; +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select tan(top(c2, 1)) from tba1; +sql_error select tan(top(c2, 1)) from tba1; +print execute sql select tan(bottom(c2, 1)) from tba1; +sql_error select tan(bottom(c2, 1)) from tba1; +print execute sql select tan(leastsquares(c2, 1, 1)) from tba1; +sql_error select tan(leastsquares(c2, 1, 1)) from tba1; +print execute sql select tan(derivative(c2, 1s, 0)) from tba1; +sql_error select tan(derivative(c2, 1s, 0)) from tba1; +print execute sql select tan(diff(c2)) from tba1; +sql_error select tan(diff(c2)) from tba1; +print execute sql select tan(csum(c2)) from tba1; +sql_error select tan(csum(c2)) from tba1; +print execute sql select tan(mavg(c2,2)) from tba1; +sql_error select tan(mavg(c2,2)) from tba1; +print 
execute sql select tan(sample(c2,2)) from tba1; +sql_error select tan(sample(c2,2)) from tba1; +print execute sql select tan(_block_dist()) from tba1; +sql_error select tan(_block_dist()) from tba1; diff --git a/tests/script/general/compute/scalar_pow.sim b/tests/script/general/compute/scalar_pow.sim new file mode 100644 index 0000000000000000000000000000000000000000..1ca20c93412e9ad649f68e184f616c14d526c05c --- /dev/null +++ b/tests/script/general/compute/scalar_pow.sim @@ -0,0 +1,525 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 500 +sql connect + +$dbPrefix = db +$tbPrefix = ct +$mtPrefix = st +$tbNum = 2 +$rowNum = 50 + +print =============== step1 create stable/table +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0.25 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , $x , $y , $x , $x , $x , $y , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + endw + + $i = $i + 1 +endw + +print ================= step2 +$i = 1 +$tb = $tbPrefix . 
$i + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $tb + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi + +print ===> $data90 , $data91 , $data92 , $data93 , $data94 , $data95 +if $data94 != 813.169925001 then + return -1 +endi + +if $data95 != 19764.000000000 then + return -1 +endi + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $tb where ts == 1601481600000 + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi + +$stb = $mtPrefix . 
0 +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $stb where ts == 1601481600000 + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $stb where c1 == 0 + +print ===> $data01 , $data02 , $data03 , $data04 , $data05 +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $stb where c1 == 0 +print ===> $data01 , $data02 , $data03 , $data04 , $data05 + +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $stb order by ts desc + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 + +if $data00 != 49 then + return -1 +endi +if $data01 != 5.614709844 then + return -1 +endi +if $data02 != 2401.000000000 then + return -1 +endi +if $data03 != 2403.000000000 then + return -1 +endi +if $data04 != 120055.614709844 then + return -1 +endi +if $data05 != 40356008.000000000 then + return -1 +endi + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, 
pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from $tb order by ts limit 2; +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +print ===> $data10 , $data11 , $data12 , $data13 , $data14 , $data15 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 1.000000000 then + return -1 +endi +if $data13 != 3.000000000 then + return -1 +endi +if $data14 != 2.000000000 then + return -1 +endi +if $data15 != 2.000000000 then + return -1 +endi +print execute sql select sqrt(abs(log(c1,2)-pow(c1,2)))+2 from $tb order by ts limit 2 +sql select sqrt(abs(log(c1,2)-pow(c1,2)))+2 from $tb order by ts limit 2 +print $data00 , $ data10 +if $data00 != @inf@ then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi + +print ===============> step 3 sql_error stable, group by, window +sql_error select log(c2,2) from $stb group by tbname; + +sql_error select log(c2,2) from $stb group by tgcol; + +sql_error select log(c2,2) from $stb group by c3; + +sql_error select log(c2,2) from $stb interval(1m); + +sql_error select log(c2,2) from $stb state_window(c7); + +sql_error select log(c2,2) from $tb state_window(c7); + +sql_error select log(c2,2) from $stb session(ts, 30s); + +sql_error select log(c2,2) from $tb session(ts, 30s); + +sql_error select log(c2,2) from $stb slimit 2; + +sql_error select pow(c2,2) from $stb group by tbname; + +sql_error select pow(c2,2) from $stb group by tgcol; + +sql_error select pow(c2,2) from $stb group by c3; + +sql_error select pow(c2,2) from $stb interval(1m); + +sql_error select pow(c2,2) from $stb state_window(c7); + +sql_error select 
pow(c2,2) from $tb state_window(c7); + +sql_error select pow(c2,2) from $stb session(ts, 30s); + +sql_error select pow(c2,2) from $tb session(ts, 30s); + +sql_error select pow(c2,2) from $stb slimit 2; + +sql_error select pow(c2,2) from $stb interval(1m) slimit 2; +print ===============> step 4 nested query +print ===============> step 4.1 select expr... from (select * from super_tb order by ts desc) +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $stb order by ts desc) + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 49 then + return -1 +endi +if $data01 != 5.614709844 then + return -1 +endi +if $data02 != 2401.000000000 then + return -1 +endi +if $data03 != 2403.000000000 then + return -1 +endi +if $data04 != 120055.614709844 then + return -1 +endi +if $data05 != 40356008.000000000 then + return -1 +endi + +print ==============> step 4.2 select expr... 
from (select * from child_tb order by ts); + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $tb order by ts limit 2); +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +print ===> $data10 , $data11 , $data12 , $data13 , $data14 , $data15 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 1.000000000 then + return -1 +endi +if $data13 != 3.000000000 then + return -1 +endi +if $data14 != 2.000000000 then + return -1 +endi +if $data15 != 2.000000000 then + return -1 +endi + +print ====> step 4.3 select exprs... from (select * from super_tb ) order by ts desc; +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $stb ) order by ts desc +if $data00 != 49 then + return -1 +endi +if $data01 != 5.614709844 then + return -1 +endi +if $data02 != 2401.000000000 then + return -1 +endi +if $data03 != 2403.000000000 then + return -1 +endi +if $data04 != 120055.614709844 then + return -1 +endi +if $data05 != 40356008.000000000 then + return -1 +endi +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $stb ) +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi 
+if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi + +print ====> step 4.4 select exprs... from (select * from child_tb) order by ts limit 2; + +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $tb ) order by ts limit 2; +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +print ===> $data10 , $data11 , $data12 , $data13 , $data14 , $data15 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 1.000000000 then + return -1 +endi +if $data13 != 3.000000000 then + return -1 +endi +if $data14 != 2.000000000 then + return -1 +endi +if $data15 != 2.000000000 then + return -1 +endi +sql select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6 from (select * from $tb ) limit 2; +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +print ===> $data10 , $data11 , $data12 , $data13 , $data14 , $data15 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 1.000000000 then + return -1 +endi +if $data13 != 3.000000000 then + return -1 +endi +if $data14 != 2.000000000 then + return -1 +endi +if $data15 != 2.000000000 then + return -1 
+endi + +print ===============> step 4.5 select * from (select expr... from super_tb order by ts desc) +sql select * from (select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6, ts from $stb order by ts desc) + +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +if $data00 != 49 then + return -1 +endi +if $data01 != 5.614709844 then + return -1 +endi +if $data02 != 2401.000000000 then + return -1 +endi +if $data03 != 2403.000000000 then + return -1 +endi +if $data04 != 120055.614709844 then + return -1 +endi +if $data05 != 40356008.000000000 then + return -1 +endi + +print ==============> step 4.6 select * from (select expr... from child_tb order by ts); + +sql select * from (select c1, log(c2, 2), pow(c1,2), pow(c2,2)+2, pow(c2,3)+log(c3, 2)+pow(c5,2) as v4, pow(c4, 4.5)+pow(c3, 2), log(c1,2)+log(c3,4)+pow(c6,2.8)+2 as v6, ts from $tb order by ts limit 2); +print ===> $data00 , $data01 , $data02 , $data03 , $data04 , $data05 +print ===> $data10 , $data11 , $data12 , $data13 , $data14 , $data15 +if $data00 != 0 then + return -1 +endi +if $data01 != -2.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 2.062500000 then + return -1 +endi +if $data04 != -inf then + return -1 +endi +if $data05 != 0.000000000 then + return -1 +endi +if $data10 != 1 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 1.000000000 then + return -1 +endi +if $data13 != 3.000000000 then + return -1 +endi +if $data14 != 2.000000000 then + return -1 +endi +if $data15 != 2.000000000 then + return -1 +endi + + +print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/scalar_str_concat_len.sim b/tests/script/general/compute/scalar_str_concat_len.sim new file 
mode 100644 index 0000000000000000000000000000000000000000..bce47896b00cfed41c509c6af2819341aab27b14 --- /dev/null +++ b/tests/script/general/compute/scalar_str_concat_len.sim @@ -0,0 +1,828 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 500 +sql connect + +$dbPrefix = db +$tbPrefix = ct +$mtPrefix = st +$quote = ' +$tbNum = 2 +$rowNum = 50 + +print =============== step1 create stable/table +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 binary(10), c3 binary(30), c4 binary(40), c5 binary(50), c6 nchar(10), c7 nchar(20), c8 nchar(30), c9 nchar(40)) TAGS (tgcol int) +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + + $z2 = $x . 2 + $y2 = $quote . $z2 + $y2 = $y2 . $quote + + $z3 = $x . 3 + $y3 = $quote . $z3 + $y3 = $y3 . $quote + + $z4 = $x . 4 + $y4 = $quote . $z4 + $y4 = $y4 . $quote + + $z5 = $x . 5 + $y5 = $quote . $z5 + $y5 = $y5 . $quote + + $z6 = $x . 6 + $y6 = $quote . $z6 + $y6 = $y6 . $quote + + $z7 = $x . 7 + $y7 = $quote . $z7 + $y7 = $y7 . $quote + + $z8 = $x . 8 + $y8 = $quote . $z8 + $y8 = $y8 . $quote + + $z9 = $x . 9 + $y9 = $quote . $z9 + $y9 = $y9 . $quote + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into $tb values ($ms , $x , $y2 , $y3 , $y4 , $y5 , $y6 , $y7 , $y8 , $y9 ) + $x = $x + 1 + $z2 = $x . 2 + $y2 = $quote . $z2 + $y2 = $y2 . $quote + + $z3 = $x . 3 + $y3 = $quote . $z3 + $y3 = $y3 . $quote + + $z4 = $x . 4 + $y4 = $quote . $z4 + $y4 = $y4 . $quote + + $z5 = $x . 5 + $y5 = $quote . $z5 + $y5 = $y5 . $quote + + $z6 = $x . 6 + $y6 = $quote . $z6 + $y6 = $y6 . $quote + + $z7 = $x . 7 + $y7 = $quote . $z7 + $y7 = $y7 . $quote + + $z8 = $x . 8 + $y8 = $quote . $z8 + $y8 = $y8 . $quote + + $z9 = $x . 
9 + $y9 = $quote . $z9 + $y9 = $y9 . $quote + endw + + $i = $i + 1 +endw + +print ================= step2 +$i = 1 +$tb = $tbPrefix . $i +$stb = $mtPrefix . 0 + +print sql select concat(c2, c3, c4, c5) from $tb +sql select concat(c2, c3, c4, c5) from $tb +print $data00 +if $data00 != 02030405 then + return -1 +endi + +print sql select concat_ws('data',c2,c3,c4,c5) from $tb +sql select concat_ws('data',c2,c3,c4,c5) from $tb +print $data00 + +if $data00 != 02data03data04data05 then + return -1 +endi +print sql select concat(c6, c7, c8, c9) from $tb +sql select concat(c6, c7, c8, c9) from $tb +print $data00 +if $data00 != 06070809 then + return -1 +endi + +print sql select concat_ws('data' ,c6,c7,c8,c9) from $tb +sql select concat_ws('data' ,c6,c7,c8,c9) from $tb +print $data00 + +if $data00 != 06data07data08data09 then + return -1 +endi + +print sql select length(concat(c2, c3, c4, c5)) from $tb +sql select length(concat(c2, c3, c4, c5)) from $tb +print $data00 +if $data00 != 8 then + return -1 +endi + +print sql select char_length(concat(c2, c3, c4, c5)) from $tb +sql select char_length(concat(c2, c3, c4, c5)) from $tb +print $data00 +if $data00 != 8 then + return -1 +endi + +print sql select length(concat_ws('data',c2,c3,c4,c5)) from $tb +sql select length(concat_ws('data',c2,c3,c4,c5)) from $tb +print $data00 + +if $data00 != 20 then + return -1 +endi + +print sql select char_length(concat_ws('data',c2,c3,c4,c5)) from $tb +sql select char_length(concat_ws('data',c2,c3,c4,c5)) from $tb +print $data00 + +if $data00 != 20 then + return -1 +endi + +print sql select length(concat(c6, c7, c8, c9)) from $tb +sql select length(concat(c6, c7, c8, c9)) from $tb +print $data00 +if $data00 != 32 then + return -1 +endi + +print sql select char_length(concat(c6, c7, c8, c9)) from $tb +sql select char_length(concat(c6, c7, c8, c9)) from $tb +print $data00 +if $data00 != 8 then + return -1 +endi + +print sql select length(concat_ws('data' ,c6,c7,c8,c9)) from $tb +sql select 
length(concat_ws('data' ,c6,c7,c8,c9)) from $tb +print $data00 + +if $data00 != 80 then + return -1 +endi + +print sql select char_length(concat_ws('data', c6,c7,c8,c9)) from $tb +sql select char_length(concat_ws('data', c6, c7, c8, c9)) from $tb +print $data00 +if $data00 != 20 then + return -1 +endi + +print sql_error select concat(c1, c2, c3, c4, c5) from $tb +sql_error select concat(c1, c2, c3, c4, c5) from $tb +print sql_error select concat_ws('data',c1,c2,c3,c4,c5) from $tb +sql_error select concat_ws('data',c1,c2,c3,c4,c5) from $tb + +print ===============> step 3 sql_error stable, group by, window +sql_error select concat(c2) from $stb group by tbname; + +sql_error select concat(c2) from $stb group by tgcol; + +sql_error select concat(c2) from $stb group by c3; + +sql_error select concat(c2) from $stb interval(1m); + +sql_error select concat(c2) from $stb state_window(c7); + +sql_error select concat(c2) from $tb state_window(c7); + +sql_error select concat(c2) from $stb session(ts, 30s); + +sql_error select concat(c2) from $tb session(ts, 30s); + +sql_error select concat(c2) from $stb slimit 2; + +sql_error select concat(c2) from $stb interval(1m) slimit 2; + +sql_error select length(c2) from $stb group by tbname; + +sql_error select length(c2) from $stb group by tgcol; + +sql_error select length(c2) from $stb group by c3; + +sql_error select length(c2) from $stb interval(1m); + +sql_error select length(c2) from $stb state_window(c7); + +sql_error select length(c2) from $tb state_window(c7); + +sql_error select length(c2) from $stb session(ts, 30s); + +sql_error select length(c2) from $tb session(ts, 30s); + +sql_error select length(c2) from $stb slimit 2; + +sql_error select length(c2) from $stb interval(1m) slimit 2; + +sql_error select concat_ws(c2) from $stb group by tbname; + +sql_error select concat_ws(c2) from $stb group by tgcol; + +sql_error select concat_ws(c2) from $stb group by c3; + +sql_error select concat_ws(c2) from $stb interval(1m); + 
+sql_error select concat_ws(c2) from $stb state_window(c7); + +sql_error select concat_ws(c2) from $tb state_window(c7); + +sql_error select concat_ws(c2) from $stb session(ts, 30s); + +sql_error select concat_ws(c2) from $tb session(ts, 30s); + +sql_error select concat_ws(c2) from $stb slimit 2; + +sql_error select concat_ws(c2) from $stb interval(1m) slimit 2; + +sql_error select char_length(c2) from $stb group by tbname; + +sql_error select char_length(c2) from $stb group by tgcol; + +sql_error select char_length(c2) from $stb group by c3; + +sql_error select char_length(c2) from $stb interval(1m); + +sql_error select char_length(c2) from $stb state_window(c7); + +sql_error select char_length(c2) from $tb state_window(c7); + +sql_error select char_length(c2) from $stb session(ts, 30s); + +sql_error select char_length(c2) from $tb session(ts, 30s); + +sql_error select char_length(c2) from $stb slimit 2; + +sql_error select char_length(c2) from $stb interval(1m) slimit 2; + +print =============== trival test + +print execute sql select concat(c2,c3),concat(c2,c3,c4),concat(c2,c3,c4,c5) from ct1 + +sql select concat(c2,c3),concat(c2,c3,c4),concat(c2,c3,c4,c5) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @0203@ then + return -1 +endi +if $data01 != @020304@ then + return -1 +endi +if $data02 != @02030405@ then + return -1 +endi +if $data10 != @1213@ then + return -1 +endi +if $data11 != @121314@ then + return -1 +endi +if $data12 != @12131415@ then + return -1 +endi +print execute sql select concat('taos',c2,c3),concat('taos',c2,c4),concat('taos',c2,c5),concat('taos',c3,c4),concat('taos',c3,c5) from ct1 + +sql select concat('taos',c2,c3),concat('taos',c2,c4),concat('taos',c2,c5),concat('taos',c3,c4),concat('taos',c3,c5) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @taos0203@ then + return -1 +endi +if $data01 != @taos0204@ then + return -1 +endi +if $data02 != @taos0205@ then + return -1 +endi +if $data03 != @taos0304@ then + 
return -1 +endi +if $data04 != @taos0305@ then + return -1 +endi +if $data10 != @taos1213@ then + return -1 +endi +if $data11 != @taos1214@ then + return -1 +endi +if $data12 != @taos1215@ then + return -1 +endi +if $data13 != @taos1314@ then + return -1 +endi +if $data14 != @taos1315@ then + return -1 +endi +print execute sql select concat(c6,c7,'taos'),concat(c6,c8,'taos'),concat(c6,c9,'taos'),concat(c7,c8,'taos'),concat(c7,c9,'taos') from ct1 + +sql select concat(c6,c7,'taos'),concat(c6,c8,'taos'),concat(c6,c9,'taos'),concat(c7,c8,'taos'),concat(c7,c9,'taos') from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @0607taos@ then + return -1 +endi +if $data01 != @0608taos@ then + return -1 +endi +if $data02 != @0609taos@ then + return -1 +endi +if $data03 != @0708taos@ then + return -1 +endi +if $data04 != @0709taos@ then + return -1 +endi +if $data10 != @1617taos@ then + return -1 +endi +if $data11 != @1618taos@ then + return -1 +endi +if $data12 != @1619taos@ then + return -1 +endi +if $data13 != @1718taos@ then + return -1 +endi +if $data14 != @1719taos@ then + return -1 +endi +print execute sql select concat('data',c7,'taos'),concat('data',c8,'taos'),concat('data',c9,'taos'),concat(c7,c8,'taos'),concat(c7,c9,'taos') from ct1 + +sql select concat('data',c7,'taos'),concat('data',c8,'taos'),concat('data',c9,'taos'),concat(c7,c8,'taos'),concat(c7,c9,'taos') from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @data07taos@ then + return -1 +endi +if $data01 != @data08taos@ then + return -1 +endi +if $data02 != @data09taos@ then + return -1 +endi +if $data03 != @0708taos@ then + return -1 +endi +if $data04 != @0709taos@ then + return -1 +endi +if $data10 != @data17taos@ then + return -1 +endi +if $data11 != @data18taos@ then + return -1 +endi +if $data12 != @data19taos@ then + return -1 +endi +if $data13 != @1718taos@ then + return -1 +endi +if $data14 != @1719taos@ then + return -1 +endi +print execute sql select 
concat_ws('jeff',c2,c3),concat_ws('jeff',c2,c3,c4),concat_ws('jeff',c2,c3,c4,c5) from ct1 + +sql select concat_ws('jeff',c2,c3),concat_ws('jeff',c2,c3,c4),concat_ws('jeff',c2,c3,c4,c5) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @02jeff03@ then + return -1 +endi +if $data01 != @02jeff03jeff04@ then + return -1 +endi +if $data02 != @02jeff03jeff04jeff05@ then + return -1 +endi +if $data10 != @12jeff13@ then + return -1 +endi +if $data11 != @12jeff13jeff14@ then + return -1 +endi +if $data12 != @12jeff13jeff14jeff15@ then + return -1 +endi +print execute sql select concat_ws('jeff','taos',c2,c3),concat_ws('jeff','taos',c2,c4),concat_ws('jeff','taos',c2,c5),concat_ws('jeff','taos',c3,c4),concat_ws('jeff','taos',c3,c5) from ct1 + +sql select concat_ws('jeff','taos',c2,c3),concat_ws('jeff','taos',c2,c4),concat_ws('jeff','taos',c2,c5),concat_ws('jeff','taos',c3,c4),concat_ws('jeff','taos',c3,c5) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @taosjeff02jeff03@ then + return -1 +endi +if $data01 != @taosjeff02jeff04@ then + return -1 +endi +if $data02 != @taosjeff02jeff05@ then + return -1 +endi +if $data03 != @taosjeff03jeff04@ then + return -1 +endi +if $data04 != @taosjeff03jeff05@ then + return -1 +endi +if $data10 != @taosjeff12jeff13@ then + return -1 +endi +if $data11 != @taosjeff12jeff14@ then + return -1 +endi +if $data12 != @taosjeff12jeff15@ then + return -1 +endi +if $data13 != @taosjeff13jeff14@ then + return -1 +endi +if $data14 != @taosjeff13jeff15@ then + return -1 +endi +print execute sql select concat_ws('jeff','data',c3),concat_ws('jeff','data',c3,c4),concat_ws('jeff','data',c3,c4,c5) from ct1 + +sql select concat_ws('jeff','data',c3),concat_ws('jeff','data',c3,c4),concat_ws('jeff','data',c3,c4,c5) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @datajeff03@ then + return -1 +endi +if $data01 != @datajeff03jeff04@ then + return -1 +endi +if $data02 != @datajeff03jeff04jeff05@ then + return -1 +endi 
+if $data10 != @datajeff13@ then + return -1 +endi +if $data11 != @datajeff13jeff14@ then + return -1 +endi +if $data12 != @datajeff13jeff14jeff15@ then + return -1 +endi +print execute sql select concat_ws('jeff','data',c7,'taos'),concat_ws('jeff','data',c8,'taos'),concat_ws('jeff','data',c9,'taos'),concat_ws('jeff',c7,c8,'taos'),concat_ws('jeff',c7,c9,'taos') from ct1 + +sql select concat_ws('jeff','data',c7,'taos'),concat_ws('jeff','data',c8,'taos'),concat_ws('jeff','data',c9,'taos'),concat_ws('jeff',c7,c8,'taos'),concat_ws('jeff',c7,c9,'taos') from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @datajeff07jefftaos@ then + return -1 +endi +if $data01 != @datajeff08jefftaos@ then + return -1 +endi +if $data02 != @datajeff09jefftaos@ then + return -1 +endi +if $data03 != @07jeff08jefftaos@ then + return -1 +endi +if $data04 != @07jeff09jefftaos@ then + return -1 +endi +if $data10 != @datajeff17jefftaos@ then + return -1 +endi +if $data11 != @datajeff18jefftaos@ then + return -1 +endi +if $data12 != @datajeff19jefftaos@ then + return -1 +endi +if $data13 != @17jeff18jefftaos@ then + return -1 +endi +if $data14 != @17jeff19jefftaos@ then + return -1 +endi +print execute sql select length(concat(c2,c3)),length(concat(c2,c3,c4)),length(concat(c2,c3,c4,c5)) from ct1 + +sql select length(concat(c2,c3)),length(concat(c2,c3,c4)),length(concat(c2,c3,c4,c5)) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @4@ then + return -1 +endi +if $data01 != @6@ then + return -1 +endi +if $data02 != @8@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data11 != @6@ then + return -1 +endi +if $data12 != @8@ then + return -1 +endi +print execute sql select length(concat(c6,c7,'taos')),length(concat(c6,c8,'taos')),length(concat(c6,c9,'taos')),length(concat(c7,c8,'taos')),length(concat(c7,c9,'taos')) from ct1 + +sql select 
length(concat(c6,c7,'taos')),length(concat(c6,c8,'taos')),length(concat(c6,c9,'taos')),length(concat(c7,c8,'taos')),length(concat(c7,c9,'taos')) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @32@ then + return -1 +endi +if $data01 != @32@ then + return -1 +endi +if $data02 != @32@ then + return -1 +endi +if $data03 != @32@ then + return -1 +endi +if $data04 != @32@ then + return -1 +endi +if $data10 != @32@ then + return -1 +endi +if $data11 != @32@ then + return -1 +endi +if $data12 != @32@ then + return -1 +endi +if $data13 != @32@ then + return -1 +endi +if $data14 != @32@ then + return -1 +endi +print execute sql select length(concat_ws('jeff','taos',c2,c3)),length(concat_ws('jeff','taos',c2,c4)),length(concat_ws('jeff','taos',c2,c5)),length(concat_ws('jeff','taos',c3,c4)),length(concat_ws('jeff','taos',c3,c5)) from ct1 + +sql select length(concat_ws('jeff','taos',c2,c3)),length(concat_ws('jeff','taos',c2,c4)),length(concat_ws('jeff','taos',c2,c5)),length(concat_ws('jeff','taos',c3,c4)),length(concat_ws('jeff','taos',c3,c5)) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @16@ then + return -1 +endi +if $data01 != @16@ then + return -1 +endi +if $data02 != @16@ then + return -1 +endi +if $data03 != @16@ then + return -1 +endi +if $data04 != @16@ then + return -1 +endi +if $data10 != @16@ then + return -1 +endi +if $data11 != @16@ then + return -1 +endi +if $data12 != @16@ then + return -1 +endi +if $data13 != @16@ then + return -1 +endi +if $data14 != @16@ then + return -1 +endi +print execute sql select length(concat_ws('jeff',c6,c7,'taos')),length(concat_ws('jeff',c6,c8,'taos')),length(concat_ws('jeff',c6,c9,'taos')),length(concat_ws('jeff',c7,c8,'taos')),length(concat_ws('jeff',c7,c9,'taos')) from ct1 + +sql select length(concat_ws('jeff',c6,c7,'taos')),length(concat_ws('jeff',c6,c8,'taos')),length(concat_ws('jeff',c6,c9,'taos')),length(concat_ws('jeff',c7,c8,'taos')),length(concat_ws('jeff',c7,c9,'taos')) from ct1 + +if 
$rows != 50 then + return -1 +endi +if $data00 != @64@ then + return -1 +endi +if $data01 != @64@ then + return -1 +endi +if $data02 != @64@ then + return -1 +endi +if $data03 != @64@ then + return -1 +endi +if $data04 != @64@ then + return -1 +endi +if $data10 != @64@ then + return -1 +endi +if $data11 != @64@ then + return -1 +endi +if $data12 != @64@ then + return -1 +endi +if $data13 != @64@ then + return -1 +endi +if $data14 != @64@ then + return -1 +endi +print execute sql select char_length(concat(c2,'taos',c3)),char_length(concat(c2,'taos',c4)),char_length(concat(c2,'taos',c5)),char_length(concat(c3,'taos',c4)),char_length(concat(c3,'taos',c5)) from ct1 + +sql select char_length(concat(c2,'taos',c3)),char_length(concat(c2,'taos',c4)),char_length(concat(c2,'taos',c5)),char_length(concat(c3,'taos',c4)),char_length(concat(c3,'taos',c5)) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @8@ then + return -1 +endi +if $data01 != @8@ then + return -1 +endi +if $data02 != @8@ then + return -1 +endi +if $data03 != @8@ then + return -1 +endi +if $data04 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data11 != @8@ then + return -1 +endi +if $data12 != @8@ then + return -1 +endi +if $data13 != @8@ then + return -1 +endi +if $data14 != @8@ then + return -1 +endi +print execute sql select char_length(concat(c6,'taos')),char_length(concat(c7,'taos')),char_length(concat(c8,'taos')),char_length(concat(c9,'taos')) from ct1 + +sql select char_length(concat(c6,'taos')),char_length(concat(c7,'taos')),char_length(concat(c8,'taos')),char_length(concat(c9,'taos')) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @6@ then + return -1 +endi +if $data01 != @6@ then + return -1 +endi +if $data02 != @6@ then + return -1 +endi +if $data03 != @6@ then + return -1 +endi +if $data10 != @6@ then + return -1 +endi +if $data11 != @6@ then + return -1 +endi +if $data12 != @6@ then + return -1 +endi +if $data13 != @6@ then + return 
-1 +endi +print execute sql select char_length(concat_ws('jeff',c2,'taos',c3)),char_length(concat_ws('jeff',c2,'taos',c4)),char_length(concat_ws('jeff',c2,'taos',c5)),char_length(concat_ws('jeff',c3,'taos',c4)),char_length(concat_ws('jeff',c3,'taos',c5)) from ct1 + +sql select char_length(concat_ws('jeff',c2,'taos',c3)),char_length(concat_ws('jeff',c2,'taos',c4)),char_length(concat_ws('jeff',c2,'taos',c5)),char_length(concat_ws('jeff',c3,'taos',c4)),char_length(concat_ws('jeff',c3,'taos',c5)) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @16@ then + return -1 +endi +if $data01 != @16@ then + return -1 +endi +if $data02 != @16@ then + return -1 +endi +if $data03 != @16@ then + return -1 +endi +if $data04 != @16@ then + return -1 +endi +if $data10 != @16@ then + return -1 +endi +if $data11 != @16@ then + return -1 +endi +if $data12 != @16@ then + return -1 +endi +if $data13 != @16@ then + return -1 +endi +if $data14 != @16@ then + return -1 +endi +print execute sql select char_length(concat_ws('jeff',c6,'taos')),char_length(concat_ws('jeff',c7,'taos')),char_length(concat_ws('jeff',c8,'taos')),char_length(concat_ws('jeff',c9,'taos')) from ct1 + +sql select char_length(concat_ws('jeff',c6,'taos')),char_length(concat_ws('jeff',c7,'taos')),char_length(concat_ws('jeff',c8,'taos')),char_length(concat_ws('jeff',c9,'taos')) from ct1 + +if $rows != 50 then + return -1 +endi +if $data00 != @10@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @10@ then + return -1 +endi +if $data03 != @10@ then + return -1 +endi +if $data10 != @10@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @10@ then + return -1 +endi +if $data13 != @10@ then + return -1 +endi +print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/scalar_triangle.sim 
b/tests/script/general/compute/scalar_triangle.sim new file mode 100644 index 0000000000000000000000000000000000000000..524a544e9fe88cc4fe5fa1d639e52a032f06222e --- /dev/null +++ b/tests/script/general/compute/scalar_triangle.sim @@ -0,0 +1,1763 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 500 +sql connect + +$dbPrefix = db +$tbPrefix = ct +$mtPrefix = st +$tbNum = 2 +$rowNum = 50 + +print =============== step1 create stable/table +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0.25 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , $x , $y , $x , $x , $x , $y , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + endw + + $i = $i + 1 +endw + +print ================= step2 +$i = 1 +$tb = $tbPrefix . $i +$stb = $mtPrefix . 
0 + + +print execute sql select c1, sin(c1), cos(c1), tan(c1), asin(c1), acos(c1), atan(c1) from ct1 +sql select c1, sin(c1), cos(c1), tan(c1), asin(c1), acos(c1), atan(c1) from ct1 +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.000000000@ then + return -1 +endi +if $data02 != @1.000000000@ then + return -1 +endi +if $data03 != @0.000000000@ then + return -1 +endi +if $data04 != @0.000000000@ then + return -1 +endi +if $data05 != @1.570796327@ then + return -1 +endi +if $data06 != @0.000000000@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.841470985@ then + return -1 +endi +if $data12 != @0.540302306@ then + return -1 +endi +if $data13 != @1.557407725@ then + return -1 +endi +if $data14 != @1.570796327@ then + return -1 +endi +if $data15 != @0.000000000@ then + return -1 +endi +if $data16 != @0.785398163@ then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data21 != @0.909297427@ then + return -1 +endi +if $data22 != @-0.416146837@ then + return -1 +endi +if $data23 != @-2.185039863@ then + return -1 +endi +if $data24 != @nan@ then + return -1 +endi +if $data25 != @nan@ then + return -1 +endi +if $data26 != @1.107148718@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @0.141120008@ then + return -1 +endi +if $data32 != @-0.989992497@ then + return -1 +endi +if $data33 != @-0.142546543@ then + return -1 +endi +if $data34 != @nan@ then + return -1 +endi +if $data35 != @nan@ then + return -1 +endi +if $data36 != @1.249045772@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @-0.756802495@ then + return -1 +endi +if $data42 != @-0.653643621@ then + return -1 +endi +if $data43 != @1.157821282@ then + return -1 +endi +if $data44 != @nan@ then + return -1 +endi +if $data45 != @nan@ then + return -1 +endi +if $data46 != @1.325817664@ then + return -1 +endi +if $data50 != @5@ then + return -1 +endi +if $data51 != @-0.958924275@ then + return -1 
+endi +if $data52 != @0.283662185@ then + return -1 +endi +if $data53 != @-3.380515006@ then + return -1 +endi +if $data54 != @nan@ then + return -1 +endi +if $data55 != @nan@ then + return -1 +endi +if $data56 != @1.373400767@ then + return -1 +endi +if $data60 != @6@ then + return -1 +endi +if $data61 != @-0.279415498@ then + return -1 +endi +if $data62 != @0.960170287@ then + return -1 +endi +if $data63 != @-0.291006191@ then + return -1 +endi +if $data64 != @nan@ then + return -1 +endi +if $data65 != @nan@ then + return -1 +endi +if $data66 != @1.405647649@ then + return -1 +endi +if $data70 != @7@ then + return -1 +endi +if $data71 != @0.656986599@ then + return -1 +endi +if $data72 != @0.753902254@ then + return -1 +endi +if $data73 != @0.871447983@ then + return -1 +endi +if $data74 != @nan@ then + return -1 +endi +if $data75 != @nan@ then + return -1 +endi +if $data76 != @1.428899272@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data81 != @0.989358247@ then + return -1 +endi +if $data82 != @-0.145500034@ then + return -1 +endi +if $data83 != @-6.799711455@ then + return -1 +endi +if $data84 != @nan@ then + return -1 +endi +if $data85 != @nan@ then + return -1 +endi +if $data86 != @1.446441332@ then + return -1 +endi +if $data90 != @9@ then + return -1 +endi +if $data91 != @0.412118485@ then + return -1 +endi +if $data92 != @-0.911130262@ then + return -1 +endi +if $data93 != @-0.452315659@ then + return -1 +endi +if $data94 != @nan@ then + return -1 +endi +if $data95 != @nan@ then + return -1 +endi +if $data96 != @1.460139106@ then + return -1 +endi +print execute sql select c1, sin(c2)+2, cos(c2)+2, cos(pow(c2,2)+2), tan(pow(c2,3)+log(c3, 2)+pow(c5,2)) as v4, asin(pow(c4, 4.5)+pow(c3, 2)), acos(log(c1,2)+log(c3,4)+pow(c6,2.8)+2) as v6 from ct1 where ts == 1601481600000 +sql select c1, sin(c2)+2, cos(c2)+2, cos(pow(c2,2)+2), tan(pow(c2,3)+log(c3, 2)+pow(c5,2)) as v4, asin(pow(c4, 4.5)+pow(c3, 2)), 
acos(log(c1,2)+log(c3,4)+pow(c6,2.8)+2) as v6 from ct1 where ts == 1601481600000 +if $data00 != @0@ then + return -1 +endi +if $data01 != @2.247403959@ then + return -1 +endi +if $data02 != @2.968912422@ then + return -1 +endi +if $data03 != @-0.472128411@ then + return -1 +endi +if $data04 != @-nan@ then + return -1 +endi +if $data05 != @0.000000000@ then + return -1 +endi +if $data06 != @nan@ then + return -1 +endi +print execute sql select c1, sin(c2), cos(c1+2), tan(c2+2)+2, sin(c2+3)+cos(c3+2)+tan(c5+2) as v4, sin(c4+4.5)+tan(c3+2), sin(c1+2)+cos(c3+4)+acos(c6+2.8)+2 as v6 from st0 where ts == 1601481600000 +sql select c1, sin(c2), cos(c1+2), tan(c2+2)+2, sin(c2+3)+cos(c3+2)+tan(c5+2) as v4, sin(c4+4.5)+tan(c3+2), sin(c1+2)+cos(c3+4)+acos(c6+2.8)+2 as v6 from st0 where ts == 1601481600000 +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.247403959@ then + return -1 +endi +if $data02 != @-0.416146837@ then + return -1 +endi +if $data03 != @0.761372384@ then + return -1 +endi +if $data04 != @-2.709381834@ then + return -1 +endi +if $data05 != @-3.162569981@ then + return -1 +endi +if $data06 != @nan@ then + return -1 +endi +if $data10 != @0@ then + return -1 +endi +if $data11 != @0.247403959@ then + return -1 +endi +if $data12 != @-0.416146837@ then + return -1 +endi +if $data13 != @0.761372384@ then + return -1 +endi +if $data14 != @-2.709381834@ then + return -1 +endi +if $data15 != @-3.162569981@ then + return -1 +endi +if $data16 != @nan@ then + return -1 +endi +print execute sql select c1, tan(c2+ 2), sin(pow(c1,2)), cos(pow(c2,2)+2), tan(pow(c2,3)+log(c3, 2)+pow(c5,2)) as v4, asin(pow(c4, 4.5)+pow(c3, 2)), acos(log(c1,2)+log(c3,4)+pow(c6,2.8)+2) as v6 from st0 where c1 == 0 +sql select c1, tan(c2+ 2), sin(pow(c1,2)), cos(pow(c2,2)+2), tan(pow(c2,3)+log(c3, 2)+pow(c5,2)) as v4, asin(pow(c4, 4.5)+pow(c3, 2)), acos(log(c1,2)+log(c3,4)+pow(c6,2.8)+2) as v6 from st0 where c1 == 0 +if $data00 != @0@ then + return -1 +endi +if $data01 != @-1.238627616@ 
then + return -1 +endi +if $data02 != @0.000000000@ then + return -1 +endi +if $data03 != @-0.472128411@ then + return -1 +endi +if $data04 != @-nan@ then + return -1 +endi +if $data05 != @0.000000000@ then + return -1 +endi +if $data06 != @nan@ then + return -1 +endi +if $data10 != @0@ then + return -1 +endi +if $data11 != @-1.238627616@ then + return -1 +endi +if $data12 != @0.000000000@ then + return -1 +endi +if $data13 != @-0.472128411@ then + return -1 +endi +if $data14 != @-nan@ then + return -1 +endi +if $data15 != @0.000000000@ then + return -1 +endi +if $data16 != @nan@ then + return -1 +endi +print execute sql select c1, atan(c2+2), asin(c1+2), acos(c2+c1)+2, acos(c2+c3)+asin(c3+c2)+pow(c5,2) as v4, acos(c4/4.5)+asin(c3-2), asin(c1/2)+log(c3,c4)+pow(c6, 2.8)+2 as v6 from st0 where c1 == 0 +sql select c1, atan(c2+2), asin(c1+2), acos(c2+c1)+2, acos(c2+c3)+asin(c3+c2)+pow(c5,2) as v4, acos(c4/4.5)+asin(c3-2), asin(c1/2)+log(c3,c4)+pow(c6, 2.8)+2 as v6 from st0 where c1 == 0 +if $data00 != @0@ then + return -1 +endi +if $data01 != @1.152571997@ then + return -1 +endi +if $data02 != @nan@ then + return -1 +endi +if $data03 != @3.318116072@ then + return -1 +endi +if $data04 != @1.570796327@ then + return -1 +endi +if $data05 != @nan@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @0@ then + return -1 +endi +if $data11 != @1.152571997@ then + return -1 +endi +if $data12 != @nan@ then + return -1 +endi +if $data13 != @3.318116072@ then + return -1 +endi +if $data14 != @1.570796327@ then + return -1 +endi +if $data15 != @nan@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +print execute sql select c1, cos(c2+2), cos(ceil(pow(c1,2))), sin(floor(pow(c2,2)+2)), sin(ceil(c2)+floor(c3+c2)+round(c5+c2)) as v4, atan(pow(c4, 4.5)+pow(c3, 2)), tan(log(c1,2)+cos(c3+4)+pow(c6,2.8)+2) as v6 from st0 order by ts desc +sql select c1, cos(c2+2), cos(ceil(pow(c1,2))), sin(floor(pow(c2,2)+2)), 
sin(ceil(c2)+floor(c3+c2)+round(c5+c2)) as v4, atan(pow(c4, 4.5)+pow(c3, 2)), tan(log(c1,2)+cos(c3+4)+pow(c6,2.8)+2) as v6 from st0 order by ts desc +if $data00 != @49@ then + return -1 +endi +if $data01 != @0.742154197@ then + return -1 +endi +if $data02 != @0.679868770@ then + return -1 +endi +if $data03 != @0.313028384@ then + return -1 +endi +if $data04 != @-0.044212563@ then + return -1 +endi +if $data05 != @1.570796302@ then + return -1 +endi +if $data06 != @0.036525682@ then + return -1 +endi +if $data10 != @49@ then + return -1 +endi +if $data11 != @0.742154197@ then + return -1 +endi +if $data12 != @0.679868770@ then + return -1 +endi +if $data13 != @0.313028384@ then + return -1 +endi +if $data14 != @-0.044212563@ then + return -1 +endi +if $data15 != @1.570796302@ then + return -1 +endi +if $data16 != @0.036525682@ then + return -1 +endi +if $data20 != @48@ then + return -1 +endi +if $data21 != @0.964966028@ then + return -1 +endi +if $data22 != @-0.350599733@ then + return -1 +endi +if $data23 != @0.070932648@ then + return -1 +endi +if $data24 != @0.945445155@ then + return -1 +endi +if $data25 != @1.570796300@ then + return -1 +endi +if $data26 != @-2.376507095@ then + return -1 +endi +if $data30 != @48@ then + return -1 +endi +if $data31 != @0.964966028@ then + return -1 +endi +if $data32 != @-0.350599733@ then + return -1 +endi +if $data33 != @0.070932648@ then + return -1 +endi +if $data34 != @0.945445155@ then + return -1 +endi +if $data35 != @1.570796300@ then + return -1 +endi +if $data36 != @-2.376507095@ then + return -1 +endi +if $data40 != @47@ then + return -1 +endi +if $data41 != @0.300592544@ then + return -1 +endi +if $data42 != @-0.895890607@ then + return -1 +endi +if $data43 != @-0.629747508@ then + return -1 +endi +if $data44 != @0.580586641@ then + return -1 +endi +if $data45 != @1.570796297@ then + return -1 +endi +if $data46 != @-0.760993034@ then + return -1 +endi +if $data50 != @47@ then + return -1 +endi +if $data51 != 
@0.300592544@ then + return -1 +endi +if $data52 != @-0.895890607@ then + return -1 +endi +if $data53 != @-0.629747508@ then + return -1 +endi +if $data54 != @0.580586641@ then + return -1 +endi +if $data55 != @1.570796297@ then + return -1 +endi +if $data56 != @-0.760993034@ then + return -1 +endi +if $data60 != @46@ then + return -1 +endi +if $data61 != @-0.640144339@ then + return -1 +endi +if $data62 != @0.136916383@ then + return -1 +endi +if $data63 != @0.536725534@ then + return -1 +endi +if $data64 != @-0.616064204@ then + return -1 +endi +if $data65 != @1.570796294@ then + return -1 +endi +if $data66 != @-1.929269971@ then + return -1 +endi +if $data70 != @46@ then + return -1 +endi +if $data71 != @-0.640144339@ then + return -1 +endi +if $data72 != @0.136916383@ then + return -1 +endi +if $data73 != @0.536725534@ then + return -1 +endi +if $data74 != @-0.616064204@ then + return -1 +endi +if $data75 != @1.570796294@ then + return -1 +endi +if $data76 != @-1.929269971@ then + return -1 +endi +if $data80 != @45@ then + return -1 +endi +if $data81 != @-0.992335469@ then + return -1 +endi +if $data82 != @-0.241134582@ then + return -1 +endi +if $data83 != @-0.623130100@ then + return -1 +endi +if $data84 != @-0.930094878@ then + return -1 +endi +if $data85 != @1.570796290@ then + return -1 +endi +if $data86 != @-7.205947409@ then + return -1 +endi +if $data90 != @45@ then + return -1 +endi +if $data91 != @-0.992335469@ then + return -1 +endi +if $data92 != @-0.241134582@ then + return -1 +endi +if $data93 != @-0.623130100@ then + return -1 +endi +if $data94 != @-0.930094878@ then + return -1 +endi +if $data95 != @1.570796290@ then + return -1 +endi +if $data96 != @-7.205947409@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5+c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from ct1 order by ts limit 2; +sql select c1, sin(c2+2), cos(sin(c1-2)), 
tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5+c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from ct1 order by ts limit 2; +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @2.154346269@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 order by ts desc) +sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 order by ts desc) +if $data00 != @49@ then + return -1 +endi +if $data01 != @0.670229176@ then + return -1 +endi +if $data02 != @0.992374553@ then + return -1 +endi +if $data03 != @0.929814367@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.713618282@ then + return -1 +endi +if $data06 != @-0.746290424@ then + return -1 +endi +if $data10 != @49@ then + return -1 +endi +if $data11 != @0.670229176@ then + return -1 +endi +if $data12 != @0.992374553@ then + return -1 +endi +if $data13 != @0.929814367@ then + return -1 +endi +if $data14 != @nan@ then + return -1 +endi +if $data15 != @0.713618282@ then + return -1 +endi +if $data16 != @-0.746290424@ then + return -1 +endi +if $data20 != @48@ 
then + return -1 +endi +if $data21 != @-0.262374854@ then + return -1 +endi +if $data22 != @0.620208114@ then + return -1 +endi +if $data23 != @1.817585733@ then + return -1 +endi +if $data24 != @nan@ then + return -1 +endi +if $data25 != @1.211884234@ then + return -1 +endi +if $data26 != @5.183714989@ then + return -1 +endi +if $data30 != @48@ then + return -1 +endi +if $data31 != @-0.262374854@ then + return -1 +endi +if $data32 != @0.620208114@ then + return -1 +endi +if $data33 != @1.817585733@ then + return -1 +endi +if $data34 != @nan@ then + return -1 +endi +if $data35 != @1.211884234@ then + return -1 +endi +if $data36 != @5.183714989@ then + return -1 +endi +if $data40 != @47@ then + return -1 +endi +if $data41 != @-0.953752653@ then + return -1 +endi +if $data42 != @0.659304076@ then + return -1 +endi +if $data43 != @3.457510675@ then + return -1 +endi +if $data44 != @nan@ then + return -1 +endi +if $data45 != @0.882083819@ then + return -1 +endi +if $data46 != @2.630220446@ then + return -1 +endi +if $data50 != @47@ then + return -1 +endi +if $data51 != @-0.953752653@ then + return -1 +endi +if $data52 != @0.659304076@ then + return -1 +endi +if $data53 != @3.457510675@ then + return -1 +endi +if $data54 != @nan@ then + return -1 +endi +if $data55 != @0.882083819@ then + return -1 +endi +if $data56 != @2.630220446@ then + return -1 +endi +if $data60 != @46@ then + return -1 +endi +if $data61 != @-0.768254661@ then + return -1 +endi +if $data62 != @0.999843325@ then + return -1 +endi +if $data63 != @1.276316926@ then + return -1 +endi +if $data64 != @nan@ then + return -1 +endi +if $data65 != @-0.300459259@ then + return -1 +endi +if $data66 != @0.133920399@ then + return -1 +endi +if $data70 != @46@ then + return -1 +endi +if $data71 != @-0.768254661@ then + return -1 +endi +if $data72 != @0.999843325@ then + return -1 +endi +if $data73 != @1.276316926@ then + return -1 +endi +if $data74 != @nan@ then + return -1 +endi +if $data75 != @-0.300459259@ then 
+ return -1 +endi +if $data76 != @0.133920399@ then + return -1 +endi +if $data80 != @45@ then + return -1 +endi +if $data81 != @0.123573123@ then + return -1 +endi +if $data82 != @0.673565060@ then + return -1 +endi +if $data83 != @1.519318619@ then + return -1 +endi +if $data84 != @nan@ then + return -1 +endi +if $data85 != @-1.566189594@ then + return -1 +endi +if $data86 != @5.513771854@ then + return -1 +endi +if $data90 != @45@ then + return -1 +endi +if $data91 != @0.123573123@ then + return -1 +endi +if $data92 != @0.673565060@ then + return -1 +endi +if $data93 != @1.519318619@ then + return -1 +endi +if $data94 != @nan@ then + return -1 +endi +if $data95 != @-1.566189594@ then + return -1 +endi +if $data96 != @5.513771854@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 order by ts limit 2); +sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 order by ts limit 2); +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @1.832595715@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, 
asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 ) order by ts desc +sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 ) order by ts desc +if $data00 != @49@ then + return -1 +endi +if $data01 != @0.670229176@ then + return -1 +endi +if $data02 != @0.992374553@ then + return -1 +endi +if $data03 != @0.929814367@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.713618282@ then + return -1 +endi +if $data06 != @-0.746290424@ then + return -1 +endi +if $data10 != @49@ then + return -1 +endi +if $data11 != @0.670229176@ then + return -1 +endi +if $data12 != @0.992374553@ then + return -1 +endi +if $data13 != @0.929814367@ then + return -1 +endi +if $data14 != @nan@ then + return -1 +endi +if $data15 != @0.713618282@ then + return -1 +endi +if $data16 != @-0.746290424@ then + return -1 +endi +if $data20 != @48@ then + return -1 +endi +if $data21 != @-0.262374854@ then + return -1 +endi +if $data22 != @0.620208114@ then + return -1 +endi +if $data23 != @1.817585733@ then + return -1 +endi +if $data24 != @nan@ then + return -1 +endi +if $data25 != @1.211884234@ then + return -1 +endi +if $data26 != @5.183714989@ then + return -1 +endi +if $data30 != @48@ then + return -1 +endi +if $data31 != @-0.262374854@ then + return -1 +endi +if $data32 != @0.620208114@ then + return -1 +endi +if $data33 != @1.817585733@ then + return -1 +endi +if $data34 != @nan@ then + return -1 +endi +if $data35 != @1.211884234@ then + return -1 +endi +if $data36 != @5.183714989@ then + return -1 +endi +if $data40 != @47@ then + return -1 +endi +if $data41 != @-0.953752653@ then + return -1 +endi +if $data42 != @0.659304076@ then + return -1 +endi +if $data43 != @3.457510675@ then + return -1 +endi +if $data44 != @nan@ then + return -1 
+endi +if $data45 != @0.882083819@ then + return -1 +endi +if $data46 != @2.630220446@ then + return -1 +endi +if $data50 != @47@ then + return -1 +endi +if $data51 != @-0.953752653@ then + return -1 +endi +if $data52 != @0.659304076@ then + return -1 +endi +if $data53 != @3.457510675@ then + return -1 +endi +if $data54 != @nan@ then + return -1 +endi +if $data55 != @0.882083819@ then + return -1 +endi +if $data56 != @2.630220446@ then + return -1 +endi +if $data60 != @46@ then + return -1 +endi +if $data61 != @-0.768254661@ then + return -1 +endi +if $data62 != @0.999843325@ then + return -1 +endi +if $data63 != @1.276316926@ then + return -1 +endi +if $data64 != @nan@ then + return -1 +endi +if $data65 != @-0.300459259@ then + return -1 +endi +if $data66 != @0.133920399@ then + return -1 +endi +if $data70 != @46@ then + return -1 +endi +if $data71 != @-0.768254661@ then + return -1 +endi +if $data72 != @0.999843325@ then + return -1 +endi +if $data73 != @1.276316926@ then + return -1 +endi +if $data74 != @nan@ then + return -1 +endi +if $data75 != @-0.300459259@ then + return -1 +endi +if $data76 != @0.133920399@ then + return -1 +endi +if $data80 != @45@ then + return -1 +endi +if $data81 != @0.123573123@ then + return -1 +endi +if $data82 != @0.673565060@ then + return -1 +endi +if $data83 != @1.519318619@ then + return -1 +endi +if $data84 != @nan@ then + return -1 +endi +if $data85 != @-1.566189594@ then + return -1 +endi +if $data86 != @5.513771854@ then + return -1 +endi +if $data90 != @45@ then + return -1 +endi +if $data91 != @0.123573123@ then + return -1 +endi +if $data92 != @0.673565060@ then + return -1 +endi +if $data93 != @1.519318619@ then + return -1 +endi +if $data94 != @nan@ then + return -1 +endi +if $data95 != @-1.566189594@ then + return -1 +endi +if $data96 != @5.513771854@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), 
tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 ) +sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from st0 ) +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @1.832595715@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +if $data20 != @2@ then + return -1 +endi +if $data21 != @-0.756802495@ then + return -1 +endi +if $data22 != @1.000000000@ then + return -1 +endi +if $data23 != @1.234030298@ then + return -1 +endi +if $data24 != @nan@ then + return -1 +endi +if $data25 != @0.755422294@ then + return -1 +endi +if $data26 != @0.058157641@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @-0.958924275@ then + return -1 +endi +if $data32 != @0.666366745@ then + return -1 +endi +if $data33 != @3.428875323@ then + return -1 +endi +if $data34 != @nan@ then + return -1 +endi +if $data35 != @1.008737178@ then + return -1 +endi +if $data36 != @2.578037959@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @-0.279415498@ then + return -1 +endi +if $data42 != @0.614300282@ then + return -1 +endi +if $data43 != @1.853464439@ then + return -1 +endi +if $data44 != @nan@ then + return -1 +endi +if $data45 != @0.382340276@ then + return -1 +endi +if $data46 != @5.147179529@ then + return -1 +endi +if $data50 
!= @5@ then + return -1 +endi +if $data51 != @0.656986599@ then + return -1 +endi +if $data52 != @0.990059086@ then + return -1 +endi +if $data53 != @0.886449574@ then + return -1 +endi +if $data54 != @nan@ then + return -1 +endi +if $data55 != @-0.876294736@ then + return -1 +endi +if $data56 != @-0.924536117@ then + return -1 +endi +if $data60 != @6@ then + return -1 +endi +if $data61 != @0.989358247@ then + return -1 +endi +if $data62 != @0.727035131@ then + return -1 +endi +if $data63 != @3.124320480@ then + return -1 +endi +if $data64 != @nan@ then + return -1 +endi +if $data65 != @-1.869688257@ then + return -1 +endi +if $data66 != @2.172420891@ then + return -1 +endi +if $data70 != @7@ then + return -1 +endi +if $data71 != @0.412118485@ then + return -1 +endi +if $data72 != @0.574400879@ then + return -1 +endi +if $data73 != @2.137595835@ then + return -1 +endi +if $data74 != @nan@ then + return -1 +endi +if $data75 != @-1.811908862@ then + return -1 +endi +if $data76 != @4.862055338@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data81 != @-0.544021111@ then + return -1 +endi +if $data82 != @0.961216805@ then + return -1 +endi +if $data83 != @0.578734473@ then + return -1 +endi +if $data84 != @nan@ then + return -1 +endi +if $data85 != @-0.719965518@ then + return -1 +endi +if $data86 != @-4.087614772@ then + return -1 +endi +if $data90 != @9@ then + return -1 +endi +if $data91 != @-0.999990207@ then + return -1 +endi +if $data92 != @0.791836209@ then + return -1 +endi +if $data93 != @2.776612512@ then + return -1 +endi +if $data94 != @nan@ then + return -1 +endi +if $data95 != @0.592988627@ then + return -1 +endi +if $data96 != @1.796697094@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 ) order by ts limit 2; +sql select c1, sin(c2+2), cos(sin(c1-2)), 
tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 ) order by ts limit 2; +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @1.832595715@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +print execute sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 ) limit 2; +sql select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6 from (select * from ct1 ) limit 2; +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @1.832595715@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +print execute sql select * 
from (select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6, ts from st0 order by ts desc) +sql select * from (select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6, ts from st0 order by ts desc) +if $data00 != @49@ then + return -1 +endi +if $data01 != @0.670229176@ then + return -1 +endi +if $data02 != @0.992374553@ then + return -1 +endi +if $data03 != @0.929814367@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.713618282@ then + return -1 +endi +if $data06 != @-0.746290424@ then + return -1 +endi +if $data07 != @20-10-01 00:49:00.000@ then + return -1 +endi +if $data10 != @49@ then + return -1 +endi +if $data11 != @0.670229176@ then + return -1 +endi +if $data12 != @0.992374553@ then + return -1 +endi +if $data13 != @0.929814367@ then + return -1 +endi +if $data14 != @nan@ then + return -1 +endi +if $data15 != @0.713618282@ then + return -1 +endi +if $data16 != @-0.746290424@ then + return -1 +endi +if $data17 != @20-10-01 00:49:00.000@ then + return -1 +endi +if $data20 != @48@ then + return -1 +endi +if $data21 != @-0.262374854@ then + return -1 +endi +if $data22 != @0.620208114@ then + return -1 +endi +if $data23 != @1.817585733@ then + return -1 +endi +if $data24 != @nan@ then + return -1 +endi +if $data25 != @1.211884234@ then + return -1 +endi +if $data26 != @5.183714989@ then + return -1 +endi +if $data27 != @20-10-01 00:48:00.000@ then + return -1 +endi +if $data30 != @48@ then + return -1 +endi +if $data31 != @-0.262374854@ then + return -1 +endi +if $data32 != @0.620208114@ then + return -1 +endi +if $data33 != @1.817585733@ then + return -1 +endi +if $data34 != @nan@ then + return -1 +endi +if $data35 != @1.211884234@ then + return -1 +endi +if $data36 != @5.183714989@ then + return -1 +endi +if 
$data37 != @20-10-01 00:48:00.000@ then + return -1 +endi +if $data40 != @47@ then + return -1 +endi +if $data41 != @-0.953752653@ then + return -1 +endi +if $data42 != @0.659304076@ then + return -1 +endi +if $data43 != @3.457510675@ then + return -1 +endi +if $data44 != @nan@ then + return -1 +endi +if $data45 != @0.882083819@ then + return -1 +endi +if $data46 != @2.630220446@ then + return -1 +endi +if $data47 != @20-10-01 00:47:00.000@ then + return -1 +endi +if $data50 != @47@ then + return -1 +endi +if $data51 != @-0.953752653@ then + return -1 +endi +if $data52 != @0.659304076@ then + return -1 +endi +if $data53 != @3.457510675@ then + return -1 +endi +if $data54 != @nan@ then + return -1 +endi +if $data55 != @0.882083819@ then + return -1 +endi +if $data56 != @2.630220446@ then + return -1 +endi +if $data57 != @20-10-01 00:47:00.000@ then + return -1 +endi +if $data60 != @46@ then + return -1 +endi +if $data61 != @-0.768254661@ then + return -1 +endi +if $data62 != @0.999843325@ then + return -1 +endi +if $data63 != @1.276316926@ then + return -1 +endi +if $data64 != @nan@ then + return -1 +endi +if $data65 != @-0.300459259@ then + return -1 +endi +if $data66 != @0.133920399@ then + return -1 +endi +if $data67 != @20-10-01 00:46:00.000@ then + return -1 +endi +if $data70 != @46@ then + return -1 +endi +if $data71 != @-0.768254661@ then + return -1 +endi +if $data72 != @0.999843325@ then + return -1 +endi +if $data73 != @1.276316926@ then + return -1 +endi +if $data74 != @nan@ then + return -1 +endi +if $data75 != @-0.300459259@ then + return -1 +endi +if $data76 != @0.133920399@ then + return -1 +endi +if $data77 != @20-10-01 00:46:00.000@ then + return -1 +endi +if $data80 != @45@ then + return -1 +endi +if $data81 != @0.123573123@ then + return -1 +endi +if $data82 != @0.673565060@ then + return -1 +endi +if $data83 != @1.519318619@ then + return -1 +endi +if $data84 != @nan@ then + return -1 +endi +if $data85 != @-1.566189594@ then + return -1 +endi +if 
$data86 != @5.513771854@ then + return -1 +endi +if $data87 != @20-10-01 00:45:00.000@ then + return -1 +endi +if $data90 != @45@ then + return -1 +endi +if $data91 != @0.123573123@ then + return -1 +endi +if $data92 != @0.673565060@ then + return -1 +endi +if $data93 != @1.519318619@ then + return -1 +endi +if $data94 != @nan@ then + return -1 +endi +if $data95 != @-1.566189594@ then + return -1 +endi +if $data96 != @5.513771854@ then + return -1 +endi +if $data97 != @20-10-01 00:45:00.000@ then + return -1 +endi +print execute sql select * from (select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6, ts from ct1 order by ts limit 2); +sql select * from (select c1, sin(c2+2), cos(sin(c1-2)), tan(cos(c2*2))+2, asin(acos(c2%3))+acos(c3/2)+atan(c5*c2) as v4, sin(c4+4.5)+cos(c3/2), tan(c1)+log(c3, c4)+sin(c6+c3)+2 as v6, ts from ct1 order by ts limit 2); +if $data00 != @0@ then + return -1 +endi +if $data01 != @0.778073197@ then + return -1 +endi +if $data02 != @0.614300282@ then + return -1 +endi +if $data03 != @3.203726628@ then + return -1 +endi +if $data04 != @nan@ then + return -1 +endi +if $data05 != @0.022469882@ then + return -1 +endi +if $data06 != @-nan@ then + return -1 +endi +if $data07 != @20-10-01 00:00:00.000@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @0.141120008@ then + return -1 +endi +if $data12 != @0.666366745@ then + return -1 +endi +if $data13 != @1.558041126@ then + return -1 +endi +if $data14 != @1.832595715@ then + return -1 +endi +if $data15 != @0.172042236@ then + return -1 +endi +if $data16 != @-nan@ then + return -1 +endi +if $data17 != @20-10-01 00:01:00.000@ then + return -1 +endi + +print ===============> step 3 sql_error stable, group by, window +sql_error select sin(c2) from $stb group by tbname; + +sql_error select sin(c2) from $stb group by tgcol; + +sql_error select sin(c2) from $stb 
group by c3; + +sql_error select sin(c2) from $stb interval(1m); + +sql_error select sin(c2) from $stb state_window(c7); + +sql_error select sin(c2) from $tb state_window(c7); + +sql_error select sin(c2) from $stb session(ts, 30s); + +sql_error select sin(c2) from $tb session(ts, 30s); + +sql_error select sin(c2) from $stb slimit 2; + +sql_error select sin(c2) from $stb interval(1m) slimit 2; + +print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/str_char_length.sim b/tests/script/general/compute/str_char_length.sim new file mode 100644 index 0000000000000000000000000000000000000000..5ef295db8893dfbcf9fd38f07c6849764251dcce --- /dev/null +++ b/tests/script/general/compute/str_char_length.sim @@ -0,0 +1,1315 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select char_length(*) from tb1; +sql_error select char_length(*) from tb1; +print execute sql select char_length(*) + 1 as a from tb1; +sql_error select char_length(*) + 1 as a from tb1; +print execute sql select char_length(tb1.*) + 1 as a from tb1; +sql_error select char_length(tb1.*) + 1 as a from tb1; +print execute sql select char_length(*) from tb1; +sql_error select char_length(*) from tb1; +print execute sql select char_length(c1) from tb1; +sql_error select char_length(c1) from tb1; +print execute sql select char_length(13) from tb1; +sql_error select char_length(13) from tb1; +print execute sql select char_length(c1) from tb1; +sql_error select char_length(c1) from tb1; +print execute sql select char_length(c2) from tb1; +sql_error select char_length(c2) from tb1; +print execute sql select char_length(c3) from tb1; +sql_error select char_length(c3) from tb1; +print execute sql select char_length(c4) from tb1; +sql_error select char_length(c4) from tb1; +print execute sql select char_length(c5) from tb1; +sql_error select 
char_length(c5) from tb1; +print execute sql select char_length(c6) from tb1; +sql_error select char_length(c6) from tb1; +print execute sql select char_length(c7) from tb1; +sql_error select char_length(c7) from tb1; +print execute sql select char_length(c9) from tb1; +sql select char_length(c9) from tb1; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +print execute sql select char_length(c15) from tb1; +sql select char_length(c15) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select char_length(c10) from tb1; +sql_error select char_length(c10) from tb1; +print execute sql select char_length(c11) from tb1; +sql_error select char_length(c11) from tb1; +print execute sql select char_length(c12) from tb1; +sql_error select char_length(c12) from tb1; +print execute sql select char_length(c13) from tb1; +sql_error select char_length(c13) from tb1; +print execute sql select char_length('12345678900000000000000000') from tb1; +sql select char_length('12345678900000000000000000') from tb1; +if $data00 != @26@ then + return -1 +endi +if $data10 != @26@ then + return -1 +endi +if $data20 != @26@ then + return -1 +endi +if $data30 != @26@ then + return -1 +endi +if $data40 != @26@ then + return -1 +endi +if $data50 != @26@ then + return -1 +endi +if $data60 != @26@ then + return -1 +endi +print execute sql select distinct char_length(123) from tb1; +sql_error select distinct char_length(123) from tb1; +print execute sql select char_length(t1) from 
stb1; +sql_error select char_length(t1) from stb1; +print execute sql select char_length(c9),avg(c3) from tb1; +sql_error select char_length(c9),avg(c3) from tb1; +print execute sql select char_length(c9),top(c3,1) from tb1; +sql_error select char_length(c9),top(c3,1) from tb1; +print execute sql select char_length(concat(c9,c15)) from tb1 session(ts, 1s); +sql_error select char_length(concat(c9,c15)) from tb1 session(ts, 1s); +print execute sql select char_length(concat(c9,c15)) from tb1 STATE_WINDOW(c1); +sql_error select char_length(concat(c9,c15)) from tb1 STATE_WINDOW(c1); +print execute sql select char_length(concat(c9,c15)) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select char_length(concat(c9,c15)) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select char_length(concat(c9,c15)) from stb1 group by t1; +sql_error select char_length(concat(c9,c15)) from stb1 group by t1; +print execute sql select char_length(concat(c9,c15)) from stb1 group by ts; +sql_error select char_length(concat(c9,c15)) from stb1 group by ts; +print execute sql select char_length(concat(c9,c15)) from stb1 group by c1; +sql_error select char_length(concat(c9,c15)) from stb1 group by c1; +print execute sql select char_length(concat(c9,c15)) from stb1 group by tbname; +sql_error select char_length(concat(c9,c15)) from stb1 group by tbname; +print execute sql select char_length(concat(c9,c15)) from tb1 order by c2; +sql_error select char_length(concat(c9,c15)) from tb1 order by c2; +print execute sql select char_length(c9),char_length(c15) from tbn; +sql select char_length(c9),char_length(c15) from tbn; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @4@ then + 
return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select char_length(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select char_length(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select char_length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +sql select char_length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select char_length("abc") from tb1; +sql select char_length("abc") from tb1; +if $data00 != @3@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select char_length(concat(c9,c15)) from tb1; +sql select char_length(concat(c9,c15)) from tb1; +if $data00 != @5@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @5@ then + return -1 +endi +if $data40 != @5@ then + return -1 +endi +if $data50 != @7@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +print execute sql select char_length((concat(c9,c15))) from tb1; +sql select char_length((concat(c9,c15))) from tb1; +if $data00 != @5@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @5@ then + return -1 +endi +if $data40 != 
@5@ then + return -1 +endi +if $data50 != @7@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +print execute sql select char_length('11')+c2 from tb1; +sql select char_length('11')+c2 from tb1; +if $data00 != @3.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @6.000000000@ then + return -1 +endi +if $data50 != @129.000000000@ then + return -1 +endi +if $data60 != @-125.000000000@ then + return -1 +endi +print execute sql select char_length(c9)+c2 from tb1; +sql select char_length(c9)+c2 from tb1; +if $data00 != @5.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @8.000000000@ then + return -1 +endi +if $data50 != @131.000000000@ then + return -1 +endi +if $data60 != @-123.000000000@ then + return -1 +endi +print execute sql select char_length(c15)+11 from tb1; +sql select char_length(c15)+11 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @12.000000000@ then + return -1 +endi +if $data40 != @12.000000000@ then + return -1 +endi +if $data50 != @14.000000000@ then + return -1 +endi +if $data60 != @11.000000000@ then + return -1 +endi +print execute sql select char_length(c9),c9,c2 from tb1; +sql select char_length(c9),c9,c2 from tb1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1234@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data30 != @4@ then + return -1 
+endi +if $data31 != @3456@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @4567@ then + return -1 +endi +if $data42 != @4@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @5678@ then + return -1 +endi +if $data52 != @127@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @6789@ then + return -1 +endi +if $data62 != @-127@ then + return -1 +endi +print execute sql select char_length(c9),t1,ts,tbname,_C0,_c0 from tb1; +sql select char_length(c9),t1,ts,tbname,_C0,_c0 from tb1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data03 != @tb1@ then + return -1 +endi +if $data04 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data05 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data12 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data13 != @tb1@ then + return -1 +endi +if $data14 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data15 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data22 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data23 != @tb1@ then + return -1 +endi +if $data24 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data25 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data32 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data33 != @tb1@ then + return -1 +endi +if $data34 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data35 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data42 != @21-11-11 
09:00:04.000@ then + return -1 +endi +if $data43 != @tb1@ then + return -1 +endi +if $data44 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data45 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data52 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data53 != @tb1@ then + return -1 +endi +if $data54 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data55 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +if $data62 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data63 != @tb1@ then + return -1 +endi +if $data64 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data65 != @21-11-11 09:00:06.000@ then + return -1 +endi +print execute sql select char_length(c9),floor(c3) from tb1; +sql select char_length(c9),floor(c3) from tb1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @3@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @32767@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @-32767@ then + return -1 +endi +print execute sql select char_length(c9),char_length(concat(c9,c15)) from tb1; +sql select char_length(c9),char_length(concat(c9,c15)) from tb1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @5@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data30 != @4@ then + return -1 
+endi +if $data31 != @5@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @5@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @7@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @4@ then + return -1 +endi +print execute sql select char_length(concat(c9,c15)) from tb1 where c9 is not null and c15 is not null; +sql select char_length(concat(c9,c15)) from tb1 where c9 is not null and c15 is not null; +if $data00 != @5@ then + return -1 +endi +if $data10 != @5@ then + return -1 +endi +if $data20 != @5@ then + return -1 +endi +if $data30 != @7@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +print execute sql select char_length(c15) from tb1 order by ts desc; +sql select char_length(c15) from tb1 order by ts desc; +if $data00 != @0@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1@ then + return -1 +endi +print execute sql select char_length(concat(c9,c15)) from tb1 order by ts desc; +sql select char_length(concat(c9,c15)) from tb1 order by ts desc; +if $data00 != @4@ then + return -1 +endi +if $data10 != @7@ then + return -1 +endi +if $data20 != @5@ then + return -1 +endi +if $data30 != @5@ then + return -1 +endi +if $data40 != @NULL@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @5@ then + return -1 +endi +print execute sql select char_length(concat(c9,c15)) from tb1 order by ts desc limit 3 offset 2; +sql select char_length(concat(c9,c15)) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @5@ then + return -1 +endi +if $data10 != @5@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +print execute sql select char_length(c15) from stb1; +sql select char_length(c15) from stb1; +if $data00 != 
@1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi +print execute sql select char_length(c15) from stb1 order by ts desc; +sql select char_length(c15) from stb1 order by ts desc; +if $data00 != @1@ then + if $data00 != @0@ then + return -1 + endi +endi +if $data20 != @1@ then + if $data20 != @3@ then + return -1 + endi +endi +if $data40 != @1@ then + if $data40 != @1@ then + return -1 + endi +endi +if $data60 != @1@ then + if $data60 != @1@ then + return -1 + endi +endi +if $data80 != @1@ then + if $data80 != @NULL@ then + return -1 + endi +endi +print execute sql select char_length(c15),t1 from stb1 order by ts desc; +sql select char_length(c15),t1 from stb1 order by ts desc; +if $data00 != @1@ then + if $data00 != @0@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @1@ then + if $data20 != @3@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @1@ then + if $data40 != @1@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @1@ then + if $data60 != @1@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @1@ then + if $data80 != @NULL@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select char_length(c15),tbname from stb1; +sql select char_length(c15),tbname from stb1; +if $data00 != @1@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != 
@NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select char_length(c15),tbname from stb1 where t1 > 1; +sql select char_length(c15),tbname from stb1 where t1 > 1; +if $data00 != @1@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @1@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @1@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select char_length(c9),char_length(c15) from tbn; +sql select char_length(c9),char_length(c15) from tbn; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @4@ then + return -1 
+endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select char_length(c9),char_length(c15) from tbn order by ts desc; +sql select char_length(c9),char_length(c15) from tbn order by ts desc; +if $data00 != @4@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select char_length(char_length(c9)) from tbn; +sql_error select char_length(char_length(c9)) from tbn; +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 interval(1s)); +sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +print execute sql select char_length(c15) from (select * from stb1); +sql select char_length(c15) from (select * from stb1); +if $data00 != @0@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @0@ then + return -1 +endi +if $data40 != @0@ then + return -1 +endi +if $data50 != @0@ then + return -1 +endi +if $data60 != @0@ then + 
return -1 +endi +if $data70 != @0@ then + return -1 +endi +if $data80 != @0@ then + return -1 +endi +if $data90 != @0@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; 
+sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @10@ then + return -1 +endi +if $data40 != @9@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @8@ then + return -1 +endi +if $data70 != @8@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data90 != @8@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @9@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @8@ then + return -1 +endi +if $data70 != @8@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data90 != @10@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select char_length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @10@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 
+endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @9@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select char_length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +sql select char_length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select char_length(tb1.c15),char_length(tb2.c15) from tb1,tb2 where tb1.ts=tb2.ts; +sql select char_length(tb1.c15),char_length(tb2.c15) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select char_length(c15) from tb1 union all select char_length(c15) from tb2; +sql select char_length(c15) from tb1 union all select char_length(c15) from tb2; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + 
return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi diff --git a/tests/script/general/compute/str_char_length2.sim b/tests/script/general/compute/str_char_length2.sim new file mode 100644 index 0000000000000000000000000000000000000000..e54f38b4dabbce0beb516e82be8e451410d4af98 --- /dev/null +++ b/tests/script/general/compute/str_char_length2.sim @@ -0,0 +1,309 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select char_length(stb1.c14),char_length(stba.c15) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select char_length(stb1.c14),char_length(stba.c15) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select char_length(c14) as a from stb1 union all select char_length(c15) as a from stba; +sql select char_length(c14) as a from stb1 union all select char_length(c15) as a from stba; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then 
+ return -1 +endi +print execute sql select char_length(c8) from stba; +sql select char_length(c8) from stba; +if $data00 != @3@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data70 != @3@ then + return -1 +endi +if $data80 != @3@ then + return -1 +endi +if $data90 != @3@ then + return -1 +endi +print execute sql select char_length(c9) from stba; +sql select char_length(c9) from stba; +if $data00 != @4@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data70 != @4@ then + return -1 +endi +if $data80 != @4@ then + return -1 +endi +if $data90 != @4@ then + return -1 +endi +print execute sql select char_length(cast(min(c2) as binary(20))) from tba1; +sql select char_length(cast(min(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select char_length(cast(max(c2) as binary(20))) from tba1; +sql select char_length(cast(max(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select char_length(cast(count(c2) as binary(20))) from tba1; +sql select char_length(cast(count(c2) as binary(20))) from tba1; +if $data00 != @2@ then + return -1 +endi +print execute sql select char_length(cast(sum(c2) as binary(20))) from tba1; +sql select char_length(cast(sum(c2) as binary(20))) from tba1; +if $data00 != @3@ then + return -1 +endi +print execute sql select char_length(cast(avg(c2) as binary(20))) from tba1; +sql select char_length(cast(avg(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select 
char_length(cast(percentile(c2, 10) as binary(20))) from tba1; +sql select char_length(cast(percentile(c2, 10) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(apercentile(c2, 10) as binary(20))) from tba1; +sql select char_length(cast(apercentile(c2, 10) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(stddev(c2) as binary(20))) from tba1; +sql select char_length(cast(stddev(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(spread(c2) as binary(20))) from tba1; +sql select char_length(cast(spread(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(twa(c2) as binary(20))) from tba1; +sql select char_length(cast(twa(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +sql_error select char_length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +print execute sql select char_length(cast(interp(c2) as binary(20))) from tba1 every(1s) +sql select char_length(cast(interp(c2) as binary(20))) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != 
@1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1@ then + return -1 +endi +print execute sql select char_length(cast(interp(c2) as binary(20))) stba every(1s) group by tbname; +sql_error select char_length(cast(interp(c2) as binary(20))) stba every(1s) group by tbname; +print execute sql select char_length(cast(elapsed(ts) as binary(20))) from tba1; +sql select char_length(cast(elapsed(ts) as binary(20))) from tba1; +if $data00 != @12@ then + return -1 +endi +print execute sql select char_length(cast(rate(c2) as binary(20))) from tba1; +sql select char_length(cast(rate(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(irate(c2) as binary(20))) from tba1; +sql select char_length(cast(irate(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select char_length(cast(first(c2) as binary(20))) from tba1; +sql select char_length(cast(first(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select char_length(cast(last(c2) as binary(20))) from tba1; +sql select char_length(cast(last(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select char_length(cast(last_row(c2) as binary(20))) from tba1; +sql select char_length(cast(last_row(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select char_length(cast(top(c2, 1) as binary(20))) from tba1; +sql_error select char_length(cast(top(c2, 1) as binary(20))) from tba1; +print execute sql select char_length(cast(bottom(c2, 1) as binary(20))) from tba1; +sql_error select char_length(cast(bottom(c2, 1) as binary(20))) from tba1; +print execute 
sql select char_length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +sql_error select char_length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +print execute sql select char_length(cast(derivative(c2, 1s, 0) as binary(20))) from tba1; +sql_error select char_length(cast(derivative(c2, 1s, 0) as binary(20))) from tba1; +print execute sql select char_length(cast(diff(c2) as binary(20))) from tba1; +sql_error select char_length(cast(diff(c2) as binary(20))) from tba1; +print execute sql select char_length(cast(csum(c2) as binary(20))) from tba1; +sql_error select char_length(cast(csum(c2) as binary(20))) from tba1; +print execute sql select char_length(cast(mavg(c2,2) as binary(20))) from tba1; +sql_error select char_length(cast(mavg(c2,2) as binary(20))) from tba1; +print execute sql select char_length(cast(sample(c2,2) as binary(20))) from tba1; +sql_error select char_length(cast(sample(c2,2) as binary(20))) from tba1; +print execute sql select char_length(cast(_block_dist() as binary(20))) from tba1; +sql_error select char_length(cast(_block_dist() as binary(20))) from tba1; diff --git a/tests/script/general/compute/str_concat.sim b/tests/script/general/compute/str_concat.sim new file mode 100644 index 0000000000000000000000000000000000000000..b968e357af0fc0c136abce453628e32cfa67f4bd --- /dev/null +++ b/tests/script/general/compute/str_concat.sim @@ -0,0 +1,1592 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select concat(c8,*) from tb1; +sql_error select concat(c8,*) from tb1; +print execute sql select concat(c8,*) from tb1; +sql_error select concat(c8,*) from tb1; +print execute sql select concat(c8,*) from tb1; +sql_error select concat(c8,*) from tb1; +print execute sql select concat(c8,*) from tb1; +sql_error select concat(c8,*) from tb1; +print execute sql select concat(c8,*) as a from tb1; +sql_error select concat(c8,*) as a from tb1; +print execute sql select concat(concat(c9,c15), '1') as a from tb1; +sql 
select concat(concat(c9,c15), '1') as a from tb1; +if $data00 != @123411@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @345631@ then + return -1 +endi +if $data40 != @456741@ then + return -1 +endi +if $data50 != @56782781@ then + return -1 +endi +if $data60 != @67891@ then + return -1 +endi +print execute sql select concat(concat(c8,tb1.*), '1') as a from tb1; +sql_error select concat(concat(c8,tb1.*), '1') as a from tb1; +print execute sql select concat(c8,*) from tb1; +sql_error select concat(c8,*) from tb1; +print execute sql select concat(c8,c14) from tb1; +sql select concat(c8,c14) from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +print execute sql select concat(c8,c15) from tb1; +sql_error select concat(c8,c15) from tb1; +print execute sql select concat(c8,concat(c14,c8)) from tb1; +sql select concat(c8,concat(c14,c8)) from tb1; +if $data00 != @1231123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564456@ then + return -1 +endi +if $data50 != @56727567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(c8,13) from tb1; +sql_error select concat(c8,13) from tb1; +print execute sql select concat(c8,c14) from tb1; +sql select concat(c8,c14) from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data60 
!= @678@ then + return -1 +endi +print execute sql select concat(c8,c8) from tb1; +sql select concat(c8,c8) from tb1; +if $data00 != @123123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @234234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456456@ then + return -1 +endi +if $data50 != @567567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(c8,c9) from tb1; +sql_error select concat(c8,c9) from tb1; +print execute sql select concat(c8,c4) from tb1; +sql_error select concat(c8,c4) from tb1; +print execute sql select concat(c8,c5) from tb1; +sql_error select concat(c8,c5) from tb1; +print execute sql select concat(c8,c6) from tb1; +sql_error select concat(c8,c6) from tb1; +print execute sql select concat(c8,c7) from tb1; +sql_error select concat(c8,c7) from tb1; +print execute sql select concat(c8,c8) from tb1; +sql select concat(c8,c8) from tb1; +if $data00 != @123123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @234234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456456@ then + return -1 +endi +if $data50 != @567567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(c9,c8) from tb1; +sql_error select concat(c9,c8) from tb1; +print execute sql select concat(c8,c10) from tb1; +sql_error select concat(c8,c10) from tb1; +print execute sql select concat(c8,c11) from tb1; +sql_error select concat(c8,c11) from tb1; +print execute sql select concat(c8,c12) from tb1; +sql_error select concat(c8,c12) from tb1; +print execute sql select concat(c8,c13) from tb1; +sql_error select concat(c8,c13) from tb1; +print execute sql select concat(c8,12345678900000000000000000) from tb1; +sql_error select concat(c8,12345678900000000000000000) from tb1; +print execute sql select distinct concat(c8,'123') from tb1; +sql_error 
select distinct concat(c8,'123') from tb1; +print execute sql select concat(c8,t1) from stb1; +sql_error select concat(c8,t1) from stb1; +print execute sql select concat(c8,c14),avg(c2) from tb1; +sql_error select concat(c8,c14),avg(c2) from tb1; +print execute sql select concat(c8,c14),top(c3,1) from tb1; +sql_error select concat(c8,c14),top(c3,1) from tb1; +print execute sql select concat(c8,concat(c8,c14)) from tb1 session(ts, 1s); +sql_error select concat(c8,concat(c8,c14)) from tb1 session(ts, 1s); +print execute sql select concat(c8,concat(c8,c14)) from tb1 STATE_WINDOW(c1); +sql_error select concat(c8,concat(c8,c14)) from tb1 STATE_WINDOW(c1); +print execute sql select concat(c8,concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select concat(c8,concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select concat(c8,concat(c8,c14)) from stb1 group by t1; +sql_error select concat(c8,concat(c8,c14)) from stb1 group by t1; +print execute sql select concat(c8,concat(c8,c14)) from stb1 group by ts; +sql_error select concat(c8,concat(c8,c14)) from stb1 group by ts; +print execute sql select concat(c8,concat(c8,c14)) from stb1 group by c1; +sql_error select concat(c8,concat(c8,c14)) from stb1 group by c1; +print execute sql select concat(c8,concat(c8,c14)) from stb1 group by tbname; +sql_error select concat(c8,concat(c8,c14)) from stb1 group by tbname; +print execute sql select concat(c8,concat(c8,c14)) from tb1 order by c8; +sql_error select concat(c8,concat(c8,c14)) from tb1 order by c8; +print execute sql select concat(c8,c8),concat(c8,c14) from tbn; +sql select concat(c8,c8),concat(c8,c14) from tbn; +if $data00 != @111111@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != @222222@ then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @333333@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != @444444@ then + return -1 
+endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @555555@ then + return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @666666@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @777777@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c2) as a from stb1 interval(1s)); +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.001.00@ then + return -1 +endi +if $data10 != @2.002.00@ then + return -1 +endi +if $data20 != @2.502.50@ then + return -1 +endi +if $data30 != @4.004.00@ then + return -1 +endi +if $data40 != @4.504.50@ then + return -1 +endi +if $data50 != @66.566.5@ then + return -1 +endi +if $data60 != @-60.-60.@ then + return -1 +endi +print execute sql select concat(c8,a) from (select c8, concat(c8,c8) as a from tb1); +sql select concat(c8,a) from (select c8, concat(c8,c8) as a from tb1); +if $data00 != @123123123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @234234234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456456456@ then + return -1 +endi +if $data50 != @567567567@ then + return -1 +endi +if $data60 != @678678678@ then + return -1 +endi +print execute sql select concat(c8,"abc") from tb1; +sql select concat(c8,"abc") from tb1; +if $data00 != @123abc@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @234abc@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456abc@ then + return -1 +endi +if $data50 != @567abc@ then + return -1 +endi +if $data60 != @678abc@ then + return -1 +endi +print execute sql select concat(c8,concat(c8,c14)) from tb1; +sql select concat(c8,concat(c8,c14)) from tb1; +if $data00 != @1231231@ then + return -1 +endi +if $data10 != 
@NULL@ then + return -1 +endi +if $data20 != @2342342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564564@ then + return -1 +endi +if $data50 != @56756727@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(c8,concat(c14,c8)) from tb1; +sql select concat(c8,concat(c14,c8)) from tb1; +if $data00 != @1231123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564456@ then + return -1 +endi +if $data50 != @56727567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(c8,concat(concat(c14,c8), c8, '6') from tb1; +sql_error select concat(c8,concat(concat(c14,c8), c8, '6') from tb1; +print execute sql select concat(concat(c8,'11'), c8) from tb1; +sql select concat(concat(c8,'11'), c8) from tb1; +if $data00 != @12311123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @23411234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @45611456@ then + return -1 +endi +if $data50 != @56711567@ then + return -1 +endi +if $data60 != @67811678@ then + return -1 +endi +print execute sql select concat(concat(c8,c14), c8) from tb1; +sql select concat(concat(c8,c14), c8) from tb1; +if $data00 != @1231123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564456@ then + return -1 +endi +if $data50 != @56727567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +print execute sql select concat(concat(c8,c8), '11') from tb1; +sql select concat(concat(c8,c8), '11') from tb1; +if $data00 != @12312311@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @23423411@ then + return -1 
+endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @45645611@ then + return -1 +endi +if $data50 != @56756711@ then + return -1 +endi +if $data60 != @67867811@ then + return -1 +endi +print execute sql select concat(c8,c14),c14,c8 from tb1; +sql select concat(c8,c14),c14,c8 from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @2@ then + return -1 +endi +if $data22 != @234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data42 != @456@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @27@ then + return -1 +endi +if $data52 != @567@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != $emptyString then + return -1 +endi +if $data62 != @678@ then + return -1 +endi +print execute sql select concat(c8,c14),t1,ts,tbname,_C0,_c0 from tb1; +sql select concat(c8,c14),t1,ts,tbname,_C0,_c0 from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data03 != @tb1@ then + return -1 +endi +if $data04 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data05 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data12 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data13 != @tb1@ then + return -1 +endi +if $data14 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data15 != @21-11-11 09:00:01.000@ then + return -1 +endi 
+if $data20 != @2342@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data22 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data23 != @tb1@ then + return -1 +endi +if $data24 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data25 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data32 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data33 != @tb1@ then + return -1 +endi +if $data34 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data35 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data42 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data43 != @tb1@ then + return -1 +endi +if $data44 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data45 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data52 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data53 != @tb1@ then + return -1 +endi +if $data54 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data55 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +if $data62 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data63 != @tb1@ then + return -1 +endi +if $data64 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data65 != @21-11-11 09:00:06.000@ then + return -1 +endi +print execute sql select concat(c8,c14),floor(c7) from tb1; +sql select concat(c8,c14),floor(c7) from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + 
return -1 +endi +if $data31 != @3.000000000@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @4.000000000@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @-179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +print execute sql select concat(c8,c14),concat(c8,concat(c8,c14)) from tb1; +sql select concat(c8,c14),concat(c8,concat(c8,c14)) from tb1; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1231231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @2342342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @4564564@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @56756727@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @678678@ then + return -1 +endi +print execute sql select concat(c8,concat(c8,c14)) from tb1 where c8 is not null and c9 is not null; +sql select concat(c8,concat(c8,c14)) from tb1 where c8 is not null and c9 is not null; +if $data00 != @1231231@ then + return -1 +endi 
+if $data10 != @4564564@ then + return -1 +endi +if $data20 != @56756727@ then + return -1 +endi +if $data30 != @678678@ then + return -1 +endi +print execute sql select concat(c8,c8) from tb1 order by ts desc; +sql select concat(c8,c8) from tb1 order by ts desc; +if $data00 != @678678@ then + return -1 +endi +if $data10 != @567567@ then + return -1 +endi +if $data20 != @456456@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @234234@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @123123@ then + return -1 +endi +print execute sql select concat(c8,concat(c8,c14)) from tb1 order by ts desc; +sql select concat(c8,concat(c8,c14)) from tb1 order by ts desc; +if $data00 != @678678@ then + return -1 +endi +if $data10 != @56756727@ then + return -1 +endi +if $data20 != @4564564@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @2342342@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1231231@ then + return -1 +endi +print execute sql select concat(c8,concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +sql select concat(c8,concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @4564564@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342342@ then + return -1 +endi +print execute sql select concat(c8,c8) from stb1; +sql select concat(c8,c8) from stb1; +if $data00 != @123123@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @234234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456456@ then + return -1 +endi +if $data50 != @567567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +if $data70 != @111111@ then + return -1 +endi +if $data80 != @222222@ then + return -1 +endi +if $data90 != @333333@ then + return -1 +endi +print execute sql select concat(c8,c8) from stb1 order by ts 
desc; +sql select concat(c8,c8) from stb1 order by ts desc; +if $data00 != @777777@ then + if $data00 != @678678@ then + return -1 + endi +endi +if $data20 != @666666@ then + if $data20 != @567567@ then + return -1 + endi +endi +if $data40 != @555555@ then + if $data40 != @456456@ then + return -1 + endi +endi +if $data60 != @444444@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @333333@ then + if $data80 != @234234@ then + return -1 + endi +endi +print execute sql select concat(c8,c14),t1 from stb1 order by ts desc; +sql select concat(c8,c14),t1 from stb1 order by ts desc; +if $data00 != @7777@ then + if $data00 != @678@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @6666@ then + if $data20 != @56727@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @5555@ then + if $data40 != @4564@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @4444@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @3333@ then + if $data80 != @2342@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select concat(c8,c14),tbname from stb1; +sql select concat(c8,c14),tbname from stb1; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @tb1@ then 
+ return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @1111@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @2222@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @3333@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select concat(c8,c14),tbname from stb1 where t1 > 1; +sql select concat(c8,c14),tbname from stb1 where t1 > 1; +if $data00 != @1111@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @2222@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @3333@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @4444@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @5555@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @6666@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @7777@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select concat(c8,c8),concat(c8,c14) from tbn; +sql select concat(c8,c8),concat(c8,c14) from tbn; +if $data00 != @111111@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != @222222@ then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @333333@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != @444444@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @555555@ then + return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @666666@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @777777@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat(c8,c8),concat(c8,c14) from tbn order by ts desc; +sql select 
concat(c8,c8),concat(c8,c14) from tbn order by ts desc; +if $data00 != @777777@ then + return -1 +endi +if $data01 != @7777@ then + return -1 +endi +if $data10 != @666666@ then + return -1 +endi +if $data11 != @6666@ then + return -1 +endi +if $data20 != @555555@ then + return -1 +endi +if $data21 != @5555@ then + return -1 +endi +if $data30 != @444444@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @333333@ then + return -1 +endi +if $data41 != @3333@ then + return -1 +endi +if $data50 != @222222@ then + return -1 +endi +if $data51 != @2222@ then + return -1 +endi +if $data60 != @111111@ then + return -1 +endi +if $data61 != @1111@ then + return -1 +endi +print execute sql select concat(c8,concat(c8,c8)) from tbn; +sql select concat(c8,concat(c8,c8)) from tbn; +if $data00 != @111111111@ then + return -1 +endi +if $data10 != @222222222@ then + return -1 +endi +if $data20 != @333333333@ then + return -1 +endi +if $data30 != @444444444@ then + return -1 +endi +if $data40 != @555555555@ then + return -1 +endi +if $data50 != @666666666@ then + return -1 +endi +if $data60 != @777777777@ then + return -1 +endi +print execute sql select concat(cast(a as binary(10)),cast(a as binary(10))) from (select avg(c6) as a from stb1 interval(1s)); +sql select concat(cast(a as binary(10)),cast(a as binary(10))) from (select avg(c6) as a from stb1 interval(1s)); +if $data00 != @1.0000001.000000@ then + return -1 +endi +if $data10 != @2.0000002.000000@ then + return -1 +endi +if $data20 != @2.5000002.500000@ then + return -1 +endi +if $data30 != @4.0000004.000000@ then + return -1 +endi +if $data40 != @4.5000004.500000@ then + return -1 +endi +if $data50 != @17014117331701411733@ then + return -1 +endi +if $data60 != @-170141173-170141173@ then + return -1 +endi +print execute sql select concat(c8,c8) from (select * from stb1); +sql select concat(c8,c8) from (select * from stb1); +if $data00 != @123123@ then + return -1 +endi +if $data10 != @NULL@ 
then + return -1 +endi +if $data20 != @234234@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @456456@ then + return -1 +endi +if $data50 != @567567@ then + return -1 +endi +if $data60 != @678678@ then + return -1 +endi +if $data70 != @111111@ then + return -1 +endi +if $data80 != @222222@ then + return -1 +endi +if $data90 != @333333@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.001.00@ then + return -1 +endi +if $data10 != @2.002.00@ then + return -1 +endi +if $data20 != @2.502.50@ then + return -1 +endi +if $data30 != @4.004.00@ then + return -1 +endi +if $data40 != @4.504.50@ then + return -1 +endi +if $data50 != @17011701@ then + return -1 +endi +if $data60 != @-170-170@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.001.00@ then + return -1 +endi +if $data10 != @2.002.00@ then + return -1 +endi +if $data20 != @2.502.50@ then + return -1 +endi +if $data30 != @4.004.00@ then + return -1 +endi +if $data40 != @4.504.50@ then + return -1 +endi +if $data50 != @17011701@ then + 
return -1 +endi +if $data60 != @-170-170@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @-170-170@ then + return -1 +endi +if $data40 != @17011701@ then + return -1 +endi +if $data50 != @4.504.50@ then + return -1 +endi +if $data60 != @4.004.00@ then + return -1 +endi +if $data70 != @2.502.50@ then + return -1 +endi +if $data80 != @2.002.00@ then + return -1 +endi +if $data90 != @1.001.00@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @17011701@ then + return -1 +endi +if $data40 != @4.504.50@ then + return -1 +endi +if $data50 != @4.004.00@ then + return -1 +endi +if $data60 != @2.502.50@ then + return -1 +endi +if $data70 != @2.002.00@ then + return -1 +endi +if $data80 != @1.001.00@ then + return -1 +endi +if 
$data90 != @-170-170@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @-170-170@ then + return -1 +endi +if $data10 != @1.001.00@ then + return -1 +endi +if $data20 != @2.002.00@ then + return -1 +endi +if $data30 != @2.502.50@ then + return -1 +endi +if $data40 != @4.004.00@ then + return -1 +endi +if $data50 != @4.504.50@ then + return -1 +endi +if $data60 != @17011701@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select concat(c8,c8) as a from tb1); +sql select concat(cast(a as binary(4)),cast(a as binary(4))) from (select concat(c8,c8) as a from tb1); +if $data00 != @12311231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @23422342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @45644564@ then + return -1 +endi +if $data50 != @56755675@ then + return -1 +endi +if $data60 != @67866786@ then + return -1 +endi +print execute sql select concat(tb1.c8,tb1.c14),concat(tb2.c8,tb2.c14) from tb1,tb2 where tb1.ts=tb2.ts; +sql select concat(tb1.c8,tb1.c14),concat(tb2.c8,tb2.c14) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != 
@NULL@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat(c8,c14) from tb1 union all select concat(c8,c14) from tb2; +sql select concat(c8,c14) from tb1 union all select concat(c8,c14) from tb2; +if $data00 != @1231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data70 != @1111@ then + return -1 +endi +if $data80 != @2222@ then + return -1 +endi +if $data90 != @3333@ then + return -1 +endi +print execute sql select concat(c8, cast(a as binary(10))),c8, a from (select c8,c2 as a from stb1) +sql select concat(c8, cast(a as binary(10))),c8, a from (select c8,c2 as a from stb1) +if $data00 != @1231@ then + return -1 +endi +if $data01 != @123@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @234@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @456@ then + return -1 +endi +if $data42 != @4@ then + return -1 +endi +if $data50 != @567127@ then + return -1 +endi +if $data51 != @567@ then + return -1 +endi +if $data52 != @127@ then + return -1 +endi +if $data60 != @678-127@ then + return -1 +endi 
+if $data61 != @678@ then + return -1 +endi +if $data62 != @-127@ then + return -1 +endi +if $data70 != @1111@ then + return -1 +endi +if $data71 != @111@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data80 != @2222@ then + return -1 +endi +if $data81 != @222@ then + return -1 +endi +if $data82 != @2@ then + return -1 +endi +if $data90 != @3333@ then + return -1 +endi +if $data91 != @333@ then + return -1 +endi +if $data92 != @3@ then + return -1 +endi +print execute sql select concat(c9,concat(c9,c15)) from tb1; +sql select concat(c9,concat(c9,c15)) from tb1; +if $data00 != @123412341@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @345634563@ then + return -1 +endi +if $data40 != @456745674@ then + return -1 +endi +if $data50 != @56785678278@ then + return -1 +endi +if $data60 != @67896789@ then + return -1 +endi +print execute sql select concat(c9,concat(c15,c9)) from tb1; +sql select concat(c9,concat(c15,c9)) from tb1; +if $data00 != @123411234@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @345633456@ then + return -1 +endi +if $data40 != @456744567@ then + return -1 +endi +if $data50 != @56782785678@ then + return -1 +endi +if $data60 != @67896789@ then + return -1 +endi +print execute sql select concat(c9,concat(concat(c15,c9), c9, '6')) from tb1; +sql select concat(c9,concat(concat(c15,c9), c9, '6')) from tb1; +if $data00 != @12341123412346@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @34563345634566@ then + return -1 +endi +if $data40 != @45674456745676@ then + return -1 +endi +if $data50 != @5678278567856786@ then + return -1 +endi +if $data60 != @6789678967896@ then + return -1 +endi diff --git a/tests/script/general/compute/str_concat2.sim b/tests/script/general/compute/str_concat2.sim 
new file mode 100644 index 0000000000000000000000000000000000000000..4f4289719be7154e58e6e9ebffdb1189774469af --- /dev/null +++ b/tests/script/general/compute/str_concat2.sim @@ -0,0 +1,211 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select concat(stb1.c8,stb1.c14),pow(stba.c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select concat(stb1.c8,stb1.c14),pow(stba.c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1231@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @4.000000000@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data21 != @27.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @256.000000000@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data41 != @3125.000000000@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data51 != @46656.000000000@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data61 != @823543.000000000@ then + return -1 +endi +print execute sql select concat(c8,c14) as a from stb1 union all select concat(c8,c14) as a from stba; +sql select concat(c8,c14) as a from stb1 union all select concat(c8,c14) as a from stba; +if $data00 != @1231@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @2342@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4564@ then + return -1 +endi +if $data50 != @56727@ then + return -1 +endi +if $data60 != @678@ then + return -1 +endi +if $data70 != @1111@ then + return -1 +endi +if $data80 != @2222@ then + return -1 +endi +if $data90 != @3333@ then + return -1 +endi +print execute sql select concat(c8,c14) from stba; +sql select concat(c8,c14) from stba; +if $data00 != @1111@ then + return -1 +endi +if $data10 != @2222@ then + 
return -1 +endi +if $data20 != @3333@ then + return -1 +endi +if $data30 != @4444@ then + return -1 +endi +if $data40 != @5555@ then + return -1 +endi +if $data50 != @6666@ then + return -1 +endi +if $data60 != @7777@ then + return -1 +endi +if $data70 != @8888@ then + return -1 +endi +if $data80 != @9999@ then + return -1 +endi +if $data90 != @0000@ then + return -1 +endi +print execute sql select concat('c8',cast(min(c2) as binary(10))) from tba1; +sql select concat('c8',cast(min(c2) as binary(10))) from tba1; +if $data00 != @c80@ then + return -1 +endi +print execute sql select concat('c8',cast(max(c2) as binary(10))) from tba1; +sql select concat('c8',cast(max(c2) as binary(10))) from tba1; +if $data00 != @c89@ then + return -1 +endi +print execute sql select concat('c8',cast(count(c2) as binary(10))) from tba1; +sql select concat('c8',cast(count(c2) as binary(10))) from tba1; +if $data00 != @c830@ then + return -1 +endi +print execute sql select concat('c8',cast(sum(c2) as binary(10))) from tba1; +sql select concat('c8',cast(sum(c2) as binary(10))) from tba1; +if $data00 != @c8135@ then + return -1 +endi +print execute sql select concat('c8',cast(avg(c2) as binary(10))) from tba1; +sql select concat('c8',cast(avg(c2) as binary(10))) from tba1; +if $data00 != @c84.500000@ then + return -1 +endi +print execute sql select concat('c8',cast(percentile(c2, 10) as binary(10))) from tba1; +sql select concat('c8',cast(percentile(c2, 10) as binary(10))) from tba1; +if $data00 != @c80.900000@ then + return -1 +endi +print execute sql select concat('c8',cast(apercentile(c2, 10) as binary(10))) from tba1; +sql select concat('c8',cast(apercentile(c2, 10) as binary(10))) from tba1; +if $data00 != @c80.000000@ then + return -1 +endi +print execute sql select concat('c8',cast(stddev(c2) as binary(10))) from tba1; +sql select concat('c8',cast(stddev(c2) as binary(10))) from tba1; +if $data00 != @c82.872281@ then + return -1 +endi +print execute sql select 
concat('c8',cast(spread(c2) as binary(10))) from tba1; +sql select concat('c8',cast(spread(c2) as binary(10))) from tba1; +if $data00 != @c89.000000@ then + return -1 +endi +print execute sql select concat('c8',cast(twa(c2) as binary(10))) from tba1; +sql select concat('c8',cast(twa(c2) as binary(10))) from tba1; +if $data00 != @c84.637931@ then + return -1 +endi +print execute sql select concat('c8',cast(elapsed(ts) as binary(10))) from tba1; +sql select concat('c8',cast(elapsed(ts) as binary(10))) from tba1; +if $data00 != @c829000.0000@ then + return -1 +endi +print execute sql select concat('c8',cast(rate(c2) as binary(10))) from tba1; +sql select concat('c8',cast(rate(c2) as binary(10))) from tba1; +if $data00 != @c80.896552@ then + return -1 +endi +print execute sql select concat('c8',cast(irate(c2) as binary(10))) from tba1; +sql select concat('c8',cast(irate(c2) as binary(10))) from tba1; +if $data00 != @c80.000000@ then + return -1 +endi +print execute sql select concat('c8',cast(first(c2) as binary(10))) from tba1; +sql select concat('c8',cast(first(c2) as binary(10))) from tba1; +if $data00 != @c81@ then + return -1 +endi +print execute sql select concat('c8',cast(last(c2) as binary(10))) from tba1; +sql select concat('c8',cast(last(c2) as binary(10))) from tba1; +if $data00 != @c80@ then + return -1 +endi +print execute sql select concat('c8',cast(last_row(c2) as binary(10))) from tba1; +sql select concat('c8',cast(last_row(c2) as binary(10))) from tba1; +if $data00 != @c80@ then + return -1 +endi +print execute sql select concat('c8',cast(top(c2, 1) as binary(10))) from tba1; +sql_error select concat('c8',cast(top(c2, 1) as binary(10))) from tba1; +print execute sql select concat('c8',cast(bottom(c2, 1) as binary(10))) from tba1; +sql_error select concat('c8',cast(bottom(c2, 1) as binary(10))) from tba1; +print execute sql select concat('c8',cast(leastsquares(c2, 1, 1) as binary(10))) from tba1; +sql_error select concat('c8',cast(leastsquares(c2, 1, 1) 
as binary(10))) from tba1; +print execute sql select concat('c8',cast(derivative(c2, 1s, 0) as binary(10))) from tba1; +sql_error select concat('c8',cast(derivative(c2, 1s, 0) as binary(10))) from tba1; +print execute sql select concat('c8',cast(diff(c2) as binary(10))) from tba1; +sql_error select concat('c8',cast(diff(c2) as binary(10))) from tba1; +print execute sql select concat('c8',cast(csum(c2) as binary(10))) from tba1; +sql_error select concat('c8',cast(csum(c2) as binary(10))) from tba1; +print execute sql select concat('c8',cast(mavg(c2,2) as binary(10))) from tba1; +sql_error select concat('c8',cast(mavg(c2,2) as binary(10))) from tba1; +print execute sql select concat('c8',cast(sample(c2,2) as binary(10))) from tba1; +sql_error select concat('c8',cast(sample(c2,2) as binary(10))) from tba1; +print execute sql select concat('c8',cast(_block_dist() as binary(10))) from tba1; +sql_error select concat('c8',cast(_block_dist() as binary(10))) from tba1; diff --git a/tests/script/general/compute/str_concat_ws.sim b/tests/script/general/compute/str_concat_ws.sim new file mode 100644 index 0000000000000000000000000000000000000000..c8e55e7c03566d2f4e1d19b1bcd417f2e0b25373 --- /dev/null +++ b/tests/script/general/compute/str_concat_ws.sim @@ -0,0 +1,1592 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select concat_ws('sepa',c8,*) from tb1; +sql_error select concat_ws('sepa',c8,*) from tb1; +print execute sql select concat_ws('sepa',c8,*) from tb1; +sql_error select concat_ws('sepa',c8,*) from tb1; +print execute sql select concat_ws('sepa',c8,*) from tb1; +sql_error select concat_ws('sepa',c8,*) from tb1; +print execute sql select concat_ws('sepa',c8,*) from tb1; +sql_error select concat_ws('sepa',c8,*) from tb1; +print execute sql select concat_ws('sepa',c8,*) as a from tb1; +sql_error select concat_ws('sepa',c8,*) as a from tb1; +print execute sql select concat_ws('sepa',concat(c9,c15), '1') as a from tb1; +sql select 
concat_ws('sepa',concat(c9,c15), '1') as a from tb1; +if $data00 != @12341sepa1@ then + return -1 +endi +if $data10 != @1@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @34563sepa1@ then + return -1 +endi +if $data40 != @45674sepa1@ then + return -1 +endi +if $data50 != @5678278sepa1@ then + return -1 +endi +if $data60 != @6789sepa1@ then + return -1 +endi +print execute sql select concat_ws('sepa',concat(c8,tb1.*), '1') as a from tb1; +sql_error select concat_ws('sepa',concat(c8,tb1.*), '1') as a from tb1; +print execute sql select concat_ws('sepa',c8,*) from tb1; +sql_error select concat_ws('sepa',c8,*) from tb1; +print execute sql select concat_ws('sepa',c8,c14) from tb1; +sql select concat_ws('sepa',c8,c14) from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c15) from tb1; +sql_error select concat_ws('sepa',c8,c15) from tb1; +print execute sql select concat_ws('sepa',c8,concat(c14,c8)) from tb1; +sql select concat_ws('sepa',c8,concat(c14,c8)) from tb1; +if $data00 != @123sepa1123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4456@ then + return -1 +endi +if $data50 != @567sepa27567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,13) from tb1; +sql_error select concat_ws('sepa',c8,13) from tb1; +print execute sql select concat_ws('sepa',c8,c14) from tb1; +sql select concat_ws('sepa',c8,c14) from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data10 
!= $emptyString then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c8) from tb1; +sql select concat_ws('sepa',c8,c8) from tb1; +if $data00 != @123sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa456@ then + return -1 +endi +if $data50 != @567sepa567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c9) from tb1; +sql_error select concat_ws('sepa',c8,c9) from tb1; +print execute sql select concat_ws('sepa',c8,c4) from tb1; +sql_error select concat_ws('sepa',c8,c4) from tb1; +print execute sql select concat_ws('sepa',c8,c5) from tb1; +sql_error select concat_ws('sepa',c8,c5) from tb1; +print execute sql select concat_ws('sepa',c8,c6) from tb1; +sql_error select concat_ws('sepa',c8,c6) from tb1; +print execute sql select concat_ws('sepa',c8,c7) from tb1; +sql_error select concat_ws('sepa',c8,c7) from tb1; +print execute sql select concat_ws('sepa',c8,c8) from tb1; +sql select concat_ws('sepa',c8,c8) from tb1; +if $data00 != @123sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa456@ then + return -1 +endi +if $data50 != @567sepa567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c9,c8) from tb1; +sql_error select concat_ws('sepa',c9,c8) from tb1; +print execute sql select concat_ws('sepa',c8,c10) from tb1; +sql_error select concat_ws('sepa',c8,c10) from tb1; 
+print execute sql select concat_ws('sepa',c8,c11) from tb1; +sql_error select concat_ws('sepa',c8,c11) from tb1; +print execute sql select concat_ws('sepa',c8,c12) from tb1; +sql_error select concat_ws('sepa',c8,c12) from tb1; +print execute sql select concat_ws('sepa',c8,c13) from tb1; +sql_error select concat_ws('sepa',c8,c13) from tb1; +print execute sql select concat_ws('sepa',c8,12345678900000000000000000) from tb1; +sql_error select concat_ws('sepa',c8,12345678900000000000000000) from tb1; +print execute sql select distinct concat_ws('sepa',c8,'123') from tb1; +sql_error select distinct concat_ws('sepa',c8,'123') from tb1; +print execute sql select concat_ws('sepa',c8,t1) from stb1; +sql_error select concat_ws('sepa',c8,t1) from stb1; +print execute sql select concat_ws('sepa',c8,c14),avg(c2) from tb1; +sql_error select concat_ws('sepa',c8,c14),avg(c2) from tb1; +print execute sql select concat_ws('sepa',c8,c14),top(c3,1) from tb1; +sql_error select concat_ws('sepa',c8,c14),top(c3,1) from tb1; +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 session(ts, 1s); +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from tb1 session(ts, 1s); +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 STATE_WINDOW(c1); +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from tb1 STATE_WINDOW(c1); +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by t1; +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by t1; +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by ts; +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by ts; +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by c1; +sql_error select 
concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by c1; +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by tbname; +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from stb1 group by tbname; +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by c8; +sql_error select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by c8; +print execute sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn; +sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn; +if $data00 != @111sepa111@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != @222sepa222@ then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @333sepa333@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != @444sepa444@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @555sepa555@ then + return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @666sepa666@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @777sepa777@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c2) as a from stb1 interval(1s)); +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @1.00sepa1.00@ then + return -1 +endi +if $data10 != @2.00sepa2.00@ then + return -1 +endi +if $data20 != @2.50sepa2.50@ then + return -1 +endi +if $data30 != @4.00sepa4.00@ then + return -1 +endi +if $data40 != @4.50sepa4.50@ then + return -1 +endi +if $data50 != @66.5sepa66.5@ then + return -1 +endi +if $data60 != @-60.sepa-60.@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,a) from (select c8, concat_ws('sepa',c8,c8) as a from tb1); +sql select concat_ws('sepa',c8,a) from (select c8, 
concat_ws('sepa',c8,c8) as a from tb1); +if $data00 != @123sepa123sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa234sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa456sepa456@ then + return -1 +endi +if $data50 != @567sepa567sepa567@ then + return -1 +endi +if $data60 != @678sepa678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,"abc") from tb1; +sql select concat_ws('sepa',c8,"abc") from tb1; +if $data00 != @123sepaabc@ then + return -1 +endi +if $data10 != @abc@ then + return -1 +endi +if $data20 != @234sepaabc@ then + return -1 +endi +if $data30 != @abc@ then + return -1 +endi +if $data40 != @456sepaabc@ then + return -1 +endi +if $data50 != @567sepaabc@ then + return -1 +endi +if $data60 != @678sepaabc@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1; +sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1; +if $data00 != @123sepa1231@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2342@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4564@ then + return -1 +endi +if $data50 != @567sepa56727@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c14,c8)) from tb1; +sql select concat_ws('sepa',c8,concat(c14,c8)) from tb1; +if $data00 != @123sepa1123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4456@ then + return -1 +endi +if $data50 != @567sepa27567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(concat(c14,c8), c8, '6') from tb1; +sql_error select 
concat_ws('sepa',c8,concat(concat(c14,c8), c8, '6') from tb1; +print execute sql select concat_ws('sepa',concat(c8,'11'), c8) from tb1; +sql select concat_ws('sepa',concat(c8,'11'), c8) from tb1; +if $data00 != @12311sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @23411sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @45611sepa456@ then + return -1 +endi +if $data50 != @56711sepa567@ then + return -1 +endi +if $data60 != @67811sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',concat(c8,c14), c8) from tb1; +sql select concat_ws('sepa',concat(c8,c14), c8) from tb1; +if $data00 != @1231sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @2342sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @4564sepa456@ then + return -1 +endi +if $data50 != @56727sepa567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +print execute sql select concat_ws('sepa',concat(c8,c8), '11') from tb1; +sql select concat_ws('sepa',concat(c8,c8), '11') from tb1; +if $data00 != @123123sepa11@ then + return -1 +endi +if $data10 != @11@ then + return -1 +endi +if $data20 != @234234sepa11@ then + return -1 +endi +if $data30 != @11@ then + return -1 +endi +if $data40 != @456456sepa11@ then + return -1 +endi +if $data50 != @567567sepa11@ then + return -1 +endi +if $data60 != @678678sepa11@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14),c14,c8 from tb1; +sql select concat_ws('sepa',c8,c14),c14,c8 from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if 
$data21 != @2@ then + return -1 +endi +if $data22 != @234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data42 != @456@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @27@ then + return -1 +endi +if $data52 != @567@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != $emptyString then + return -1 +endi +if $data62 != @678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14),t1,ts,tbname,_C0,_c0 from tb1; +sql select concat_ws('sepa',c8,c14),t1,ts,tbname,_C0,_c0 from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data03 != @tb1@ then + return -1 +endi +if $data04 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data05 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data12 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data13 != @tb1@ then + return -1 +endi +if $data14 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data15 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data22 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data23 != @tb1@ then + return -1 +endi +if $data24 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data25 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data32 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data33 != @tb1@ then + return -1 +endi +if $data34 != @21-11-11 09:00:03.000@ then + 
return -1 +endi +if $data35 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data42 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data43 != @tb1@ then + return -1 +endi +if $data44 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data45 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data52 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data53 != @tb1@ then + return -1 +endi +if $data54 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data55 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +if $data62 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data63 != @tb1@ then + return -1 +endi +if $data64 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data65 != @21-11-11 09:00:06.000@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14),floor(c7) from tb1; +sql select concat_ws('sepa',c8,c14),floor(c7) from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @3.000000000@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @4.000000000@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != 
@179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @-179769000000000006323030492138942643493033036433685336215410983289126434148906289940615299632196609445533816320312774433484859900046491141051651091672734470972759941382582304802812882753059262973637182942535982636884444611376868582636745405553206881859340916340092953230149901406738427651121855107737424232448.000000000@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14),concat(c8,concat(c8,c14)) from tb1; +sql select concat_ws('sepa',c8,c14),concat(c8,concat(c8,c14)) from tb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1231231@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @2342342@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @4564564@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @56756727@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @678678@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 where c8 is not null and c9 is not null; +sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 where c8 is not null and c9 is not null; +if $data00 != @123sepa1231@ then + return -1 +endi +if $data10 != @456sepa4564@ then + return -1 +endi +if $data20 != @567sepa56727@ then + return -1 +endi +if $data30 != @678sepa678@ then + return 
-1 +endi +print execute sql select concat_ws('sepa',c8,c8) from tb1 order by ts desc; +sql select concat_ws('sepa',c8,c8) from tb1 order by ts desc; +if $data00 != @678sepa678@ then + return -1 +endi +if $data10 != @567sepa567@ then + return -1 +endi +if $data20 != @456sepa456@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @234sepa234@ then + return -1 +endi +if $data50 != $emptyString then + return -1 +endi +if $data60 != @123sepa123@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by ts desc; +sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by ts desc; +if $data00 != @678sepa678@ then + return -1 +endi +if $data10 != @567sepa56727@ then + return -1 +endi +if $data20 != @456sepa4564@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @234sepa2342@ then + return -1 +endi +if $data50 != $emptyString then + return -1 +endi +if $data60 != @123sepa1231@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +sql select concat_ws('sepa',c8,concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @456sepa4564@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2342@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c8) from stb1; +sql select concat_ws('sepa',c8,c8) from stb1; +if $data00 != @123sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa456@ then + return -1 +endi +if $data50 != @567sepa567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +if $data70 != @111sepa111@ then + return -1 +endi +if $data80 != @222sepa222@ then + return -1 +endi +if $data90 != @333sepa333@ then + return -1 
+endi +print execute sql select concat_ws('sepa',c8,c8) from stb1 order by ts desc; +sql select concat_ws('sepa',c8,c8) from stb1 order by ts desc; +if $data00 != @777sepa777@ then + if $data00 != @678sepa678@ then + return -1 + endi +endi +if $data20 != @666sepa666@ then + if $data20 != @567sepa567@ then + return -1 + endi +endi +if $data40 != @555sepa555@ then + if $data40 != @456sepa456@ then + return -1 + endi +endi +if $data60 != @444sepa444@ then + if $data60 != $emptyString then + return -1 + endi +endi +if $data80 != @333sepa333@ then + if $data80 != @234sepa234@ then + return -1 + endi +endi +print execute sql select concat_ws('sepa',c8,c14),t1 from stb1 order by ts desc; +sql select concat_ws('sepa',c8,c14),t1 from stb1 order by ts desc; +if $data00 != @777sepa7@ then + if $data00 != @678sepa@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @666sepa6@ then + if $data20 != @567sepa27@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @555sepa5@ then + if $data40 != @456sepa4@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @444sepa4@ then + if $data60 != $emptyString then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @333sepa3@ then + if $data80 != @234sepa2@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select concat_ws('sepa',c8,c14),tbname from stb1; +sql select concat_ws('sepa',c8,c14),tbname from stb1; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != 
$emptyString then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @111sepa1@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @222sepa2@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @333sepa3@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14),tbname from stb1 where t1 > 1; +sql select concat_ws('sepa',c8,c14),tbname from stb1 where t1 > 1; +if $data00 != @111sepa1@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @222sepa2@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @333sepa3@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @444sepa4@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @555sepa5@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @666sepa6@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @777sepa7@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn; +sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn; +if $data00 != @111sepa111@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != @222sepa222@ then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @333sepa333@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != @444sepa444@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @555sepa555@ then + 
return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @666sepa666@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @777sepa777@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn order by ts desc; +sql select concat_ws('sepa',c8,c8),concat(c8,c14) from tbn order by ts desc; +if $data00 != @777sepa777@ then + return -1 +endi +if $data01 != @7777@ then + return -1 +endi +if $data10 != @666sepa666@ then + return -1 +endi +if $data11 != @6666@ then + return -1 +endi +if $data20 != @555sepa555@ then + return -1 +endi +if $data21 != @5555@ then + return -1 +endi +if $data30 != @444sepa444@ then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @333sepa333@ then + return -1 +endi +if $data41 != @3333@ then + return -1 +endi +if $data50 != @222sepa222@ then + return -1 +endi +if $data51 != @2222@ then + return -1 +endi +if $data60 != @111sepa111@ then + return -1 +endi +if $data61 != @1111@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,concat(c8,c8)) from tbn; +sql select concat_ws('sepa',c8,concat(c8,c8)) from tbn; +if $data00 != @111sepa111111@ then + return -1 +endi +if $data10 != @222sepa222222@ then + return -1 +endi +if $data20 != @333sepa333333@ then + return -1 +endi +if $data30 != @444sepa444444@ then + return -1 +endi +if $data40 != @555sepa555555@ then + return -1 +endi +if $data50 != @666sepa666666@ then + return -1 +endi +if $data60 != @777sepa777777@ then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(10)),cast(a as binary(10))) from (select avg(c6) as a from stb1 interval(1s)); +sql select concat_ws('sepa',cast(a as binary(10)),cast(a as binary(10))) from (select avg(c6) as a from stb1 interval(1s)); +if $data00 != @1.000000sepa1.000000@ then + return -1 +endi +if $data10 != @2.000000sepa2.000000@ then + return -1 +endi +if $data20 
!= @2.500000sepa2.500000@ then + return -1 +endi +if $data30 != @4.000000sepa4.000000@ then + return -1 +endi +if $data40 != @4.500000sepa4.500000@ then + return -1 +endi +if $data50 != @1701411733sepa1701411733@ then + return -1 +endi +if $data60 != @-170141173sepa-170141173@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c8) from (select * from stb1); +sql select concat_ws('sepa',c8,c8) from (select * from stb1); +if $data00 != @123sepa123@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa234@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa456@ then + return -1 +endi +if $data50 != @567sepa567@ then + return -1 +endi +if $data60 != @678sepa678@ then + return -1 +endi +if $data70 != @111sepa111@ then + return -1 +endi +if $data80 != @222sepa222@ then + return -1 +endi +if $data90 != @333sepa333@ then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @1.00sepa1.00@ then + return -1 +endi +if $data10 != @2.00sepa2.00@ then + return -1 +endi +if $data20 != @2.50sepa2.50@ then + return -1 +endi +if $data30 != @4.00sepa4.00@ then + return -1 +endi +if $data40 != @4.50sepa4.50@ then + return -1 +endi +if $data50 != @1701sepa1701@ then + return -1 +endi +if $data60 != @-170sepa-170@ then + return -1 +endi +if $data70 != $emptyString then + return -1 +endi +if $data80 != $emptyString then + return -1 +endi +if $data90 != $emptyString then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from 
(select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @1.00sepa1.00@ then + return -1 +endi +if $data10 != @2.00sepa2.00@ then + return -1 +endi +if $data20 != @2.50sepa2.50@ then + return -1 +endi +if $data30 != @4.00sepa4.00@ then + return -1 +endi +if $data40 != @4.50sepa4.50@ then + return -1 +endi +if $data50 != @1701sepa1701@ then + return -1 +endi +if $data60 != @-170sepa-170@ then + return -1 +endi +if $data70 != $emptyString then + return -1 +endi +if $data80 != $emptyString then + return -1 +endi +if $data90 != $emptyString then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +if $data00 != $emptyString then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != $emptyString then + return -1 +endi +if $data30 != @-170sepa-170@ then + return -1 +endi +if $data40 != @1701sepa1701@ then + return -1 +endi +if $data50 != @4.50sepa4.50@ then + return -1 +endi +if $data60 != @4.00sepa4.00@ then + return -1 +endi +if $data70 != @2.50sepa2.50@ then + return -1 +endi +if $data80 != @2.00sepa2.00@ then + return -1 +endi +if $data90 != @1.00sepa1.00@ then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 
where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != $emptyString then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != $emptyString then + return -1 +endi +if $data30 != @1701sepa1701@ then + return -1 +endi +if $data40 != @4.50sepa4.50@ then + return -1 +endi +if $data50 != @4.00sepa4.00@ then + return -1 +endi +if $data60 != @2.50sepa2.50@ then + return -1 +endi +if $data70 != @2.00sepa2.00@ then + return -1 +endi +if $data80 != @1.00sepa1.00@ then + return -1 +endi +if $data90 != @-170sepa-170@ then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select avg(c6) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @-170sepa-170@ then + return -1 +endi +if $data10 != @1.00sepa1.00@ then + return -1 +endi +if $data20 != @2.00sepa2.00@ then + return -1 +endi +if $data30 != @2.50sepa2.50@ then + return -1 +endi +if $data40 != @4.00sepa4.00@ then + return -1 +endi +if $data50 != @4.50sepa4.50@ then + return -1 +endi +if $data60 != @1701sepa1701@ then + return -1 +endi +if $data70 != $emptyString then + return -1 +endi +if $data80 != $emptyString then + return -1 +endi +if $data90 != $emptyString then + return -1 +endi +print execute sql select concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select concat_ws('sepa',c8,c8) as a from tb1); +sql select 
concat_ws('sepa',cast(a as binary(4)),cast(a as binary(4))) from (select concat_ws('sepa',c8,c8) as a from tb1); +if $data00 != @123ssepa123s@ then + return -1 +endi +if $data10 != @sepa@ then + return -1 +endi +if $data20 != @234ssepa234s@ then + return -1 +endi +if $data30 != @sepa@ then + return -1 +endi +if $data40 != @456ssepa456s@ then + return -1 +endi +if $data50 != @567ssepa567s@ then + return -1 +endi +if $data60 != @678ssepa678s@ then + return -1 +endi +print execute sql select concat_ws('sepa',tb1.c8,tb1.c14),concat(tb2.c8,tb2.c14) from tb1,tb2 where tb1.ts=tb2.ts; +sql select concat_ws('sepa',tb1.c8,tb1.c14),concat(tb2.c8,tb2.c14) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1111@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @2222@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @3333@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @4444@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @5555@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @6666@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @7777@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8,c14) from tb1 union all select concat_ws('sepa',c8,c14) from tb2; +sql select concat_ws('sepa',c8,c14) from tb1 union all select concat_ws('sepa',c8,c14) from tb2; +if $data00 != @123sepa1@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data70 != @111sepa1@ then + return -1 +endi +if $data80 != @222sepa2@ then + return -1 
+endi +if $data90 != @333sepa3@ then + return -1 +endi +print execute sql select concat_ws('sepa',c8, cast(a as binary(10))),c8, a from (select c8,c2 as a from stb1) +sql select concat_ws('sepa',c8, cast(a as binary(10))),c8, a from (select c8,c2 as a from stb1) +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @123@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @234@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @456@ then + return -1 +endi +if $data42 != @4@ then + return -1 +endi +if $data50 != @567sepa127@ then + return -1 +endi +if $data51 != @567@ then + return -1 +endi +if $data52 != @127@ then + return -1 +endi +if $data60 != @678sepa-127@ then + return -1 +endi +if $data61 != @678@ then + return -1 +endi +if $data62 != @-127@ then + return -1 +endi +if $data70 != @111sepa1@ then + return -1 +endi +if $data71 != @111@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data80 != @222sepa2@ then + return -1 +endi +if $data81 != @222@ then + return -1 +endi +if $data82 != @2@ then + return -1 +endi +if $data90 != @333sepa3@ then + return -1 +endi +if $data91 != @333@ then + return -1 +endi +if $data92 != @3@ then + return -1 +endi +print execute sql select concat_ws('sepa',c9,concat(c9,c15)) from tb1; +sql select concat_ws('sepa',c9,concat(c9,c15)) from tb1; +if $data00 != @1234sepa12341@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != $emptyString then + return -1 +endi +if $data30 != @3456sepa34563@ then + return -1 +endi +if $data40 != 
@4567sepa45674@ then + return -1 +endi +if $data50 != @5678sepa5678278@ then + return -1 +endi +if $data60 != @6789sepa6789@ then + return -1 +endi +print execute sql select concat_ws('sepa',c9,concat(c15,c9)) from tb1; +sql select concat_ws('sepa',c9,concat(c15,c9)) from tb1; +if $data00 != @1234sepa11234@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != $emptyString then + return -1 +endi +if $data30 != @3456sepa33456@ then + return -1 +endi +if $data40 != @4567sepa44567@ then + return -1 +endi +if $data50 != @5678sepa2785678@ then + return -1 +endi +if $data60 != @6789sepa6789@ then + return -1 +endi +print execute sql select concat_ws('sepa',c9,concat(concat(c15,c9), c9, '6')) from tb1; +sql select concat_ws('sepa',c9,concat(concat(c15,c9), c9, '6')) from tb1; +if $data00 != @1234sepa1123412346@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != $emptyString then + return -1 +endi +if $data30 != @3456sepa3345634566@ then + return -1 +endi +if $data40 != @4567sepa4456745676@ then + return -1 +endi +if $data50 != @5678sepa278567856786@ then + return -1 +endi +if $data60 != @6789sepa678967896@ then + return -1 +endi diff --git a/tests/script/general/compute/str_concat_ws2.sim b/tests/script/general/compute/str_concat_ws2.sim new file mode 100644 index 0000000000000000000000000000000000000000..72cfa4fda8bd108778c4771182490d790b061921 --- /dev/null +++ b/tests/script/general/compute/str_concat_ws2.sim @@ -0,0 +1,211 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select concat_ws('sepa',stb1.c8,stb1.c14),pow(stba.c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select concat_ws('sepa',stb1.c8,stb1.c14),pow(stba.c2,stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @123sepa1@ then + return -1 +endi +if $data01 != @1.000000000@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if 
$data11 != @4.000000000@ then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data21 != @27.000000000@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data31 != @256.000000000@ then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data41 != @3125.000000000@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data51 != @46656.000000000@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data61 != @823543.000000000@ then + return -1 +endi +print execute sql select concat_ws('sepa', c8,c14) as a from stb1 union all select concat(c8,c14) as a from stba; +sql select concat_ws('sepa', c8,c14) as a from stb1 union all select concat(c8,c14) as a from stba; +if $data00 != @123sepa1@ then + return -1 +endi +if $data10 != $emptyString then + return -1 +endi +if $data20 != @234sepa2@ then + return -1 +endi +if $data30 != $emptyString then + return -1 +endi +if $data40 != @456sepa4@ then + return -1 +endi +if $data50 != @567sepa27@ then + return -1 +endi +if $data60 != @678sepa@ then + return -1 +endi +if $data70 != @111sepa1@ then + return -1 +endi +if $data80 != @222sepa2@ then + return -1 +endi +if $data90 != @333sepa3@ then + return -1 +endi +print execute sql select concat_ws('sepa', c8,c14) from stba; +sql select concat_ws('sepa', c8,c14) from stba; +if $data00 != @111sepa1@ then + return -1 +endi +if $data10 != @222sepa2@ then + return -1 +endi +if $data20 != @333sepa3@ then + return -1 +endi +if $data30 != @444sepa4@ then + return -1 +endi +if $data40 != @555sepa5@ then + return -1 +endi +if $data50 != @666sepa6@ then + return -1 +endi +if $data60 != @777sepa7@ then + return -1 +endi +if $data70 != @888sepa8@ then + return -1 +endi +if $data80 != @999sepa9@ then + return -1 +endi +if $data90 != @000sepa0@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(min(c2) as binary(10))) from tba1; +sql select 
concat_ws('sepa','c8',cast(min(c2) as binary(10))) from tba1; +if $data00 != @c8sepa0@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(max(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(max(c2) as binary(10))) from tba1; +if $data00 != @c8sepa9@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(count(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(count(c2) as binary(10))) from tba1; +if $data00 != @c8sepa30@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(sum(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(sum(c2) as binary(10))) from tba1; +if $data00 != @c8sepa135@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(avg(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(avg(c2) as binary(10))) from tba1; +if $data00 != @c8sepa4.500000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(percentile(c2, 10) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(percentile(c2, 10) as binary(10))) from tba1; +if $data00 != @c8sepa0.900000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(apercentile(c2, 10) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(apercentile(c2, 10) as binary(10))) from tba1; +if $data00 != @c8sepa0.000000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(stddev(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(stddev(c2) as binary(10))) from tba1; +if $data00 != @c8sepa2.872281@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(spread(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(spread(c2) as binary(10))) from tba1; +if $data00 != @c8sepa9.000000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(twa(c2) as binary(10))) from tba1; +sql select 
concat_ws('sepa','c8',cast(twa(c2) as binary(10))) from tba1; +if $data00 != @c8sepa4.637931@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(elapsed(ts) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(elapsed(ts) as binary(10))) from tba1; +if $data00 != @c8sepa29000.0000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(rate(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(rate(c2) as binary(10))) from tba1; +if $data00 != @c8sepa0.896552@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(irate(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(irate(c2) as binary(10))) from tba1; +if $data00 != @c8sepa0.000000@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(first(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(first(c2) as binary(10))) from tba1; +if $data00 != @c8sepa1@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(last(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(last(c2) as binary(10))) from tba1; +if $data00 != @c8sepa0@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(last_row(c2) as binary(10))) from tba1; +sql select concat_ws('sepa','c8',cast(last_row(c2) as binary(10))) from tba1; +if $data00 != @c8sepa0@ then + return -1 +endi +print execute sql select concat_ws('sepa','c8',cast(top(c2, 1) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(top(c2, 1) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(bottom(c2, 1) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(bottom(c2, 1) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(leastsquares(c2, 1, 1) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(leastsquares(c2, 1, 1) as binary(10))) from tba1; +print execute 
sql select concat_ws('sepa','c8',cast(derivative(c2, 1s, 0) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(derivative(c2, 1s, 0) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(diff(c2) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(diff(c2) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(csum(c2) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(csum(c2) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(mavg(c2,2) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(mavg(c2,2) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(sample(c2,2) as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(sample(c2,2) as binary(10))) from tba1; +print execute sql select concat_ws('sepa','c8',cast(_block_dist() as binary(10))) from tba1; +sql_error select concat_ws('sepa','c8',cast(_block_dist() as binary(10))) from tba1; diff --git a/tests/script/general/compute/str_length.sim b/tests/script/general/compute/str_length.sim new file mode 100644 index 0000000000000000000000000000000000000000..f53fd2bb61ee9f68dcebb9e1a7462c4e05ab4a22 --- /dev/null +++ b/tests/script/general/compute/str_length.sim @@ -0,0 +1,1315 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select length(*) from tb1; +sql_error select length(*) from tb1; +print execute sql select length(*) + 1 as a from tb1; +sql_error select length(*) + 1 as a from tb1; +print execute sql select length(tb1.*) + 1 as a from tb1; +sql_error select length(tb1.*) + 1 as a from tb1; +print execute sql select length(*) from tb1; +sql_error select length(*) from tb1; +print execute sql select length(c1) from tb1; +sql_error select length(c1) from tb1; +print execute sql select length(13) from tb1; +sql_error select length(13) from tb1; +print execute sql select length(c1) from 
tb1; +sql_error select length(c1) from tb1; +print execute sql select length(c2) from tb1; +sql_error select length(c2) from tb1; +print execute sql select length(c3) from tb1; +sql_error select length(c3) from tb1; +print execute sql select length(c4) from tb1; +sql_error select length(c4) from tb1; +print execute sql select length(c5) from tb1; +sql_error select length(c5) from tb1; +print execute sql select length(c6) from tb1; +sql_error select length(c6) from tb1; +print execute sql select length(c7) from tb1; +sql_error select length(c7) from tb1; +print execute sql select length(c8) from tb1; +sql select length(c8) from tb1; +if $data00 != @3@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length(c14) from tb1; +sql select length(c14) from tb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +print execute sql select length(c10) from tb1; +sql_error select length(c10) from tb1; +print execute sql select length(c11) from tb1; +sql_error select length(c11) from tb1; +print execute sql select length(c12) from tb1; +sql_error select length(c12) from tb1; +print execute sql select length(c13) from tb1; +sql_error select length(c13) from tb1; +print execute sql select length('12345678900000000000000000') from tb1; +sql select length('12345678900000000000000000') from tb1; +if $data00 != @26@ then + return -1 +endi +if $data10 != @26@ then + return -1 +endi +if $data20 != @26@ then + return -1 +endi +if $data30 != @26@ then + return -1 +endi +if $data40 != @26@ 
then + return -1 +endi +if $data50 != @26@ then + return -1 +endi +if $data60 != @26@ then + return -1 +endi +print execute sql select distinct length(123) from tb1; +sql_error select distinct length(123) from tb1; +print execute sql select length(t1) from stb1; +sql_error select length(t1) from stb1; +print execute sql select length(c8),avg(c3) from tb1; +sql_error select length(c8),avg(c3) from tb1; +print execute sql select length(c8),top(c3,1) from tb1; +sql_error select length(c8),top(c3,1) from tb1; +print execute sql select length(concat(c8,c14)) from tb1 session(ts, 1s); +sql_error select length(concat(c8,c14)) from tb1 session(ts, 1s); +print execute sql select length(concat(c8,c14)) from tb1 STATE_WINDOW(c1); +sql_error select length(concat(c8,c14)) from tb1 STATE_WINDOW(c1); +print execute sql select length(concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +sql_error select length(concat(c8,c14)) from tb1 interval(1s) sliding(1s) fill(NULL); +print execute sql select length(concat(c8,c14)) from stb1 group by t1; +sql_error select length(concat(c8,c14)) from stb1 group by t1; +print execute sql select length(concat(c8,c14)) from stb1 group by ts; +sql_error select length(concat(c8,c14)) from stb1 group by ts; +print execute sql select length(concat(c8,c14)) from stb1 group by c1; +sql_error select length(concat(c8,c14)) from stb1 group by c1; +print execute sql select length(concat(c8,c14)) from stb1 group by tbname; +sql_error select length(concat(c8,c14)) from stb1 group by tbname; +print execute sql select length(concat(c8,c14)) from tb1 order by c2; +sql_error select length(concat(c8,c14)) from tb1 order by c2; +print execute sql select length(c8),length(c14) from tbn; +sql select length(c8),length(c14) from tbn; +if $data00 != @3@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @1@ 
then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select length(ts) from (select avg(c2) as a from stb1 interval(1s)); +sql_error select length(ts) from (select avg(c2) as a from stb1 interval(1s)); +print execute sql select length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +sql select length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length("abc") from tb1; +sql select length("abc") from tb1; +if $data00 != @3@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length(concat(c8,c14)) from tb1; +sql select length(concat(c8,c14)) from tb1; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @5@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length((concat(c8,c14))) from tb1; +sql select length((concat(c8,c14))) from tb1; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4@ 
then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @5@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length('11')+c2 from tb1; +sql select length('11')+c2 from tb1; +if $data00 != @3.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @6.000000000@ then + return -1 +endi +if $data50 != @129.000000000@ then + return -1 +endi +if $data60 != @-125.000000000@ then + return -1 +endi +print execute sql select length(c8)+c2 from tb1; +sql select length(c8)+c2 from tb1; +if $data00 != @4.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @5.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @7.000000000@ then + return -1 +endi +if $data50 != @130.000000000@ then + return -1 +endi +if $data60 != @-124.000000000@ then + return -1 +endi +print execute sql select length(c14)+11 from tb1; +sql select length(c14)+11 from tb1; +if $data00 != @12.000000000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @12.000000000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @12.000000000@ then + return -1 +endi +if $data50 != @13.000000000@ then + return -1 +endi +if $data60 != @11.000000000@ then + return -1 +endi +print execute sql select length(c8),c8,c2 from tb1; +sql select length(c8),c8,c2 from tb1; +if $data00 != @3@ then + return -1 +endi +if $data01 != @123@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data12 != @NULL@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @234@ then + return -1 +endi +if $data22 != @2@ then + return -1 
+endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @NULL@ then + return -1 +endi +if $data32 != @NULL@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @456@ then + return -1 +endi +if $data42 != @4@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @567@ then + return -1 +endi +if $data52 != @127@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @678@ then + return -1 +endi +if $data62 != @-127@ then + return -1 +endi +print execute sql select length(c8),t1,ts,tbname,_C0,_c0 from tb1; +sql select length(c8),t1,ts,tbname,_C0,_c0 from tb1; +if $data00 != @3@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data03 != @tb1@ then + return -1 +endi +if $data04 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data05 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data12 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data13 != @tb1@ then + return -1 +endi +if $data14 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data15 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data22 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data23 != @tb1@ then + return -1 +endi +if $data24 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data25 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data32 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data33 != @tb1@ then + return -1 +endi +if $data34 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data35 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi 
+if $data42 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data43 != @tb1@ then + return -1 +endi +if $data44 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data45 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data52 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data53 != @tb1@ then + return -1 +endi +if $data54 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data55 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +if $data62 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data63 != @tb1@ then + return -1 +endi +if $data64 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data65 != @21-11-11 09:00:06.000@ then + return -1 +endi +print execute sql select length(c8),floor(c3) from tb1; +sql select length(c8),floor(c3) from tb1; +if $data00 != @3@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @NULL@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @3@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @32767@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @-32767@ then + return -1 +endi +print execute sql select length(c8),length(concat(c8,c14)) from tb1; +sql select length(c8),length(concat(c8,c14)) from tb1; +if $data00 != @3@ then + return -1 +endi +if $data01 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @NULL@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @4@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if 
$data31 != @NULL@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @5@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @3@ then + return -1 +endi +print execute sql select length(concat(c8,c14)) from tb1 where c9 is not null and c15 is not null; +sql select length(concat(c8,c14)) from tb1 where c9 is not null and c15 is not null; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data30 != @5@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +print execute sql select length(c14) from tb1 order by ts desc; +sql select length(c14) from tb1 order by ts desc; +if $data00 != @0@ then + return -1 +endi +if $data10 != @2@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @1@ then + return -1 +endi +print execute sql select length(concat(c8,c14)) from tb1 order by ts desc; +sql select length(concat(c8,c14)) from tb1 order by ts desc; +if $data00 != @3@ then + return -1 +endi +if $data10 != @5@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @NULL@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +print execute sql select length(concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +sql select length(concat(c8,c14)) from tb1 order by ts desc limit 3 offset 2; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +print execute sql select length(c14) from stb1; +sql select length(c14) from stb1; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then 
+ return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi +print execute sql select length(c14) from stb1 order by ts desc; +sql select length(c14) from stb1 order by ts desc; +if $data00 != @1@ then + if $data00 != @0@ then + return -1 + endi +endi +if $data20 != @1@ then + if $data20 != @2@ then + return -1 + endi +endi +if $data40 != @1@ then + if $data40 != @1@ then + return -1 + endi +endi +if $data60 != @1@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data80 != @1@ then + if $data80 != @1@ then + return -1 + endi +endi +print execute sql select length(c14),t1 from stb1 order by ts desc; +sql select length(c14),t1 from stb1 order by ts desc; +if $data00 != @1@ then + if $data00 != @0@ then + return -1 + endi +endi +if $data01 != @2@ then + if $data01 != @1@ then + return -1 + endi +endi +if $data20 != @1@ then + if $data20 != @2@ then + return -1 + endi +endi +if $data21 != @2@ then + if $data21 != @1@ then + return -1 + endi +endi +if $data40 != @1@ then + if $data40 != @1@ then + return -1 + endi +endi +if $data41 != @2@ then + if $data41 != @1@ then + return -1 + endi +endi +if $data60 != @1@ then + if $data60 != @NULL@ then + return -1 + endi +endi +if $data61 != @2@ then + if $data61 != @1@ then + return -1 + endi +endi +if $data80 != @1@ then + if $data80 != @1@ then + return -1 + endi +endi +if $data81 != @2@ then + if $data81 != @1@ then + return -1 + endi +endi +print execute sql select length(c15),tbname from stb1; +sql select length(c15),tbname from stb1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @tb1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @tb1@ then + return -1 +endi +if $data20 
!= @NULL@ then + return -1 +endi +if $data21 != @tb1@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @tb1@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @tb1@ then + return -1 +endi +if $data50 != @12@ then + return -1 +endi +if $data51 != @tb1@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @tb1@ then + return -1 +endi +if $data70 != @4@ then + return -1 +endi +if $data71 != @tb2@ then + return -1 +endi +if $data80 != @4@ then + return -1 +endi +if $data81 != @tb2@ then + return -1 +endi +if $data90 != @4@ then + return -1 +endi +if $data91 != @tb2@ then + return -1 +endi +print execute sql select length(c15),tbname from stb1 where t1 > 1; +sql select length(c15),tbname from stb1 where t1 > 1; +if $data00 != @4@ then + return -1 +endi +if $data01 != @tb2@ then + return -1 +endi +if $data10 != @4@ then + return -1 +endi +if $data11 != @tb2@ then + return -1 +endi +if $data20 != @4@ then + return -1 +endi +if $data21 != @tb2@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @tb2@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @tb2@ then + return -1 +endi +if $data50 != @4@ then + return -1 +endi +if $data51 != @tb2@ then + return -1 +endi +if $data60 != @4@ then + return -1 +endi +if $data61 != @tb2@ then + return -1 +endi +print execute sql select length(c8),length(c14) from tbn; +sql select length(c8),length(c14) from tbn; +if $data00 != @3@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @1@ then + 
return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select length(c8),length(c14) from tbn order by ts desc; +sql select length(c8),length(c14) from tbn order by ts desc; +if $data00 != @3@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @3@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +print execute sql select length(length(c8)) from tbn; +sql_error select length(length(c8)) from tbn; +print execute sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 interval(1s)); +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 interval(1s)); +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +print execute sql select length(c14) from (select * from stb1); +sql select length(c14) from (select * from stb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi +print execute sql select 
length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)); +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts; +if $data00 != @8@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @9@ then + return -1 +endi +if $data60 != @10@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi +if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc; +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts 
desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @10@ then + return -1 +endi +if $data40 != @9@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @8@ then + return -1 +endi +if $data70 != @8@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data90 != @8@ then + return -1 +endi +print execute sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc; +if $data00 != @NULL@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @9@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @8@ then + return -1 +endi +if $data70 != @8@ then + return -1 +endi +if $data80 != @8@ then + return -1 +endi +if $data90 != @10@ then + return -1 +endi +print execute sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +sql select length(cast(a as binary(10))) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a; +if $data00 != @10@ then + return -1 +endi +if $data10 != @8@ then + return -1 +endi +if $data20 != @8@ then + return -1 +endi +if $data30 != @8@ then + return -1 +endi +if $data40 != @8@ then + return -1 +endi +if $data50 != @8@ then + return -1 +endi +if $data60 != @9@ then + return -1 +endi +if $data70 != @NULL@ then + return -1 +endi 
+if $data80 != @NULL@ then + return -1 +endi +if $data90 != @NULL@ then + return -1 +endi +print execute sql select length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +sql select length(cast(a as binary(10))) from (select abs(c2) as a from tb1); +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +print execute sql select length(tb1.c15),length(tb2.c15) from tb1,tb2 where tb1.ts=tb2.ts; +sql select length(tb1.c15),length(tb2.c15) from tb1,tb2 where tb1.ts=tb2.ts; +if $data00 != @4@ then + return -1 +endi +if $data01 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @4@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data21 != @4@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data50 != @12@ then + return -1 +endi +if $data51 != @4@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @4@ then + return -1 +endi +print execute sql select length(c15) from tb1 union all select length(c15) from tb2; +sql select length(c15) from tb1 union all select length(c15) from tb2; +if $data00 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @NULL@ then + return -1 +endi +if $data30 != @4@ then + return -1 +endi +if $data40 != @4@ then + return -1 +endi +if $data50 != @12@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @4@ then + return -1 +endi +if $data80 != @4@ then + return -1 +endi +if $data90 != @4@ then + return -1 +endi diff --git a/tests/script/general/compute/str_length2.sim 
b/tests/script/general/compute/str_length2.sim new file mode 100644 index 0000000000000000000000000000000000000000..94a735c4978874a6bf2e94a2801af4fdac6a8f24 --- /dev/null +++ b/tests/script/general/compute/str_length2.sim @@ -0,0 +1,309 @@ +sleep 100 +sql connect +sql use db + +$emptyString = @@ +print execute sql select length(stb1.c14),length(stba.c15) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +sql select length(stb1.c14),length(stba.c15) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts; +if $data00 != @1@ then + return -1 +endi +if $data01 != @4@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data11 != @4@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data21 != @4@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data31 != @4@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data41 != @4@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data51 != @4@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data61 != @4@ then + return -1 +endi +print execute sql select length(c14) as a from stb1 union all select length(c15) as a from stba; +sql select length(c14) as a from stb1 union all select length(c15) as a from stba; +if $data00 != @1@ then + return -1 +endi +if $data10 != @NULL@ then + return -1 +endi +if $data20 != @1@ then + return -1 +endi +if $data30 != @NULL@ then + return -1 +endi +if $data40 != @1@ then + return -1 +endi +if $data50 != @2@ then + return -1 +endi +if $data60 != @0@ then + return -1 +endi +if $data70 != @1@ then + return -1 +endi +if $data80 != @1@ then + return -1 +endi +if $data90 != @1@ then + return -1 +endi +print execute sql select length(c8) from stba; +sql select length(c8) from stba; +if $data00 != @3@ then + return -1 +endi +if $data10 != @3@ then + return -1 +endi +if $data20 != @3@ then + return -1 +endi +if $data30 != @3@ then + return -1 +endi +if $data40 != @3@ then + 
return -1 +endi +if $data50 != @3@ then + return -1 +endi +if $data60 != @3@ then + return -1 +endi +if $data70 != @3@ then + return -1 +endi +if $data80 != @3@ then + return -1 +endi +if $data90 != @3@ then + return -1 +endi +print execute sql select length(c9) from stba; +sql select length(c9) from stba; +if $data00 != @16@ then + return -1 +endi +if $data10 != @16@ then + return -1 +endi +if $data20 != @16@ then + return -1 +endi +if $data30 != @16@ then + return -1 +endi +if $data40 != @16@ then + return -1 +endi +if $data50 != @16@ then + return -1 +endi +if $data60 != @16@ then + return -1 +endi +if $data70 != @16@ then + return -1 +endi +if $data80 != @16@ then + return -1 +endi +if $data90 != @16@ then + return -1 +endi +print execute sql select length(cast(min(c2) as binary(20))) from tba1; +sql select length(cast(min(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select length(cast(max(c2) as binary(20))) from tba1; +sql select length(cast(max(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select length(cast(count(c2) as binary(20))) from tba1; +sql select length(cast(count(c2) as binary(20))) from tba1; +if $data00 != @2@ then + return -1 +endi +print execute sql select length(cast(sum(c2) as binary(20))) from tba1; +sql select length(cast(sum(c2) as binary(20))) from tba1; +if $data00 != @3@ then + return -1 +endi +print execute sql select length(cast(avg(c2) as binary(20))) from tba1; +sql select length(cast(avg(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(percentile(c2, 10) as binary(20))) from tba1; +sql select length(cast(percentile(c2, 10) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(apercentile(c2, 10) as binary(20))) from tba1; +sql select length(cast(apercentile(c2, 10) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 
+endi +print execute sql select length(cast(stddev(c2) as binary(20))) from tba1; +sql select length(cast(stddev(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(spread(c2) as binary(20))) from tba1; +sql select length(cast(spread(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(twa(c2) as binary(20))) from tba1; +sql select length(cast(twa(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +sql_error select length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +print execute sql select length(cast(interp(c2) as binary(20))) from tba1 every(1s) +sql select length(cast(interp(c2) as binary(20))) from tba1 every(1s) +if $data00 != @21-11-11 09:00:00.000@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data10 != @21-11-11 09:00:01.000@ then + return -1 +endi +if $data11 != @1@ then + return -1 +endi +if $data20 != @21-11-11 09:00:02.000@ then + return -1 +endi +if $data21 != @1@ then + return -1 +endi +if $data30 != @21-11-11 09:00:03.000@ then + return -1 +endi +if $data31 != @1@ then + return -1 +endi +if $data40 != @21-11-11 09:00:04.000@ then + return -1 +endi +if $data41 != @1@ then + return -1 +endi +if $data50 != @21-11-11 09:00:05.000@ then + return -1 +endi +if $data51 != @1@ then + return -1 +endi +if $data60 != @21-11-11 09:00:06.000@ then + return -1 +endi +if $data61 != @1@ then + return -1 +endi +if $data70 != @21-11-11 09:00:07.000@ then + return -1 +endi +if $data71 != @1@ then + return -1 +endi +if $data80 != @21-11-11 09:00:08.000@ then + return -1 +endi +if $data81 != @1@ then + return -1 +endi +if $data90 != @21-11-11 09:00:09.000@ then + return -1 +endi +if $data91 != @1@ then + return -1 +endi +print execute sql select length(cast(interp(c2) as binary(20))) stba every(1s) group by 
tbname; +sql_error select length(cast(interp(c2) as binary(20))) stba every(1s) group by tbname; +print execute sql select length(cast(elapsed(ts) as binary(20))) from tba1; +sql select length(cast(elapsed(ts) as binary(20))) from tba1; +if $data00 != @12@ then + return -1 +endi +print execute sql select length(cast(rate(c2) as binary(20))) from tba1; +sql select length(cast(rate(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(irate(c2) as binary(20))) from tba1; +sql select length(cast(irate(c2) as binary(20))) from tba1; +if $data00 != @8@ then + return -1 +endi +print execute sql select length(cast(first(c2) as binary(20))) from tba1; +sql select length(cast(first(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select length(cast(last(c2) as binary(20))) from tba1; +sql select length(cast(last(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select length(cast(last_row(c2) as binary(20))) from tba1; +sql select length(cast(last_row(c2) as binary(20))) from tba1; +if $data00 != @1@ then + return -1 +endi +print execute sql select length(cast(top(c2, 1) as binary(20))) from tba1; +sql_error select length(cast(top(c2, 1) as binary(20))) from tba1; +print execute sql select length(cast(bottom(c2, 1) as binary(20))) from tba1; +sql_error select length(cast(bottom(c2, 1) as binary(20))) from tba1; +print execute sql select length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +sql_error select length(cast(leastsquares(c2, 1, 1) as binary(20))) from tba1; +print execute sql select length(cast(derivative(c2, 1s, 0) as binary(20))) from tba1; +sql_error select length(cast(derivative(c2, 1s, 0) as binary(20))) from tba1; +print execute sql select length(cast(diff(c2) as binary(20))) from tba1; +sql_error select length(cast(diff(c2) as binary(20))) from tba1; +print execute sql select length(cast(csum(c2) as 
binary(20))) from tba1; +sql_error select length(cast(csum(c2) as binary(20))) from tba1; +print execute sql select length(cast(mavg(c2,2) as binary(20))) from tba1; +sql_error select length(cast(mavg(c2,2) as binary(20))) from tba1; +print execute sql select length(cast(sample(c2,2) as binary(20))) from tba1; +sql_error select length(cast(sample(c2,2) as binary(20))) from tba1; +print execute sql select length(cast(_block_dist() as binary(20))) from tba1; +sql_error select length(cast(_block_dist() as binary(20))) from tba1; diff --git a/tests/script/general/compute/string_funcs.sim b/tests/script/general/compute/string_funcs.sim new file mode 100644 index 0000000000000000000000000000000000000000..119883b8e857086caa5f7b6accf00a457256244b --- /dev/null +++ b/tests/script/general/compute/string_funcs.sim @@ -0,0 +1,111 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 6 +system sh/cfg.sh -n dnode1 -c cache -v 1 +system sh/cfg.sh -n dnode1 -c minRows -v 10 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect + +sql drop database if exists db +sql create database if not exists db +sql use db +sql create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned, c14 binary(2), c15 nchar(3)) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tb1 using stb1 tags(1,'1',1.0); +sql create table tb2 using stb1 tags(2,'2',2.0); +sql create table tb3 using stb1 tags(3,'3',3.0); + +sql insert into tb1 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"123","1234",1,1,1,1,'1','1'); +sql insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); +sql insert into tb1 values ('2021-11-11 
09:00:02',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL,'2',NULL); +sql insert into tb1 values ('2021-11-11 09:00:03',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3,NULL,'3'); +sql insert into tb1 values ('2021-11-11 09:00:04',true,4,4,4,4,4,4,"456","4567",4,4,4,4,'4','4'); +sql insert into tb1 values ('2021-11-11 09:00:05',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807,'27','278'); +sql insert into tb1 values ('2021-11-11 09:00:06',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0,'',''); + +sql insert into tb2 values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1,'1','1'); +sql insert into tb2 values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2,'2','2'); +sql insert into tb2 values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3,'3','3'); +sql insert into tb2 values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4,'4','4'); +sql insert into tb2 values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5,'5','5'); +sql insert into tb2 values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6,'6','6'); +sql insert into tb2 values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7,'7','7'); + +sql create table tbn (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned, c14 binary(2), c15 nchar(3)); + +sql insert into tbn values ('2021-11-11 09:00:00',true,1,1,1,1,1,1,"111","1111",1,1,1,1,'1','1'); +sql insert into tbn values ('2021-11-11 09:00:01',true,2,2,2,2,2,2,"222","2222",2,2,2,2,'2','2'); +sql insert into tbn values ('2021-11-11 09:00:02',true,3,3,2,3,3,3,"333","3333",3,3,3,3,'3','3'); +sql insert into tbn values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4,'4','4'); +sql 
insert into tbn values ('2021-11-11 09:00:04',true,5,5,5,5,5,5,"555","5555",5,5,5,5,'5','5'); +sql insert into tbn values ('2021-11-11 09:00:05',true,6,6,6,6,6,6,"666","6666",6,6,6,6,'6','6'); +sql insert into tbn values ('2021-11-11 09:00:06',true,7,7,7,7,7,7,"777","7777",7,7,7,7,'7','7'); + +run general/compute/str_concat.sim +run general/compute/str_concat_ws.sim +run general/compute/str_length.sim +run general/compute/str_char_length.sim + +sql create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned, c14 binary(2), c15 nchar(3)) TAGS(t1 int, t2 binary(10), t3 double); + +sql create table tba1 using stba tags(1,'1',1.0); + +sql insert into tba1 values ('2021-11-11 09:00:00',true, 1,1,1,1,1,1,"111","1111",1,1,1,1,'1','1'); +sql insert into tba1 values ('2021-11-11 09:00:01',true, 2,2,2,2,2,2,"222","2222",2,2,2,2,'2','2'); +sql insert into tba1 values ('2021-11-11 09:00:02',true, 3,3,2,3,3,3,"333","3333",3,3,3,3,'3','3'); +sql insert into tba1 values ('2021-11-11 09:00:03',false,4,4,4,4,4,4,"444","4444",4,4,4,4,'4','4'); +sql insert into tba1 values ('2021-11-11 09:00:04',true, 5,5,5,5,5,5,"555","5555",5,5,5,5,'5','5'); +sql insert into tba1 values ('2021-11-11 09:00:05',true, 6,6,6,6,6,6,"666","6666",6,6,6,6,'6','6'); +sql insert into tba1 values ('2021-11-11 09:00:06',true, 7,7,7,7,7,7,"777","7777",7,7,7,7,'7','7'); +sql insert into tba1 values ('2021-11-11 09:00:07',true, 8,8,8,8,8,8,"888","8888",8,8,8,8,'8','8'); +sql insert into tba1 values ('2021-11-11 09:00:08',true, 9,9,9,9,9,9,"999","9999",9,9,9,9,'9','9'); +sql insert into tba1 values ('2021-11-11 09:00:09',true, 0,0,0,0,0,0,"000","0000",0,0,0,0,'0','0'); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== 
server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:10',true, 1,1,1,1,1,1,"111","1111",1,1,1,1,'1','1'); +sql insert into tba1 values ('2021-11-11 09:00:11',true, 2,2,2,2,2,2,"222","2222",2,2,2,2,'2','2'); +sql insert into tba1 values ('2021-11-11 09:00:12',true, 3,3,2,3,3,3,"333","3333",3,3,3,3,'3','3'); +sql insert into tba1 values ('2021-11-11 09:00:13',false,4,4,4,4,4,4,"444","4444",4,4,4,4,'4','4'); +sql insert into tba1 values ('2021-11-11 09:00:14',true, 5,5,5,5,5,5,"555","5555",5,5,5,5,'5','5'); +sql insert into tba1 values ('2021-11-11 09:00:15',true, 6,6,6,6,6,6,"666","6666",6,6,6,6,'6','6'); +sql insert into tba1 values ('2021-11-11 09:00:16',true, 7,7,7,7,7,7,"777","7777",7,7,7,7,'7','7'); +sql insert into tba1 values ('2021-11-11 09:00:17',true, 8,8,8,8,8,8,"888","8888",8,8,8,8,'8','8'); +sql insert into tba1 values ('2021-11-11 09:00:18',true, 9,9,9,9,9,9,"999","9999",9,9,9,9,'9','9'); +sql insert into tba1 values ('2021-11-11 09:00:19',true, 0,0,0,0,0,0,"000","0000",0,0,0,0,'0','0'); + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed + +sql insert into tba1 values ('2021-11-11 09:00:20',true, 1,1,1,1,1,1,"111","1111",1,1,1,1,'1','1'); +sql insert into tba1 values ('2021-11-11 09:00:21',true, 2,2,2,2,2,2,"222","2222",2,2,2,2,'2','2'); +sql insert into tba1 values ('2021-11-11 09:00:22',true, 3,3,2,3,3,3,"333","3333",3,3,3,3,'3','3'); +sql insert into tba1 values ('2021-11-11 09:00:23',false,4,4,4,4,4,4,"444","4444",4,4,4,4,'4','4'); +sql insert into tba1 values ('2021-11-11 09:00:24',true, 5,5,5,5,5,5,"555","5555",5,5,5,5,'5','5'); +sql insert into tba1 values ('2021-11-11 09:00:25',true, 6,6,6,6,6,6,"666","6666",6,6,6,6,'6','6'); +sql insert into tba1 values ('2021-11-11 09:00:26',true, 7,7,7,7,7,7,"777","7777",7,7,7,7,'7','7'); +sql insert into tba1 values 
('2021-11-11 09:00:27',true, 8,8,8,8,8,8,"888","8888",8,8,8,8,'8','8'); +sql insert into tba1 values ('2021-11-11 09:00:28',true, 9,9,9,9,9,9,"999","9999",9,9,9,9,'9','9'); +sql insert into tba1 values ('2021-11-11 09:00:29',true, 0,0,0,0,0,0,"000","0000",0,0,0,0,'0','0'); + +run general/compute/str_concat.sim +run general/compute/str_concat2.sim +run general/compute/str_concat_ws.sim +run general/compute/str_concat_ws2.sim +run general/compute/str_length.sim +run general/compute/str_length2.sim +run general/compute/str_char_length.sim +run general/compute/str_char_length2.sim +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/table_group.sim b/tests/script/general/compute/table_group.sim new file mode 100644 index 0000000000000000000000000000000000000000..cbce7963c3ede6688a2a6d1d0934fa6dfbc7f25b --- /dev/null +++ b/tests/script/general/compute/table_group.sim @@ -0,0 +1,2485 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c minRows -v 10 +system sh/exec.sh -n dnode1 -s start +sleep 200 +sql connect + +$db = m_db_tbg_limit +$tb = ct +$mt = st + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt ( ts timestamp, f1 int, f2 int ) tags(t int) + +$tb1 = ct1 +$tb2 = ct2 +$tb3 = ct3 +$tbnum = 3 +$rownum = 10 + +$x = 0 +while $x < $rownum + $i = 1 + while $i <= $tbnum + $tbi = $tb . $i + $inc = $x * 60 + $inc1 = $inc + $i + $ms = 1601481600000 + $inc1 + $v1 = $x * 10 + $v = $v1 + $i + sql insert into $tbi using $mt tags( $i ) values ( $ms , $v , $v ) + $i = $i + 1 + endw + $x = $x + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +sleep 200 +sql connect +sql use $db + +$x = 0 +while $x < $rownum + $i = 1 + while $i <= $tbnum + $tbi = $tb . 
$i + $inc = $x * 60 + $inc1 = $inc + $i + $ms = 1601481700000 + $inc1 + $v1 = $x * 10 + $temp = $rownum * 10 + $v1 = $v1 + $temp + $v = $v1 + $i + sql insert into $tbi using $mt tags( $i ) values ( $ms , $v , $v ) + $i = $i + 1 + endw + $x = $x + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +sql connect +sql use $db + +print execute sql select csum(f1) from st group by tbname + +sql select csum(f1) from st group by tbname + +if $rows != 60 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data21 != @33@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data31 != @64@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data41 != @105@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data51 != @156@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data61 != @217@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data71 != @288@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data81 != @369@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data91 != @460@ then + return -1 +endi +if $data92 != @ct1@ 
then + return -1 +endi +print execute sql select diff(f1) from st group by tbname + +sql select diff(f1) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @10@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @10@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @10@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @10@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0) from st group by tbname + +sql select derivative(f1, 1s, 0) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if 
$data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @166.666666667@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @166.666666667@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @166.666666667@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @0.100542932@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname + +sql select mavg(f1,2) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @26.000000000@ then 
+ return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @36.000000000@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @46.000000000@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @56.000000000@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @66.000000000@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @76.000000000@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @86.000000000@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @96.000000000@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname + +sql select csum(f1),t from st group by tbname + +if $rows != 60 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data21 != @33@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data31 != @64@ then + return -1 
+endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data41 != @105@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data51 != @156@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data61 != @217@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data71 != @288@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data81 != @369@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data91 != @460@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select diff(f1),t from st group by tbname + +sql select diff(f1),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 
00:00:00.241@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @10@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @10@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @10@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @10@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t from st group by tbname + +sql select derivative(f1, 1s, 0),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @166.666666667@ then + return 
-1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @166.666666667@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @166.666666667@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @166.666666667@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @0.100542932@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select mavg(f1,2),t from st group by tbname + +sql select mavg(f1,2),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @1@ 
then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @26.000000000@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @36.000000000@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @46.000000000@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @56.000000000@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @66.000000000@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @76.000000000@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @86.000000000@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @96.000000000@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select csum(f1) from st group by tbname limit 2 + +sql select csum(f1) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != 
@20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.002@ then + return -1 +endi +if $data21 != @2@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data31 != @14@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.003@ then + return -1 +endi +if $data41 != @3@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data51 != @16@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select diff(f1) from st group by tbname limit 2 + +sql select diff(f1) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0) from st group by tbname limit 2 + +sql select derivative(f1, 1s, 0) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 
00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname limit 2 + +sql select mavg(f1,2) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @7.000000000@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @17.000000000@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @8.000000000@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @18.000000000@ then + return 
-1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname limit 2 + +sql select csum(f1),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.002@ then + return -1 +endi +if $data21 != @2@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data31 != @14@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.003@ then + return -1 +endi +if $data41 != @3@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data51 != @16@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select diff(f1),t from st group by tbname limit 2 + +sql select diff(f1),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @10@ then + 
return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 + +sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if 
$data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2),t from st group by tbname limit 2 + +sql select mavg(f1,2),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @7.000000000@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @17.000000000@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @8.000000000@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @18.000000000@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select csum(f1) from st group by tbname limit 2 offset 9 + +sql select csum(f1) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data01 != @460@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data11 != @561@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.542@ then + return -1 +endi 
+if $data21 != @470@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data31 != @572@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.543@ then + return -1 +endi +if $data41 != @480@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data51 != @583@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select diff(f1) from st group by tbname limit 2 offset 9 + +sql select diff(f1) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0) from st group by tbname limit 2 offset 9 + +sql select derivative(f1, 1s, 0) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @0.100542932@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != 
@20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @0.100542932@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @0.100542932@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname limit 2 offset 9 + +sql select mavg(f1,2) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @96.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @106.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @97.000000000@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @107.000000000@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @98.000000000@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @108.000000000@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname limit 2 offset 9 
+ +sql select csum(f1),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data01 != @460@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data11 != @561@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.542@ then + return -1 +endi +if $data21 != @470@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data31 != @572@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.543@ then + return -1 +endi +if $data41 != @480@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data51 != @583@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select diff(f1),t from st group by tbname limit 2 offset 9 + +sql select diff(f1),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + 
return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 offset 9 + +sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @0.100542932@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @0.100542932@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @0.100542932@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 
+endi +print execute sql select mavg(f1,2),t from st group by tbname limit 2 offset 9 + +sql select mavg(f1,2),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @96.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @106.000000000@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @97.000000000@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @107.000000000@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @98.000000000@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @108.000000000@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select csum(f1),t,tbname from st group by tbname limit 2 offset 9 + +sql select csum(f1),t,tbname from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data01 != @460@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data04 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data11 != @561@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != 
@ct1@ then + return -1 +endi +if $data14 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.542@ then + return -1 +endi +if $data21 != @470@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data24 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data31 != @572@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data34 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.543@ then + return -1 +endi +if $data41 != @480@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data44 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data51 != @583@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +if $data54 != @ct3@ then + return -1 +endi +print execute sql select diff(f1),t,tbname from st group by tbname limit 2 offset 9 + +sql select diff(f1),t,tbname from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data04 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data14 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data24 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != 
@10@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data34 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data44 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +if $data54 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t,tbname from st group by tbname limit 2 offset 9 + +sql select derivative(f1, 1s, 0),t,tbname from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @0.100542932@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data04 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data14 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @0.100542932@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data24 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data34 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @0.100542932@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 
!= @ct3@ then + return -1 +endi +if $data44 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +if $data54 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2),t,tbname from st group by tbname limit 2 offset 9 + +sql select mavg(f1,2),t,tbname from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @96.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data04 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @106.000000000@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data14 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @97.000000000@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data24 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @107.000000000@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data34 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @98.000000000@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data44 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @108.000000000@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +if $data54 != @ct3@ then + return -1 +endi 
+print ================================ clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/compute/testSuite.sim b/tests/script/general/compute/testSuite.sim index 25c93ed29339c326628b885c34ed8766299460aa..fd1e5252920cde997d9c5be2fd60730b90e49974 100644 --- a/tests/script/general/compute/testSuite.sim +++ b/tests/script/general/compute/testSuite.sim @@ -20,3 +20,9 @@ run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim run general/compute/block_dist.sim +run general/compute/math_funcs.sim +run general/compute/string_funcs.sim +run general/compute/scalar_pow.sim +run general/compute/scalar_triangle.sim +run general/compute/scalar_str_concat_len.sim +run general/compute/table_group.sim diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 502de9583e9727d2dbee4a5601f974d6a46173ba..9b0dc8e964cf39909b803fe5ea20a7bdff8ceb59 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -561,19 +561,19 @@ endi sql_error select first(c6) - last(c6) *12 / count(*) from $stb group by c3; sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5; -if $rows != 10 then +if $rows != 11 then return -1 endi -if $data00 != 0.000000000 then +if $data00 != -0.002160000 then return -1 endi -if $data10 != 0.997600000 then +if $data10 != 0.000000000 then return -1 endi -if $data90 != 8.978400000 then +if $data90 != 7.980800000 then return -1 endi diff --git a/tests/script/general/parser/columnName_escape.sim b/tests/script/general/parser/columnName_escape.sim index dd3278d0dc98fa5378b7aed122dc39f6717372d5..13ad184518f18096d0d2088957fca28c19fbd86c 100644 --- a/tests/script/general/parser/columnName_escape.sim +++ b/tests/script/general/parser/columnName_escape.sim @@ -423,4 +423,48 
@@ if $data04 != 1 then return -1 endi +print ======================= test show create table/stable +sql create table tb3 (ts timestamp, `123` int, `123 456` int, `123.abc` int) +sql create table stb3 (ts timestamp, `123` int, `123 456` int, `123.abc` int) tags (t1 int) +sql create table ctb3 using stb3 tags (1) + +sql show create table tb3; +if $rows != 1 then + return -1 +endi + +if $data00 != @tb3@ then + return -1 +endi + +if $data01 != @create table `tb3` (`ts` TIMESTAMP,`123` INT,`123 456` INT,`123.abc` INT)@ then + return -1 +endi + +sql show create stable stb3; +if $rows != 1 then + return -1 +endi + +if $data00 != @stb3@ then + return -1 +endi + +if $data01 != @create table `stb3` (`ts` TIMESTAMP,`123` INT,`123 456` INT,`123.abc` INT) TAGS (`t1` INT)@ then + return -1 +endi + +sql show create table ctb3; +if $rows != 1 then + return -1 +endi + +if $data00 != @ctb3@ then + return -1 +endi + +if $data01 != @CREATE TABLE `ctb3` USING `stb3` TAGS (1)@ then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/create_tb_with_timestamp_tag.sim b/tests/script/general/parser/create_tb_with_timestamp_tag.sim new file mode 100644 index 0000000000000000000000000000000000000000..452fd2ddbf7d475a00f2fdd334e5065b6da71dc9 --- /dev/null +++ b/tests/script/general/parser/create_tb_with_timestamp_tag.sim @@ -0,0 +1,115 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db precision 'ns' +sql use $db + +sql create stable st1 (ts timestamp , c1 int) tags(t1 timestamp, t2 int) + +sql create table t1_0 using st1 tags(now, 0) +#nanoseconds +sql create table t1_1 using st1 tags(now + 1b, 0) +#microseconds +sql create table t1_2 using st1 tags(now + 1u, 0) +#milliseconds 
+sql create table t1_3 using st1 tags(now + 1a, 0) +#seconds +sql create table t1_4 using st1 tags(now + 1s, 0) +#minutes +sql create table t1_5 using st1 tags(now + 1m, 0) +#hours +sql create table t1_6 using st1 tags(now + 1h, 0) +#days +sql create table t1_7 using st1 tags(now + 1d, 0) +#weeks +sql create table t1_8 using st1 tags(now + 1w, 0) +#months(not supported) +sql_error create table t1_9 using st1 tags(now + 1n, 0) +#years(not supported) +sql_error create table t1_10 using st1 tags(now + 1y, 0) + + +sql create stable st2 (ts timestamp , c1 int) tags(t1 timestamp, t2 int) + +sql create table t2_0 using st2 tags(now, 0) +#nanoseconds +sql create table t2_1 using st2 tags(now - 1b, 0) +#microseconds +sql create table t2_2 using st2 tags(now - 1u, 0) +#milliseconds +sql create table t2_3 using st2 tags(now - 1a, 0) +#seconds +sql create table t2_4 using st2 tags(now - 1s, 0) +#minutes +sql create table t2_5 using st2 tags(now - 1m, 0) +#hours +sql create table t2_6 using st2 tags(now - 1h, 0) +#days +sql create table t2_7 using st2 tags(now - 1d, 0) +#weeks +sql create table t2_8 using st2 tags(now - 1w, 0) +#months(not supported) +sql_error create table t2_9 using st2 tags(now - 1n, 0) +#years(not supported) +sql_error create table t2_10 using st2 tags(now - 1y, 0) + +sql insert into t1_0 values (now, 0) +sql insert into t1_1 values (now, 1) +sql insert into t1_2 values (now, 2) +sql insert into t1_3 values (now, 3) +sql insert into t1_4 values (now, 4) +sql insert into t1_5 values (now, 5) +sql insert into t1_6 values (now, 6) +sql insert into t1_7 values (now, 7) +sql insert into t1_8 values (now, 8) + +sql insert into t2_0 values (now, 0) +sql insert into t2_1 values (now, 1) +sql insert into t2_2 values (now, 2) +sql insert into t2_3 values (now, 3) +sql insert into t2_4 values (now, 4) +sql insert into t2_5 values (now, 5) +sql insert into t2_6 values (now, 6) +sql insert into t2_7 values (now, 7) +sql insert into t2_8 values (now, 8) + +sql select * 
from st1 + +if $rows != 9 then + return -1 +endi + +sql select * from st2 + +if $rows != 9 then + return -1 +endi + +sql create stable st3 (ts timestamp , c1 int) tags (t1 timestamp, t2 timestamp, t3 timestamp, t4 timestamp, t5 timestamp, t6 timestamp, t7 timestamp, t8 timestamp, t9 timestamp) +sql create table t3 using st3 tags(now, now + 1b, now + 1u, now + 1a, now + 1s, now + 1m, now + 1h, now + 1d, now + 1w) +sql insert into t3 values (now, 1) + +sql select * from st3 +if $rows != 1 then + return -1 +endi + +sql create stable st4 (ts timestamp , c1 int) tags (t1 timestamp, t2 timestamp, t3 timestamp, t4 timestamp, t5 timestamp, t6 timestamp, t7 timestamp, t8 timestamp, t9 timestamp) +sql create table t4 using st4 tags(now, now - 1b, now - 1u, now - 1a, now - 1s, now - 1m, now - 1h, now - 1d, now - 1w) +sql insert into t4 values (now, 1) + +sql select * from st4 +if $rows != 1 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim index f6abb29afa7e35f88d3ed1bf80d2881559696e19..16803ea0965a66107a90aaf7ad37d715314a8258 100644 --- a/tests/script/general/parser/nestquery.sim +++ b/tests/script/general/parser/nestquery.sim @@ -419,12 +419,14 @@ if $data03 != @20-09-15 00:00:00.000@ then return -1 endi -sql select diff(val) from (select c1 val from nest_tb0); +sql_error select diff(val) from (select c1 val from nest_tb0); + +sql select diff(val) from (select ts,c1 val from nest_tb0); if $rows != 9999 then return -1 endi -if $data00 != @70-01-01 08:00:00.000@ then +if $data00 != @20-09-15 00:01:00.000@ then return -1 endi @@ -925,5 +927,18 @@ if $data00 != 24 then return -1 endi +sql select sum(a)/sum(b) from meters where ts >= '2021-09-30 15:00:00.000' and ts <= '2021-09-30 15:00:05.000' interval(1s) fill(null) group by area order by area; +if $rows != 12 then + return -1 +endi +if $data00 != @21-09-30 15:00:00.000@ then + return -1 +endi +if $data01 != 
NULL then + return -1 +endi +if $data02 != 0 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/scalar_expression.sim b/tests/script/general/parser/scalar_expression.sim new file mode 100644 index 0000000000000000000000000000000000000000..9f8c9c38caa81613a58aa4de596d453eae162ad7 --- /dev/null +++ b/tests/script/general/parser/scalar_expression.sim @@ -0,0 +1,251 @@ + +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 500 +sql connect + +$dbPrefix = db +$tbPrefix = ct +$mtPrefix = st +$tbNum = 2 +$rowNum = 50 + +print =============== step1 create stable/table +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0.25 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , $x , $y , $x , $x , $x , $y , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + endw + + $i = $i + 1 +endw + +print ================= step2 + +print execute sql select floor(3.0)+ceil(4.0) from ct0 +sql select floor(3.0)+ceil(4.0) from ct0 +if $data00 != @7.000000000@ then + return -1 +endi +if $data10 != @7.000000000@ then + return -1 +endi +if $data20 != @7.000000000@ then + return -1 +endi +if $data30 != @7.000000000@ then + return -1 +endi +if $data40 != @7.000000000@ then + return -1 +endi +if $data50 != @7.000000000@ then + return -1 +endi +if $data60 != @7.000000000@ then + return -1 +endi +if $data70 != @7.000000000@ then + return -1 +endi +if $data80 != @7.000000000@ then + return -1 +endi +if $data90 != @7.000000000@ then + return -1 +endi +print execute sql select sum(c1)+3.0+4.0 from st0 +sql select sum(c1)+3.0+4.0 from st0 +if $data00 != @2457.000000000@ then + return -1 +endi +print execute sql select sin(log(avg(c1),sum(c2))+3)%4 from st0 +sql select sin(log(avg(c1),sum(c2))+3)%4 from st0 +if $data00 != @-0.265074286@ then + return -1 +endi +print execute sql select log(pow(length(concat('3','4')),2),c2) from st0 +sql select log(pow(length(concat('3','4')),2),c2) from st0 +if $data00 != @-1.000000000@ then + return -1 +endi +if $data10 != @inf@ then + return -1 +endi +if $data20 != @2.000000000@ then + return -1 +endi +if $data30 != @1.261859507@ then + return -1 +endi +if $data40 != @1.000000000@ then + return -1 +endi +if $data50 != @0.861353116@ then + return -1 +endi +if $data60 != @0.773705614@ then + return -1 +endi +if $data70 != @0.712414374@ then + return -1 +endi +if $data80 != @0.666666667@ then + return -1 +endi +if $data90 != @0.630929754@ then + return -1 +endi +print execute sql select round(log(pow(length(concat('3','4')),2),c2)+floor(c3))+2 from st0 +sql select 
round(log(pow(length(concat('3','4')),2),c2)+floor(c3))+2 from st0 +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @inf@ then + return -1 +endi +if $data20 != @6.000000000@ then + return -1 +endi +if $data30 != @6.000000000@ then + return -1 +endi +if $data40 != @7.000000000@ then + return -1 +endi +if $data50 != @8.000000000@ then + return -1 +endi +if $data60 != @9.000000000@ then + return -1 +endi +if $data70 != @10.000000000@ then + return -1 +endi +if $data80 != @11.000000000@ then + return -1 +endi +if $data90 != @12.000000000@ then + return -1 +endi +print execute sql select sin(pow(c1,log(c2,2))+pow(c2,2)) as val from ct0 union all select pow(c4,2)+tan(sin(c5)/cos(c6)) as val from ct1 +sql select sin(pow(c1,log(c2,2))+pow(c2,2)) as val from ct0 union all select pow(c4,2)+tan(sin(c5)/cos(c6)) as val from ct1 +if $data00 != @-nan@ then + return -1 +endi +if $data10 != @0.909297427@ then + return -1 +endi +if $data20 != @-0.279415498@ then + return -1 +endi +if $data30 != @0.843325058@ then + return -1 +endi +if $data40 != @0.551426681@ then + return -1 +endi +if $data50 != @-0.840606612@ then + return -1 +endi +if $data60 != @0.436161076@ then + return -1 +endi +if $data70 != @0.897498185@ then + return -1 +endi +if $data80 != @-0.885952778@ then + return -1 +endi +if $data90 != @0.429470715@ then + return -1 +endi +print execute sql select asin(c1) from st0 limit 1 +sql select asin(c1) from st0 limit 1 +if $data00 != @0.000000000@ then + return -1 +endi +print execute sql select pow(c1,2) from st0 limit 1 offset 2; +sql select pow(c1,2) from st0 limit 1 offset 2; +if $data00 != @4.000000000@ then + return -1 +endi +print execute sql select cos(c1) from db0.ct0, db0.ct1 where ct0.ts==ct1.ts +sql select cos(c1) from db0.ct0, db0.ct1 where ct0.ts==ct1.ts +if $data00 != @1.000000000@ then + return -1 +endi +if $data10 != @0.540302306@ then + return -1 +endi +if $data20 != @-0.416146837@ then + return -1 +endi +if $data30 != @-0.989992497@ then 
+ return -1 +endi +if $data40 != @-0.653643621@ then + return -1 +endi +if $data50 != @0.283662185@ then + return -1 +endi +if $data60 != @0.960170287@ then + return -1 +endi +if $data70 != @0.753902254@ then + return -1 +endi +if $data80 != @-0.145500034@ then + return -1 +endi +if $data90 != @-0.911130262@ then + return -1 +endi +print ============== invalid expressions +$stb = st0 +sql_error select agg(c1)+c2 from $stb +sql_error select agg(c1+2) from $stb +sql_error select agg(ceil(c1))+c2 from $stb +sql_error select agg(ceil(c1)) from $stb +sql_error select asin(c1) from $stb group by tbname +sql_error select asin(c2) from $stb group by tgcol +sql_error select asin(c1) from $stb session_window(ts, 1s) +sql_error select cos(c1) from $stb state_window(f1) +sql_error select pow(c2,2) from $stb interval(1s) sliding(500a) +sql_error select pow(c2,2) from $stb state_window(f1) +sql_error select tan(c1) from $stb from d.st slimit 1 +sql_error select c1+2, tbname from $stb group by tbname +sql select sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(c1)))))))))))))))) from $stb +sql_error select sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(c1))))))))))))))))) from $stb +print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + diff --git a/tests/script/general/parser/slimit_query.sim b/tests/script/general/parser/slimit_query.sim index 0a793f0611a5875f53ac75644f3156ad9aa6cc65..0dbcffa0a43b596d542c8799a9a94170e6f71f5a 100644 --- a/tests/script/general/parser/slimit_query.sim +++ b/tests/script/general/parser/slimit_query.sim @@ -31,6 +31,8 @@ $tsu = $tsu + $ts0 #sql_error select top(c1, 1) from $stb where ts >= $ts0 and ts <= $tsu slimit 5 offset 1 #sql_error select bottom(c1, 1) from $stb where ts >= $ts0 and ts <= $tsu slimit 5 offset 1 +sql_error select t1 from $stb slimit 5 offset 1; + ### select from stb + group by + slimit 
offset sql select max(c1), min(c2), avg(c3), sum(c4), spread(c5), sum(c6), count(c7), first(c8), last(c9) from $stb group by t1 slimit 5 soffset 0 if $rows != 5 then diff --git a/tests/script/general/parser/tagName_escape.sim b/tests/script/general/parser/tagName_escape.sim index 1dc9121a45ea23201d63dedfb7a6c446ee7b0e87..79073c335ef307770419f3b99736a70f94d73360 100644 --- a/tests/script/general/parser/tagName_escape.sim +++ b/tests/script/general/parser/tagName_escape.sim @@ -203,5 +203,35 @@ if $data24 != NULL then return -1 endi +print ======================= test show create table/stable +sql create stable stb3 (ts timestamp, c0 int) tags (`123` int, `123 456` int, `123.abc` int) +sql create table ctb3 using stb3 (`123`, `123 456`, `123.abc`) tags (1, 1, 1) + +sql show create table stb3; +if $rows != 1 then + return -1 +endi + +if $data00 != @stb3@ then + return -1 +endi + +if $data01 != @create table `stb3` (`ts` TIMESTAMP,`c0` INT) TAGS (`123` INT,`123 456` INT,`123.abc` INT)@ then + return -1 +endi + +sql show create table ctb3; +if $rows != 1 then + return -1 +endi + +if $data00 != @ctb3@ then + return -1 +endi + +if $data01 != @CREATE TABLE `ctb3` USING `stb3` TAGS (1,1,1)@ then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 2696b93acedd8c116dfd0922798012c27c8e8692..9a8f602901507bc4fc31d3902461394446a3067b 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -414,6 +414,7 @@ cd ../../../debug; make ./test.sh -f general/parser/between_and.sim ./test.sh -f general/parser/last_cache.sim ./test.sh -f unique/big/balance.sim +./test.sh -f general/parser/nestquery.sim ./test.sh -f general/parser/udf.sim ./test.sh -f general/parser/udf_dll.sim diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim index a705601d96f64f2690c3b933228b02b49d59af68..528dbbe04044ba4007d00edd68f31286f32ee889 100644 --- 
a/tests/script/regressionSuite.sim +++ b/tests/script/regressionSuite.sim @@ -38,6 +38,12 @@ run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim run general/compute/block_dist.sim +run general/compute/math_funcs.sim +run general/compute/string_funcs.sim +run general/compute/scalar_pow.sim +run general/compute/scalar_triangle.sim +run general/compute/scalar_str_concat_len.sim +run general/compute/table_group.sim run general/db/alter_option.sim run general/db/alter_tables_d2.sim run general/db/alter_tables_v1.sim @@ -140,6 +146,7 @@ run general/parser/tags_dynamically_specifiy.sim run general/parser/set_tag_vals.sim run general/parser/repeatAlter.sim run general/parser/precision_ns.sim +run general/parser/scalar_expression.sim ##unsupport run general/parser/slimit_alter_tags.sim run general/stable/disk.sim run general/stable/dnode3.sim diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim index 14a6e92f064f6077d549ad2c48c5ada3da83995a..7e647925d1d3d66d21f279ace852e3fc12496510 100644 --- a/tests/script/unique/dnode/alternativeRole.sim +++ b/tests/script/unique/dnode/alternativeRole.sim @@ -30,35 +30,50 @@ sql create dnode $hostname2 system sh/exec.sh -n dnode2 -s start sql create dnode $hostname3 system sh/exec.sh -n dnode3 -s start -sleep 5000 +sleep 3000 + +$x = 0 +show1: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi sql show dnodes print dnode1 $data5_1 -print dnode1 $data5_2 -print dnode1 $data5_3 +print dnode2 $data5_2 +print dnode3 $data5_3 if $data5_1 != mnode then - return -1 + goto show1 endi if $data5_2 != vnode then - return -1 + goto show1 endi if $data5_3 != any then - return -1 + goto show1 endi +show2: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi + sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data2_3 if $data2_1 != master then - return -1 + goto show2 endi if $data2_2 != null then - 
return -1 + goto show2 endi if $data2_3 != slave then - return -1 + goto show2 endi print ========== step2 @@ -72,26 +87,28 @@ sql create table d1.t6 (ts timestamp, i int) sql create table d1.t7 (ts timestamp, i int) sql create table d1.t8 (ts timestamp, i int) +show3: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi + sql show dnodes print dnode1 $data2_1 print dnode2 $data2_2 print dnode3 $data2_3 if $data2_1 != 0 then - return -1 + goto show3 endi if $data2_2 != 1 then - return -1 + goto show3 endi if $data2_3 != 1 then - return -1 + goto show3 endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/system-test/0-management/0-database/.gitkeep b/tests/system-test/0-management/0-database/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/0-management/1-stable/.gitkeep b/tests/system-test/0-management/1-stable/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/0-management/1-stable/create_col_tag.py b/tests/system-test/0-management/1-stable/create_col_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..d195e73321ea24a8b9de1f2ca5f9c07f9182dd65 --- /dev/null +++ b/tests/system-test/0-management/1-stable/create_col_tag.py @@ -0,0 +1,853 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +import time +import os +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def caseDescription(self): + ''' + case1:The escape char "`" can be used for both tag name and column name + case2:create stable(column&tag); insert data; show stable; show create table; add stable(column&tag);change stable(tag);drop stable(column&tag);modify stable(column&tag)(binary和nchar);drop stable; + case3:create stable_child; insert data; show stable_child; show create stable_child; drop stable_child; + case4:create regular_table(column); insert data; show regular_table; show create regular_table; add regular_table(column);drop regular_table(column);modify regular_table(column)(binary和nchar);drop regular_table; + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + now = time.time() + self.ts = int(round(now * 1000)) + + def table1_checkall(self,sql): + tdLog.info(sql) + tdSql.query(sql) + tdSql.checkData(0,1,1) + tdSql.checkData(0,2,2) + tdSql.checkData(0,3,3) + tdSql.checkData(0,4,4) + tdSql.checkData(0,5,'True') + tdSql.checkData(0,6,6) + tdSql.checkData(0,7,7) + tdSql.checkData(0,8,8) + tdSql.checkData(0,9,9) + tdSql.checkData(0,10,'1970-01-01 08:00:00.010') + + def table1_checkall_1(self,sql): + tdSql.query(sql) + tdSql.checkData(0,1,1) + + def table1_checkall_2(self,sql): + self.table1_checkall_1(sql) + tdSql.checkData(0,2,2) + + def table1_checkall_3(self,sql): + self.table1_checkall_2(sql) + tdSql.checkData(0,3,3) + + def table1_checkall_4(self,sql): + 
self.table1_checkall_3(sql) + tdSql.checkData(0,4,4) + + def table1_checkall_5(self,sql): + self.table1_checkall_4(sql) + tdSql.checkData(0,5,'True') + + def table1_checkall_6(self,sql): + self.table1_checkall_5(sql) + tdSql.checkData(0,6,6) + + def table1_checkall_7(self,sql): + self.table1_checkall_6(sql) + tdSql.checkData(0,7,7) + + def table1_checkall_8(self,sql): + self.table1_checkall_7(sql) + tdSql.checkData(0,8,8) + + def table1_checkall_9(self,sql): + self.table1_checkall_8(sql) + tdSql.checkData(0,9,9) + + def table1_checkall_10(self,sql): + self.table1_checkall_9(sql) + tdSql.checkData(0,10,'1970-01-01 08:00:00.010') + + def run(self): + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf 0-management/1-stable/%s.sql" % testcaseFilename ) + tdSql.prepare() + + print("==============step1") + print("prepare data") + + # case for defect: https://jira.taosdata.com:18080/browse/TD-2693 + tdSql.execute("create database db2") + tdSql.execute("use db2") + + print("==============new version [escape character] for stable==============") + print("==============step1,#create db.stable,db.table; insert db.table; show db.table; select db.table; drop db.table;") + print("prepare data") + + self.stb1 = "stable_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + self.tb1 = "table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + + self.col_base = "123~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + + self.col_int = "stable_col_int%s" %self.col_base + print(self.col_int) + self.col_bigint = "stable_col_bigint%s" %self.col_base + self.col_smallint = "stable_col_smallint%s" %self.col_base + self.col_tinyint = "stable_col_tinyint%s" %self.col_base + self.col_bool = "stable_col_bool%s" %self.col_base + self.col_binary = "stable_col_binary%s" %self.col_base + self.col_nchar = "stable_col_nchar%s" %self.col_base + self.col_float = "stable_col_float%s" %self.col_base + self.col_double = "stable_col_double%s" %self.col_base + self.col_ts = "stable_col_ts%s" %self.col_base + + self.tag_base = 
"abc~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + self.tag_int = "stable_tag_int%s" %self.tag_base + self.tag_bigint = "stable_tag_bigint%s" %self.tag_base + self.tag_smallint = "stable_tag_smallint%s" %self.tag_base + self.tag_tinyint = "stable_tag_tinyint%s" %self.tag_base + self.tag_bool = "stable_tag_bool%s" %self.tag_base + self.tag_binary = "stable_tag_binary%s" %self.tag_base + self.tag_nchar = "stable_tag_nchar%s" %self.tag_base + self.tag_float = "stable_tag_float%s" %self.tag_base + self.tag_double = "stable_tag_double%s" %self.tag_base + self.tag_ts = "stable_tag_ts%s" %self.tag_base + + tdSql.execute('''create stable db.`%s` (ts timestamp, `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) + tags(loc nchar(20), `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp);''' + %(self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, + self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts)) + tdSql.query("describe db.`%s` ; " %self.stb1) + tdSql.checkRows(22) + + tdSql.query("select _block_dist() from db.`%s` ; " %self.stb1) + tdSql.checkRows(0) + + tdSql.query("show create stable db.`%s` ; " %self.stb1) + tdSql.checkData(0, 0, self.stb1) + tdSql.checkData(0, 1, "create table `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)\ + TAGS (`loc` NCHAR(20),`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" + %(self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, 
self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, + self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts)) + + tdSql.execute("create table db.`table!1` using db.`%s` tags('table_1' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')" %self.stb1) + tdSql.query("describe db.`table!1` ; ") + tdSql.checkRows(22) + + time.sleep(10) + tdSql.query("show create table db.`table!1` ; ") + tdSql.checkData(0, 0, "table!1") + tdSql.checkData(0, 1, "CREATE TABLE `table!1` USING `%s` TAGS (\"table_1\",0,0,0,0,false,\"0\",\"0\",0.000000,0.000000,\"0\")" %self.stb1) + + tdSql.execute("insert into db.`table!1` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)") + sql = " select * from db.`table!1`; " + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`table!1`; '''\ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db.`table!1`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) ) + sql = " select * from db.`table!1`; " + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from db.`table!1`; ") + tdSql.checkData(0, 0, 2) + tdSql.query("select _block_dist() from db.`%s` ; " %self.stb1) + tdSql.checkRows(1) + + tdSql.execute("create table db.`%s` using db.`%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" %(self.tb1,self.stb1)) + 
tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.query("show create table db.`%s` ; " %self.tb1) + tdSql.checkData(0, 0, self.tb1) + tdSql.checkData(0, 1, "CREATE TABLE `%s` USING `%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" %(self.tb1,self.stb1)) + + tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.tb1) + sql = "select * from db.`%s` ; " %self.tb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s` ; '''\ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\ + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts, self.tb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db.`%s`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\ + %(self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) ) + sql = " select * from db.`%s` ; " %self.tb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = " select * from db.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ + %(self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from db.`%s`; " %self.tb1) + tdSql.checkData(0, 0, 2) + sql = "select * from 
db.`%s` ; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + tdSql.query("select count(*) from db.`%s`; " %self.stb1) + tdSql.checkData(0, 0, 4) + + sql = "select * from (select * from db.`%s`) ; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + tdSql.query("select count(*) from (select * from db.`%s`) ; " %self.stb1) + tdSql.checkData(0, 0, 4) + + sql = "select * from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\ + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`\ + where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\ + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1, \ + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, 
self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + tdSql.query("show db.stables like 'stable_1%' ") + tdSql.checkRows(1) + tdSql.query("show db.tables like 'table%' ") + tdSql.checkRows(2) + + self.cr_tb1 = "create_table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + tdSql.execute("create table db.`%s` as select avg(`%s`) from db.`%s` where ts > now interval(1m) sliding(30s);" %(self.cr_tb1,self.col_bigint,self.stb1)) + tdSql.query("show db.tables like 'create_table_%' ") + tdSql.checkRows(1) + + print("==============drop\ add\ change\ modify column or tag") + print("==============drop==============") + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_ts)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(21) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_double)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(20) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_float)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(19) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_nchar)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(18) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_binary)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(17) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_bool)) + sql = " 
select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(16) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_tinyint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(15) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_smallint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(14) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_bigint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(13) + tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_int)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(12) + + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_ts)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_9(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(11) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_double)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_8(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(10) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_float)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_7(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(9) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_nchar)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = 
self.table1_checkall_6(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(8) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_binary)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_5(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(7) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_bool)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_4(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(6) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_tinyint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_3(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(5) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_smallint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_2(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_bigint)) + sql = " select * from db.`%s`; " %self.stb1 + datacheck = self.table1_checkall_1(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(3) + tdSql.error("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_int)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(3) + + print("==============add==============") + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` bigint; " %(self.stb1, self.col_bigint)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` smallint; " %(self.stb1, self.col_smallint)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(5) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` tinyint; " %(self.stb1, self.col_tinyint)) + tdSql.query("describe db.`%s` ; " %self.tb1) + 
tdSql.checkRows(6) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` bool; " %(self.stb1, self.col_bool)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(7) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` binary(20); " %(self.stb1, self.col_binary)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(8) + + tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6)" %self.tb1) + sql = "select * from db.`%s` order by ts desc; " %self.tb1 + datacheck = self.table1_checkall_5(sql) + + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` nchar(20); " %(self.stb1, self.col_nchar)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(9) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` float; " %(self.stb1, self.col_float)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(10) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` double; " %(self.stb1, self.col_double)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(11) + tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` timestamp; " %(self.stb1, self.col_ts)) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(12) + + tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.tb1) + sql = "select * from db.`%s` order by ts desc; " %self.tb1 + datacheck = self.table1_checkall(sql) + + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` int; " %(self.stb1, self.tag_int)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(13) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` bigint; " %(self.stb1, self.tag_bigint)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(14) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` smallint; " %(self.stb1, self.tag_smallint)) + sql 
= " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(15) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` tinyint; " %(self.stb1, self.tag_tinyint)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(16) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` bool; " %(self.stb1, self.tag_bool)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(17) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` binary(20); " %(self.stb1, self.tag_binary)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(18) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` nchar(20); " %(self.stb1, self.tag_nchar)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(19) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` float; " %(self.stb1, self.tag_float)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(20) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` double; " %(self.stb1, self.tag_double)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(21) + tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` timestamp; " %(self.stb1, self.tag_ts)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + 
tdSql.checkRows(22) + + print("==============change==============") + self.tag_base_change = "abcdas" + self.tag_int_change = "stable_tag_int%s" %self.tag_base_change + self.tag_bigint_change = "stable_tag_bigint%s" %self.tag_base_change + self.tag_smallint_change = "stable_tag_smallint%s" %self.tag_base_change + self.tag_tinyint_change = "stable_tag_tinyint%s" %self.tag_base_change + self.tag_bool_change = "stable_tag_bool%s" %self.tag_base_change + self.tag_binary_change = "stable_tag_binary%s" %self.tag_base_change + self.tag_nchar_change = "stable_tag_nchar%s" %self.tag_base_change + self.tag_float_change = "stable_tag_float%s" %self.tag_base_change + self.tag_double_change = "stable_tag_double%s" %self.tag_base_change + self.tag_ts_change = "stable_tag_ts%s" %self.tag_base_change + + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_int, self.tag_int_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_bigint, self.tag_bigint_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_smallint, self.tag_smallint_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_tinyint, self.tag_tinyint_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG 
`%s` `%s`; " %(self.stb1, self.tag_bool, self.tag_bool_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_binary, self.tag_binary_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_nchar, self.tag_nchar_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_float, self.tag_float_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_double, self.tag_double_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_ts, self.tag_ts_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + + print("==============modify==============") + # TD-10810 + tdSql.execute("ALTER STABLE db.`%s` MODIFY TAG `%s` binary(30); ; " %(self.stb1, self.tag_binary_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + 
tdSql.checkRows(22) + tdSql.execute("ALTER STABLE db.`%s` MODIFY TAG `%s` nchar(30); ; " %(self.stb1, self.tag_nchar_change)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + + tdSql.execute("ALTER STABLE db.`%s` MODIFY COLUMN `%s` binary(30); ; " %(self.stb1, self.col_binary)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + tdSql.execute("ALTER STABLE db.`%s` MODIFY COLUMN `%s` nchar(30); ; " %(self.stb1, self.col_nchar)) + sql = " select * from db.`%s` order by ts desc; " %self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " %self.tb1) + tdSql.checkRows(22) + + print("==============drop table\stable") + try: + tdSql.execute("drop table db.`%s` " %self.tb1) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from db.`%s`" %self.tb1) + tdSql.query("show db.stables like 'stable_1%' ") + tdSql.checkRows(1) + + try: + tdSql.execute("drop table db.`%s` " %self.stb1) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from db.`%s`" %self.tb1) + tdSql.error("select * from db.`%s`" %self.stb1) + + + print("==============step2,#create stable,table; insert table; show table; select table; drop table") + + self.stb2 = "stable_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + self.tb2 = "table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + + tdSql.execute("create stable `%s` (ts timestamp, i int) tags(j int);" %self.stb2) + tdSql.query("describe `%s` ; "%self.stb2) + tdSql.checkRows(3) + + tdSql.query("select _block_dist() from `%s` ; " %self.stb2) + tdSql.checkRows(0) + + tdSql.query("show create stable `%s` ; " %self.stb2) + tdSql.checkData(0, 0, self.stb2) + tdSql.checkData(0, 1, "create table `%s` (`ts` TIMESTAMP,`i` INT) TAGS (`j` INT)" 
%self.stb2) + + tdSql.execute("create table `table!2` using `%s` tags(1)" %self.stb2) + tdSql.query("describe `table!2` ; ") + tdSql.checkRows(3) + + time.sleep(10) + + tdSql.query("show create table `table!2` ; ") + tdSql.checkData(0, 0, "table!2") + tdSql.checkData(0, 1, "CREATE TABLE `table!2` USING `%s` TAGS (1)" %self.stb2) + tdSql.execute("insert into `table!2` values(now, 1)") + tdSql.query("select * from `table!2`; ") + tdSql.checkRows(1) + tdSql.query("select count(*) from `table!2`; ") + tdSql.checkData(0, 0, 1) + tdSql.query("select _block_dist() from `%s` ; " %self.stb2) + tdSql.checkRows(1) + + tdSql.execute("create table `%s` using `%s` tags(1)" %(self.tb2,self.stb2)) + tdSql.query("describe `%s` ; " %self.tb2) + tdSql.checkRows(3) + tdSql.query("show create table `%s` ; " %self.tb2) + tdSql.checkData(0, 0, self.tb2) + tdSql.checkData(0, 1, "CREATE TABLE `%s` USING `%s` TAGS (1)" %(self.tb2,self.stb2)) + tdSql.execute("insert into `%s` values(now, 1)" %self.tb2) + tdSql.query("select * from `%s` ; " %self.tb2) + tdSql.checkRows(1) + tdSql.query("select count(*) from `%s`; " %self.tb2) + tdSql.checkData(0, 0, 1) + tdSql.query("select * from `%s` ; " %self.stb2) + tdSql.checkRows(2) + tdSql.query("select count(*) from `%s`; " %self.stb2) + tdSql.checkData(0, 0, 2) + + tdSql.query("select * from (select * from `%s`) ; " %self.stb2) + tdSql.checkRows(2) + tdSql.query("select count(*) from (select * from `%s` ); " %self.stb2) + tdSql.checkData(0, 0, 2) + + tdSql.query("show stables like 'stable_2%' ") + tdSql.checkRows(1) + tdSql.query("show tables like 'table%' ") + tdSql.checkRows(2) + + + #TD-10536 + self.cr_tb2 = "create_table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + tdSql.execute("create table `%s` as select * from `%s` ;" %(self.cr_tb2,self.stb2)) + tdSql.query("show db.tables like 'create_table_%' ") + tdSql.checkRows(1) + + print("==============drop table\stable") + try: + tdSql.execute("drop table `%s` " %self.tb2) + except 
Exception as e: + tdLog.exit(e) + + tdSql.error("select * from `%s`" %self.tb2) + tdSql.query("show stables like 'stable_2%' ") + tdSql.checkRows(1) + + try: + tdSql.execute("drop table `%s` " %self.stb2) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from `%s`" %self.tb2) + tdSql.error("select * from `%s`" %self.stb2) + + + print("==============step3,#create regular_table; insert regular_table; show regular_table; select regular_table; drop regular_table") + self.regular_table = "regular_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + + self.regular_col_base = "123@#$%^&*()-_+=[]{};:,<.>/?~!$%^" + + self.col_int = "regular_table_col_int%s" %self.regular_col_base + print(self.col_int) + self.col_bigint = "regular_table_col_bigint%s" %self.regular_col_base + self.col_smallint = "regular_table_col_smallint%s" %self.regular_col_base + self.col_tinyint = "regular_table_col_tinyint%s" %self.regular_col_base + self.col_bool = "regular_table_col_bool%s" %self.regular_col_base + self.col_binary = "regular_table_col_binary%s" %self.regular_col_base + self.col_nchar = "regular_table_col_nchar%s" %self.regular_col_base + self.col_float = "regular_table_col_float%s" %self.regular_col_base + self.col_double = "regular_table_col_double%s" %self.regular_col_base + self.col_ts = "regular_table_col_ts%s" %self.regular_col_base + + tdSql.execute("create table `%s` (ts timestamp,`%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , \ + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) ;"\ + %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + tdSql.query("describe `%s` ; "%self.regular_table) + tdSql.checkRows(11) + + tdSql.query("select _block_dist() from `%s` ; " %self.regular_table) + tdSql.checkRows(1) + + tdSql.query("show create table `%s` ; " %self.regular_table) + 
tdSql.checkData(0, 0, self.regular_table) + tdSql.checkData(0, 1, "create table `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" + %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + + tdSql.execute("insert into `%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.regular_table) + sql = "select * from `%s` ; " %self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`; '''\ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db2.`%s` (ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\ + %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) ) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = " select * from db2.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ + %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from `%s`; " %self.regular_table) + tdSql.checkData(0, 0, 2) + tdSql.query("select _block_dist() from `%s` ; " 
%self.regular_table) + tdSql.checkRows(1) + + sql = "select * from (select * from `%s`) ; " %self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`\ + where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \ + %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\ + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table, \ + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from (select * from `%s` ); " %self.regular_table) + tdSql.checkData(0, 0, 2) + + tdSql.query("show tables like 'regular_table%' ") + tdSql.checkRows(1) + + self.crr_tb = "create_r_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + tdSql.execute("create table `%s` as select * from `%s` ;" %(self.crr_tb,self.regular_table)) + tdSql.query("show db2.tables like 'create_r_table%' ") + tdSql.checkRows(1) + + + print("==============drop\ add\ change\ modify column ") + print("==============drop==============") + tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_ts)) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall_9(sql) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(10) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_double)) + sql = " select * from `%s`; " 
%self.regular_table + datacheck = self.table1_checkall_8(sql) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(9) + tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_float)) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall_7(sql) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(8) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_nchar)) + sql = " select * from `%s`; " %self.regular_table + datacheck = self.table1_checkall_6(sql) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(7) + tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_binary)) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall_5(sql) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(6) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_bool)) + sql = " select * from `%s`; " %self.regular_table + datacheck = self.table1_checkall_4(sql) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(5) + tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_tinyint)) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall_3(sql) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_smallint)) + sql = " select * from `%s`; " %self.regular_table + datacheck = self.table1_checkall_2(sql) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(3) + tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_bigint)) + sql = " select * from db2.`%s`; " %self.regular_table + datacheck = self.table1_checkall_1(sql) + tdSql.query("describe db2.`%s` ; " 
%self.regular_table) + tdSql.checkRows(2) + tdSql.error("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_int)) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(2) + + print("==============add==============") + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` bigint; " %(self.regular_table, self.col_bigint)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(3) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` smallint; " %(self.regular_table, self.col_smallint)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` tinyint; " %(self.regular_table, self.col_tinyint)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(5) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` bool; " %(self.regular_table, self.col_bool)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(6) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` binary(20); " %(self.regular_table, self.col_binary)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(7) + + tdSql.execute("insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6)" %self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " %self.regular_table + datacheck = self.table1_checkall_5(sql) + + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` nchar(20); " %(self.regular_table, self.col_nchar)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(8) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` float; " %(self.regular_table, self.col_float)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(9) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` double; " %(self.regular_table, self.col_double)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(10) + tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` timestamp; 
" %(self.regular_table, self.col_ts)) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(11) + + tdSql.execute("insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " %self.regular_table + datacheck = self.table1_checkall(sql) + + + print("==============change, regular not support==============") + + + print("==============modify==============") + # TD-10810 + tdSql.execute("ALTER TABLE db2.`%s` MODIFY COLUMN `%s` binary(30); ; " %(self.regular_table, self.col_binary)) + sql = " select * from db2.`%s` order by ts desc; " %self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.query("describe db2.`%s` ; " %self.regular_table) + tdSql.checkRows(11) + tdSql.execute("ALTER TABLE `%s` MODIFY COLUMN `%s` nchar(30); ; " %(self.regular_table, self.col_nchar)) + sql = " select * from `%s` order by ts desc; " %self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.query("describe `%s` ; " %self.regular_table) + tdSql.checkRows(11) + + + print("==============drop table\stable") + try: + tdSql.execute("drop table `%s` " %self.regular_table) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from `%s`" %self.regular_table) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/0-management/2-table/.gitkeep b/tests/system-test/0-management/2-table/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/0-management/3-tag/.gitkeep b/tests/system-test/0-management/3-tag/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/0-management/4-others/.gitkeep 
b/tests/system-test/0-management/4-others/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/1-insert/0-sql/.gitkeep b/tests/system-test/1-insert/0-sql/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/1-insert/0-sql/basic.py b/tests/system-test/1-insert/0-sql/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..3604224c512d4a9f85de30a9069136801d343503 --- /dev/null +++ b/tests/system-test/1-insert/0-sql/basic.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: insert 倒序插入 + case2: 语法解析错误同时meta请求也发出去了导致callback中处理逻辑失效 + case3: [TD-XXXX]insert语句在values之间加入多个逗号 + ''' + return + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + ret = tdSql.execute('create table tb (ts timestamp, speed int)') + + insertRows = 10 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into tb values (now + %dm, %d)' % + (i, i)) + + tdLog.info("insert earlier data") + tdSql.execute('insert into tb values (now - 5m , 10)') + tdSql.execute('insert into tb values (now - 6m , 10)') + 
tdSql.execute('insert into tb values (now - 7m , 10)') + tdSql.execute('insert into tb values (now - 8m , 10)') + + tdSql.query("select * from tb") + tdSql.checkRows(insertRows + 4) + + # test case for https://jira.taosdata.com:18080/browse/TD-3716: + tdSql.error("insert into tb(now, 1)") + # test case for TD-10717 + tdSql.error("insert into tb values(now,1),,(now+1s,1)") + tdSql.execute("insert into tb values(now+2s,1),(now+3s,1),(now+4s,1)") + tdSql.query("select * from tb") + tdSql.checkRows(insertRows + 4 +3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/1-stmt/.gitkeep b/tests/system-test/1-insert/1-stmt/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/1-insert/2-schemaless/json/.gitkeep b/tests/system-test/1-insert/2-schemaless/json/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/1-insert/2-schemaless/line/.gitkeep b/tests/system-test/1-insert/2-schemaless/line/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/1-insert/2-schemaless/telnet/.gitkeep b/tests/system-test/1-insert/2-schemaless/telnet/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/0-aggregate/.gitkeep b/tests/system-test/2-query/0-aggregate/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/1-select/.gitkeep b/tests/system-test/2-query/1-select/.gitkeep new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/2-compute/.gitkeep b/tests/system-test/2-query/2-compute/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/3-join/.gitkeep b/tests/system-test/2-query/3-join/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/4-union/.gitkeep b/tests/system-test/2-query/4-union/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/5-session/.gitkeep b/tests/system-test/2-query/5-session/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/6-state_window/.gitkeep b/tests/system-test/2-query/6-state_window/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/7-nest/.gitkeep b/tests/system-test/2-query/7-nest/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/8-udf/.gitkeep b/tests/system-test/2-query/8-udf/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/9-others/.gitkeep b/tests/system-test/2-query/9-others/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/2-query/9-others/TD-11389.py b/tests/system-test/2-query/9-others/TD-11389.py new file mode 100644 index 
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os

from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        """Set up the SQL helper and the constants used by this case."""
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1420041600000  # 2015-01-01 00:00:00, timestamp of the first record
        self.num = 10            # number of sub-tables (and rows) to insert

    def getBuildPath(self):
        """Locate the build root that contains the taosd binary.

        Returns the build root (the path with the trailing "/build/bin"
        stripped), or an empty string when taosd cannot be found.
        """
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if "community" in selfPath:
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        # fix: initialize buildPath so a missing taosd yields "" instead of
        # raising UnboundLocalError on the return statement
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if "taosd" in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if "packaging" not in rootRealPath:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def caseDescription(self):
        '''
        case1: [TD-11389]:
        Regression test for a client-side schema-cache error: after the tag
        schema of a super table is enlarged (tag hostname size increased)
        through another connection, an already-connected taos client keeps
        using its stale cached table schema, so queries return incorrect
        results until the cache is refreshed.
        '''
        return

    def run(self):
        tdSql.prepare()
        tdSql.execute("create database if not exists testdb keep 36500;")
        tdSql.execute("use testdb;")
        tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);")
        for i in range(self.num):
            tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );" % (str(i), str(i), i * 10, self.ts + 10000 * i, i * 2, i + 10.00))
        tdSql.query("select distinct(hostname) from st;")
        tdSql.checkRows(10)

        binPath = self.getBuildPath() + "/build/bin/"
        # fix: binPath was computed but unused — invoke the located taos
        # binary instead of relying on whatever taos happens to be in $PATH
        os.system("%staos -s ' ALTER STABLE testdb.st MODIFY TAG hostname binary(100); '" % binPath)
        os.system("%staos -s ' insert into testdb.sub_test using testdb.st tags(\"host_10000000000000000000\" , 100) values (now , 100 , 100.0 ); '" % binPath)

        tdLog.info(" ===============The correct result should be 11 rows ,there is error query result ====================")

        os.system("%staos -s ' select distinct(hostname) from testdb.st '" % binPath)

        # The bug occurs on this (first) connection: it should get 11 rows but
        # returns 10 because of the stale client-side schema cache.
        for i in range(10):
            # fix: re-issue the query every iteration; calling checkRows alone
            # would just re-inspect the previous result set and never observe
            # a cache refresh
            tdSql.query("select distinct(hostname) from testdb.st;")
            tdSql.checkRows(11)  # query 10 times, once every 10 seconds, to test cache refresh
            sleep(10)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/3-connectors/nodejs/.gitkeep b/tests/system-test/3-connectors/nodejs/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/3-connectors/restful/.gitkeep b/tests/system-test/3-connectors/restful/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/3-connectors/rust/.gitkeep b/tests/system-test/3-connectors/rust/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/4-taosAdapter/.gitkeep b/tests/system-test/4-taosAdapter/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/5-taos-tools/taosbenchmark/.gitkeep b/tests/system-test/5-taos-tools/taosbenchmark/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/5-taos-tools/taosdump/.gitkeep b/tests/system-test/5-taos-tools/taosdump/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/6-cluster/.gitkeep b/tests/system-test/6-cluster/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/7-customer/.gitkeep b/tests/system-test/7-customer/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh new file mode 100755 index 0000000000000000000000000000000000000000..d2fea19cb537e4cc196b06998059bcbf328d09a8 --- /dev/null 
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/

# -*- coding: utf-8 -*-
# Entry point of the system-test suite: parse CLI options, optionally stop
# running daemons, deploy/start dnodes (or drive a Windows client), then
# dispatch the requested test-case file.
import sys
import os
import getopt
import subprocess
import time
import importlib  # fix: importlib.import_module is used below but was never imported
from distutils.log import warn as printf
from fabric2 import Connection
sys.path.append("../pytest")
from util.log import *
from util.dnodes import *
from util.cases import *

import taos


if __name__ == "__main__":

    fileName = "all"
    deployPath = ""
    masterIp = ""
    testCluster = False
    valgrind = 0
    logSql = True
    stop = 0
    restart = False
    windows = 0
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [
        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
                'A collection of test cases written using Python')
            tdLog.printNoPrefix('-f Name of test case file written by Python')
            tdLog.printNoPrefix('-p Deploy Path for Simulator')
            tdLog.printNoPrefix('-m Master Ip for Simulator')
            tdLog.printNoPrefix('-l logSql Flag')
            tdLog.printNoPrefix('-s stop All dnodes')
            tdLog.printNoPrefix('-c Test Cluster Flag')
            tdLog.printNoPrefix('-g valgrind Test Flag')
            tdLog.printNoPrefix('-r taosd restart test')
            tdLog.printNoPrefix('-w taos on windows')
            sys.exit(0)

        if key in ['-r', '--restart']:
            restart = True

        if key in ['-f', '--file']:
            fileName = value

        if key in ['-p', '--path']:
            deployPath = value

        if key in ['-m', '--master']:
            masterIp = value

        if key in ['-l', '--logSql']:
            if value.upper() == "TRUE":
                logSql = True
            elif value.upper() == "FALSE":
                logSql = False
            else:
                # fix: report the offending option value, not the current flag
                tdLog.printNoPrefix("logSql value %s is invalid" % value)
                sys.exit(0)

        if key in ['-c', '--cluster']:
            testCluster = True

        if key in ['-g', '--valgrind']:
            valgrind = 1

        if key in ['-s', '--stop']:
            stop = 1

        if key in ['-w', '--windows']:
            windows = 1

    if stop != 0:
        if valgrind == 0:
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled

        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True)

        # keep killing until no matching process remains
        while processID:
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

        for port in range(6030, 6041):
            # fix: grep is case-sensitive — 'LISTEn' never matched lsof's
            # 'LISTEN' state, so listening PIDs were never found
            usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                # fix: decode the bytes output before building the command line
                killCmd = "kill -TERM %s" % processID.decode().strip()
                os.system(killCmd)
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
        if valgrind:
            time.sleep(2)

        tdLog.info('stop All dnodes')

    if masterIp == "":
        host = '127.0.0.1'
    else:
        host = masterIp

    tdLog.info("Procedures for tdengine deployed in %s" % (host))
    if windows:
        tdCases.logSql(logSql)
        tdLog.info("Procedures for testing self-deployment")
        td_client = TDSimClient("C:\\TDengine")  # renamed from td_clinet (typo)
        td_client.deploy()
        remote_conn = Connection("root@%s" % host)
        with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
            remote_conn.run("python3 ./test.py")
        conn = taos.connect(
            host="%s" % (host),
            config=td_client.cfgDir)
        tdCases.runOneWindows(conn, fileName)
    else:
        tdDnodes.init(deployPath)
        tdDnodes.setTestCluster(testCluster)
        tdDnodes.setValgrind(valgrind)
        tdDnodes.stopAll()
        is_test_framework = 0
        key_word = 'tdCases.addLinux'
        try:
            # fix: close the case file instead of leaking the handle
            with open(fileName) as case_file:
                if key_word in case_file.read():
                    is_test_framework = 1
        except Exception:
            pass
        if is_test_framework:
            moduleName = fileName.replace(".py", "").replace("/", ".")
            uModule = importlib.import_module(moduleName)
            try:
                # deploy with the case's own config overrides when it has any
                ucase = uModule.TDTestCase()
                tdDnodes.deploy(1, ucase.updatecfgDict)
            except Exception:
                tdDnodes.deploy(1, {})
        else:
            tdDnodes.deploy(1, {})
        tdDnodes.start(1)

        tdCases.logSql(logSql)

        if testCluster:
            tdLog.info("Procedures for testing cluster")
            if fileName == "all":
                tdCases.runAllCluster()
            else:
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            conn = taos.connect(
                host,
                config=tdDnodes.getSimCfgPath())
            if fileName == "all":
                tdCases.runAllLinux(conn)
            else:
                # fix: this is the Linux branch — dispatch the Linux runner
                # (the restart path below already used runOneLinux)
                tdCases.runOneLinux(conn, fileName)
        if restart:
            if fileName == "all":
                tdLog.info("not need to query ")
            else:
                sp = fileName.rsplit(".", 1)
                if len(sp) == 2 and sp[1] == "py":
                    tdDnodes.stopAll()
                    tdDnodes.start(1)
                    time.sleep(1)
                    conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
                    tdLog.info("Procedures for tdengine deployed in %s" % (host))
                    tdLog.info("query test after taosd restart")
                    tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
                else:
                    tdLog.info("not need to query")
        conn.close()
a/tests/test-all.sh +++ b/tests/test-all.sh @@ -158,7 +158,13 @@ function runPyCaseOneByOne { } function runPyCaseOneByOnefq() { - cd $tests_dir/pytest + if [[ $3 =~ system ]] ; then + cd $tests_dir/system-test + elif [[ $3 =~ develop ]] ; then + cd $tests_dir/develop-test + else + cd $tests_dir/pytest + fi if [[ $1 =~ full ]] ; then start=1 end=`sed -n '$=' fulltest.sh` @@ -361,6 +367,12 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != " elif [ "$1" == "p4" ]; then echo "### run Python_4 test ###" runPyCaseOneByOnefq p4 1 + elif [ "$1" == "system-test" ]; then + echo "### run system-test test ###" + runPyCaseOneByOnefq full 1 system + elif [ "$1" == "develop-test" ]; then + echo "### run develop-test test ###" + runPyCaseOneByOnefq full 1 develop elif [ "$1" == "b2" ] || [ "$1" == "b3" ]; then exit $(($totalFailed + $totalPyFailed)) elif [ "$1" == "smoke" ] || [ -z "$1" ]; then