diff --git a/.gitignore b/.gitignore
index e6e327327c2a72bc2262d9b2923314c950c78cf6..1ff11080569e9312369f6e9c00463e25853fd38b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,7 +27,6 @@ tests/hdfs/
 nmake/
 sln/
 hdfs/
-c/
 taoshebei/
 taosdalipu/
 Target/
@@ -79,3 +78,15 @@ tests/comparisonTest/opentsdb/opentsdbtest/.settings/
 tests/examples/JDBC/JDBCDemo/.classpath
 tests/examples/JDBC/JDBCDemo/.project
 tests/examples/JDBC/JDBCDemo/.settings/
+
+# Emacs
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+TAGS
diff --git a/.gitmodules b/.gitmodules
index 156226d54486c17e64b9c514e47e3a7dc3fe6942..74afbbf9973abec6423633b848181b349de4ed6f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,9 +4,9 @@
 [submodule "src/connector/grafanaplugin"]
 	path = src/connector/grafanaplugin
 	url = https://github.com/taosdata/grafanaplugin
+[submodule "src/connector/hivemq-tdengine-extension"]
+	path = src/connector/hivemq-tdengine-extension
+	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
 [submodule "tests/examples/rust"]
 	path = tests/examples/rust
 	url = https://github.com/songtianyi/tdengine-rust-bindings.git
-[submodule "src/connector/hivemq-tdengine-extension"]
-	path = src/connector/hivemq-tdengine-extension
-	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a55b5fbed97c08117f23488cf3e0d60b894316e7..7bb36fe1b001473cf5641ad195959581affeb2cb 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 IF (CMAKE_VERSION VERSION_LESS 3.0)
   PROJECT(TDengine CXX)
   SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
diff --git a/Jenkinsfile b/Jenkinsfile
index 3119b5031934307452a1b502d8606c32194f4fa2..8d2429c137414fdb63260f94c0c99a6d264d8c48 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -5,7 +5,7 @@ node {
     git url: 'https://github.com/taosdata/TDengine.git'
 }
 
-
+def skipstage=0
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
   def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -45,7 +45,7 @@ def pre_test(){
     git pull
     git fetch origin +refs/pull/${CHANGE_ID}/merge
     git checkout -qf FETCH_HEAD
-    git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|.*src/connector|Jenkinsfile' || exit 0
+    git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' || exit 0
     cd ${WK}
     git reset --hard HEAD~10
     git checkout develop
@@ -63,6 +63,7 @@ def pre_test(){
     '''
     return 1
 }
+
 pipeline {
   agent none
@@ -72,12 +73,37 @@ pipeline {
   }
   stages {
-
+    stage('pre_build'){
+      agent{label 'master'}
+      when {
+        changeRequest()
+      }
+      steps {
+        sh'''
+        cp -r ${WORKSPACE} ${WORKSPACE}.tes
+        cd ${WORKSPACE}.tes
+        git checkout develop
+        git pull
+        git fetch origin +refs/pull/${CHANGE_ID}/merge
+        git checkout -qf FETCH_HEAD
+        '''
+        script{
+          env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
+        }
+        println env.skipstage
+        sh'''
+        rm -rf ${WORKSPACE}.tes
+        '''
+      }
+    }
     stage('Parallel test stage') {
       //only build pr
       when {
         changeRequest()
+        expression {
+          env.skipstage != 0
+        }
       }
       parallel {
         stage('python_1_s1') {
@@ -102,12 +128,12 @@ pipeline {
           pre_test()
           timeout(time: 45, unit: 'MINUTES'){
-            sh '''
-            date
-            cd ${WKC}/tests
-            find pytest -name '*'sql|xargs rm -rf
-            ./test-all.sh p2
-            date'''
+              sh '''
+              date
+              cd ${WKC}/tests
+              find pytest -name '*'sql|xargs rm -rf
+              ./test-all.sh p2
+              date'''
           }
         }
       }
@@ -127,7 +153,7 @@ pipeline {
       stage('test_b1_s2') {
         agent{label 'b1'}
         steps {
-          timeout(time: 90, unit: 'MINUTES'){
+          timeout(time: 45, unit: 'MINUTES'){
            pre_test()
            sh '''
            cd ${WKC}/tests
@@ -139,6 +165,7 @@ pipeline {
 
       stage('test_crash_gen_s3') {
         agent{label "b2"}
+
         steps {
           pre_test()
           catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -147,20 +174,22 @@ pipeline {
             ./crash_gen.sh -a -p -t 4 -s 2000
             '''
           }
-          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+
+          sh '''
+          cd ${WKC}/tests/pytest
+          rm -rf /var/lib/taos/*
+          rm -rf /var/log/taos/*
+          ./handle_crash_gen_val_log.sh
+          '''
+          timeout(time: 45, unit: 'MINUTES'){
             sh '''
-              cd ${WKC}/tests/pytest
-              ./handle_crash_gen_val_log.sh
+            date
+            cd ${WKC}/tests
+            ./test-all.sh b2fq
+            date
             '''
-          }
-          timeout(time: 45, unit: 'MINUTES'){
-            sh '''
-            date
-            cd ${WKC}/tests
-            ./test-all.sh b2fq
-            date
-            '''
-          }
+          }
+
         }
       }
@@ -194,6 +223,12 @@ pipeline {
             date
             cd ${WKC}/tests
             ./test-all.sh b4fq
+            cd ${WKC}/tests
+            ./test-all.sh p4
+            cd ${WKC}/tests
+            ./test-all.sh full jdbc
+            cd ${WKC}/tests
+            ./test-all.sh full unit
             date'''
           }
         }
@@ -240,12 +275,11 @@ pipeline {
       }
     }
   }
-  post {
-
+  post {
     success {
       emailext (
-        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-        body: '''
+        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
+        body: """
@@ -261,29 +295,29 @@ pipeline {
-        ''',
+        """,
         to: "${env.CHANGE_AUTHOR_EMAIL}",
         from: "support@taosdata.com"
       )
     }
     failure {
       emailext (
-        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-        body: '''
+        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
+        body: """
@@ -299,21 +333,21 @@ pipeline {
-        ''',
+        """,
         to: "${env.CHANGE_AUTHOR_EMAIL}",
         from: "support@taosdata.com"
       )
diff --git a/README.md b/README.md
index ded262b6741aff4ec94187295c97c8ce7cf20a31..489b6d0a4e12db06c9f2c1274e7a87f22cf76655 100644
--- a/README.md
+++ b/README.md
@@ -57,9 +57,7 @@ sudo apt-get install -y maven
 ### Centos 7:
 
 ```bash
-sudo yum install -y gcc gcc-c++ make cmake3 epel-release git
-sudo yum remove -y cmake
-sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
+sudo yum install -y gcc gcc-c++ make cmake git
 ```
 
 To install openjdk-8:
@@ -110,7 +108,8 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```
 
-To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
+The TDengine build script can detect the host machine's architecture (x86-64, x86, arm64, or arm32).
+You can also specify the CPUTYPE option (aarch64 or aarch32) if the detected value is not correct:
 
 aarch64:
 ```bash
diff --git a/cmake/install.inc b/cmake/install.inc
index 2f0404334c06125e6ee8241ab91b22064ab20b89..0ea79589caef1bf3ec72f4234f99e87328759c33 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
     #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
     #INSTALL(TARGETS shell RUNTIME DESTINATION .)
     IF (TD_MVN_INSTALLED)
-        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc)
+        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.21-dist.jar DESTINATION connector/jdbc)
     ENDIF ()
 ELSEIF (TD_DARWIN)
     SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/platform.inc b/cmake/platform.inc
index 889f6c73cf2be982df7f1db207114e58d6af3e92..dcd0183e27b58f2ebd22c584fab359e025044cba 100755
--- a/cmake/platform.inc
+++ b/cmake/platform.inc
@@ -78,29 +78,58 @@ ELSE()
   EXIT ()
 ENDIF ()
 
-# if generate ARM version:
-#   cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
-IF (${CPUTYPE} MATCHES "aarch32")
-  SET(TD_LINUX TRUE)
-  SET(TD_LINUX_32 FALSE)
-  SET(TD_ARM_32 TRUE)
-  MESSAGE(STATUS "input cpuType: aarch32")
-ELSEIF (${CPUTYPE} MATCHES "aarch64")
-  SET(TD_LINUX TRUE)
-  SET(TD_LINUX_64 FALSE)
-  SET(TD_ARM_64 TRUE)
-  MESSAGE(STATUS "input cpuType: aarch64")
-ELSEIF (${CPUTYPE} MATCHES "mips64")
-  SET(TD_LINUX TRUE)
-  SET(TD_LINUX_64 FALSE)
-  SET(TD_MIPS_64 TRUE)
-  MESSAGE(STATUS "input cpuType: mips64")
-ELSEIF (${CPUTYPE} MATCHES "x64")
-  MESSAGE(STATUS "input cpuType: x64")
-ELSEIF (${CPUTYPE} MATCHES "x86")
-  MESSAGE(STATUS "input cpuType: x86")
+IF ("${CPUTYPE}" STREQUAL "")
+  MESSAGE(STATUS "The current platform " ${CMAKE_SYSTEM_PROCESSOR} " is detected")
+
+  IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
+    MESSAGE(STATUS "The current platform is amd64")
+    MESSAGE(STATUS "Set CPUTYPE to x64")
+    SET(CPUTYPE "x64")
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
+    MESSAGE(STATUS "The current platform is x86")
+    MESSAGE(STATUS "Set CPUTYPE to x86")
+    SET(CPUTYPE "x32")
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
+    MESSAGE(STATUS "Set CPUTYPE to aarch32")
+    SET(CPUTYPE "aarch32")
+    MESSAGE(STATUS "Set CPUTYPE to aarch32")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_32 FALSE)
+    SET(TD_ARM_32 TRUE)
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+    SET(CPUTYPE "aarch64")
+    MESSAGE(STATUS "Set CPUTYPE to aarch64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_ARM_64 TRUE)
+  ENDIF ()
+
 ELSE ()
-  MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
+  # if generate ARM version:
+  #   cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
+  IF (${CPUTYPE} MATCHES "aarch32")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_32 FALSE)
+    SET(TD_ARM_32 TRUE)
+    MESSAGE(STATUS "input cpuType: aarch32")
+  ELSEIF (${CPUTYPE} MATCHES "aarch64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_ARM_64 TRUE)
+    MESSAGE(STATUS "input cpuType: aarch64")
+  ELSEIF (${CPUTYPE} MATCHES "mips64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_MIPS_64 TRUE)
+    MESSAGE(STATUS "input cpuType: mips64")
+  ELSEIF (${CPUTYPE} MATCHES "x64")
+    MESSAGE(STATUS "input cpuType: x64")
+  ELSEIF (${CPUTYPE} MATCHES "x86")
+    MESSAGE(STATUS "input cpuType: x86")
+  ELSE ()
+    MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
+  ENDIF ()
 ENDIF ()
 
 # cmake -DOSTYPE=Ningsi
diff --git a/cmake/version.inc b/cmake/version.inc
index f9927bf1c61c62dd27123e1c6aec158c4d1c09cf..962f1f60406f3657a379ee99427e110985988c44 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.14.0")
+  SET(TD_VER_NUMBER "2.0.16.0")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -13,28 +13,40 @@ ELSE ()
   SET(TD_VER_COMPATIBLE "2.0.0.0")
 ENDIF ()
 
+find_program(HAVE_GIT NAMES git)
+
 IF (DEFINED GITINFO)
   SET(TD_VER_GIT ${GITINFO})
+ELSEIF (HAVE_GIT)
+  execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${TD_COMMUNITY_DIR} OUTPUT_VARIABLE GIT_COMMITID)
+  message(STATUS "git log result:${GIT_COMMITID}")
+  IF (GIT_COMMITID)
+    string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
+    SET(TD_VER_GIT ${GIT_COMMITID})
+  ELSE ()
+    message(STATUS "not a git repository")
+    SET(TD_VER_GIT "no git commit id")
+  ENDIF ()
 ELSE ()
-  execute_process(
-    COMMAND git log -1 --format=%H
-    WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
-    OUTPUT_VARIABLE GIT_COMMITID
-  )
-  string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
-  SET(TD_VER_GIT ${GIT_COMMITID})
+  message(STATUS "no git cmd")
+  SET(TD_VER_GIT "no git commit id")
 ENDIF ()
 
 IF (DEFINED GITINFOI)
   SET(TD_VER_GIT_INTERNAL ${GITINFOI})
+ELSEIF (HAVE_GIT)
+  execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
+  message(STATUS "git log result:${GIT_COMMITID}")
+  IF (GIT_COMMITID)
+    string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
+    SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
+  ELSE ()
+    message(STATUS "not a git repository")
+    SET(TD_VER_GIT "no git commit id")
+  ENDIF ()
 ELSE ()
-  execute_process(
-    COMMAND git log -1 --format=%H
-    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-    OUTPUT_VARIABLE GIT_COMMITID
-  )
-  string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
-  SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
+  message(STATUS "no git cmd")
+  SET(TD_VER_GIT_INTERNAL "no git commit id")
 ENDIF ()
 
 IF (DEFINED VERDATE)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index a4db6fd5fbc3e867cd30d2a1c871e48a7bfbd5eb..cfc17442f5c21c2d002ba42c45ce523c80eb957f 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 
 ADD_SUBDIRECTORY(zlib-1.2.11)
diff --git a/deps/MQTT-C/CMakeLists.txt b/deps/MQTT-C/CMakeLists.txt
index 36ede467acfa05f893cbe99ed518458d57986c79..15b35525210ec90e6e2efbdcd0e6128cb4d34f91 100644
--- a/deps/MQTT-C/CMakeLists.txt
+++ b/deps/MQTT-C/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 
 # MQTT-C build options
 option(MQTT_C_OpenSSL_SUPPORT
"Build MQTT-C with OpenSSL support?" OFF) diff --git a/deps/MsvcLibX/CMakeLists.txt b/deps/MsvcLibX/CMakeLists.txt index c02e4c7a4d89cdf657756ec9786d5a624419d182..4428579e1c098425c9d72d7d58a5fda15cd34c93 100644 --- a/deps/MsvcLibX/CMakeLists.txt +++ b/deps/MsvcLibX/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/iconv/CMakeLists.txt b/deps/iconv/CMakeLists.txt index f26f109735ee81bc999b9539185ce60ccb976bfd..286070fa9071f8fcd1949850cec87c1ced3245d7 100644 --- a/deps/iconv/CMakeLists.txt +++ b/deps/iconv/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/libcurl/include/curl/curl.h b/deps/libcurl/include/curl/curl.h deleted file mode 100644 index 84229bb698b77a5ed5d67057772ed23188e191e8..0000000000000000000000000000000000000000 --- a/deps/libcurl/include/curl/curl.h +++ /dev/null @@ -1,2415 +0,0 @@ -#ifndef __CURL_CURL_H -#define __CURL_CURL_H -/*************************************************************************** - * _ _ ____ _ - * Project ___| | | | _ \| | - * / __| | | | |_) | | - * | (__| |_| | _ <| |___ - * \___|\___/|_| \_\_____| - * - * Copyright (C) 1998 - 2015, Daniel Stenberg, , et al. - * - * This software is licensed as described in the file COPYING, which - * you should have received as part of this distribution. The terms - * are also available at http://curl.haxx.se/docs/copyright.html. - * - * You may opt to use, copy, modify, merge, publish, distribute and/or sell - * copies of the Software, and permit persons to whom the Software is - * furnished to do so, under the terms of the COPYING file. - * - * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY - * KIND, either express or implied. - * - ***************************************************************************/ - -/* - * If you have libcurl problems, all docs and details are found here: - * http://curl.haxx.se/libcurl/ - * - * curl-library mailing list subscription and unsubscription web interface: - * http://cool.haxx.se/mailman/listinfo/curl-library/ - */ - -#include "curlver.h" /* libcurl version defines */ -#include "curlbuild.h" /* libcurl build definitions */ -#include "curlrules.h" /* libcurl rules enforcement */ - -/* - * Define WIN32 when build target is Win32 API - */ - -#if (defined(_WIN32) || defined(__WIN32__)) && \ - !defined(WIN32) && !defined(__SYMBIAN32__) -#define WIN32 -#endif - -#include -#include - -#if defined(__FreeBSD__) && (__FreeBSD__ >= 2) -/* Needed for __FreeBSD_version symbol definition */ -#include -#endif - -/* The include stuff here below is mainly for time_t! */ -#include -#include - -#if defined(WIN32) && !defined(_WIN32_WCE) && !defined(__CYGWIN__) -#if !(defined(_WINSOCKAPI_) || defined(_WINSOCK_H) || \ - defined(__LWIP_OPT_H__) || defined(LWIP_HDR_OPT_H)) -/* The check above prevents the winsock2 inclusion if winsock.h already was - included, since they can't co-exist without problems */ -#include -#include -#endif -#endif - -/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so does oldish - libc5-based Linux systems. Only include it on systems that are known to - require it! 
*/ -#if defined(_AIX) || defined(__NOVELL_LIBC__) || defined(__NetBSD__) || \ - defined(__minix) || defined(__SYMBIAN32__) || defined(__INTEGRITY) || \ - defined(ANDROID) || defined(__ANDROID__) || defined(__OpenBSD__) || \ - (defined(__FreeBSD_version) && (__FreeBSD_version < 800000)) -#include -#endif - -#if !defined(WIN32) && !defined(_WIN32_WCE) -#include -#endif - -#if !defined(WIN32) && !defined(__WATCOMC__) && !defined(__VXWORKS__) -#include -#endif - -#ifdef __BEOS__ -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void CURL; - -/* - * libcurl external API function linkage decorations. - */ - -#ifdef CURL_STATICLIB -# define CURL_EXTERN -#elif defined(WIN32) || defined(_WIN32) || defined(__SYMBIAN32__) -# if defined(BUILDING_LIBCURL) -# define CURL_EXTERN __declspec(dllexport) -# else -# define CURL_EXTERN __declspec(dllimport) -# endif -#elif defined(BUILDING_LIBCURL) && defined(CURL_HIDDEN_SYMBOLS) -# define CURL_EXTERN CURL_EXTERN_SYMBOL -#else -# define CURL_EXTERN -#endif - -#ifndef curl_socket_typedef -/* socket typedef */ -#if defined(WIN32) && !defined(__LWIP_OPT_H__) && !defined(LWIP_HDR_OPT_H) -typedef SOCKET curl_socket_t; -#define CURL_SOCKET_BAD INVALID_SOCKET -#else -typedef int curl_socket_t; -#define CURL_SOCKET_BAD -1 -#endif -#define curl_socket_typedef -#endif /* curl_socket_typedef */ - -struct curl_httppost { - struct curl_httppost *next; /* next entry in the list */ - char *name; /* pointer to allocated name */ - long namelength; /* length of name length */ - char *contents; /* pointer to allocated data contents */ - long contentslength; /* length of contents field, see also - CURL_HTTPPOST_LARGE */ - char *buffer; /* pointer to allocated buffer contents */ - long bufferlength; /* length of buffer field */ - char *contenttype; /* Content-Type */ - struct curl_slist* contentheader; /* list of extra headers for this form */ - struct curl_httppost *more; /* if one field name has more than one - file, this link should link to following - files */ - long flags; /* as defined below */ - -/* specified content is a file name */ -#define CURL_HTTPPOST_FILENAME (1<<0) -/* specified content is a file name */ -#define CURL_HTTPPOST_READFILE (1<<1) -/* name is only stored pointer do not free in formfree */ -#define CURL_HTTPPOST_PTRNAME (1<<2) -/* contents is only stored pointer do not free in formfree */ -#define CURL_HTTPPOST_PTRCONTENTS (1<<3) -/* upload file from buffer */ -#define CURL_HTTPPOST_BUFFER (1<<4) -/* upload file from pointer contents */ -#define CURL_HTTPPOST_PTRBUFFER (1<<5) -/* upload file contents by using the regular read callback to get the data and - pass the given pointer as custom pointer */ -#define CURL_HTTPPOST_CALLBACK (1<<6) -/* use size in 'contentlen', added in 7.46.0 */ -#define CURL_HTTPPOST_LARGE (1<<7) - - char *showfilename; /* The file name to show. If not set, the - actual file name will be used (if this - is a file part) */ - void *userp; /* custom pointer used for - HTTPPOST_CALLBACK posts */ - curl_off_t contentlen; /* alternative length of contents - field. Used if CURL_HTTPPOST_LARGE is - set. Added in 7.46.0 */ -}; - -/* This is the CURLOPT_PROGRESSFUNCTION callback proto. It is now considered - deprecated but was the only choice up until 7.31.0 */ -typedef int (*curl_progress_callback)(void *clientp, - double dltotal, - double dlnow, - double ultotal, - double ulnow); - -/* This is the CURLOPT_XFERINFOFUNCTION callback proto. 
It was introduced in - 7.32.0, it avoids floating point and provides more detailed information. */ -typedef int (*curl_xferinfo_callback)(void *clientp, - curl_off_t dltotal, - curl_off_t dlnow, - curl_off_t ultotal, - curl_off_t ulnow); - -#ifndef CURL_MAX_WRITE_SIZE - /* Tests have proven that 20K is a very bad buffer size for uploads on - Windows, while 16K for some odd reason performed a lot better. - We do the ifndef check to allow this value to easier be changed at build - time for those who feel adventurous. The practical minimum is about - 400 bytes since libcurl uses a buffer of this size as a scratch area - (unrelated to network send operations). */ -#define CURL_MAX_WRITE_SIZE 16384 -#endif - -#ifndef CURL_MAX_HTTP_HEADER -/* The only reason to have a max limit for this is to avoid the risk of a bad - server feeding libcurl with a never-ending header that will cause reallocs - infinitely */ -#define CURL_MAX_HTTP_HEADER (100*1024) -#endif - -/* This is a magic return code for the write callback that, when returned, - will signal libcurl to pause receiving on the current transfer. */ -#define CURL_WRITEFUNC_PAUSE 0x10000001 - -typedef size_t (*curl_write_callback)(char *buffer, - size_t size, - size_t nitems, - void *outstream); - - - -/* enumeration of file types */ -typedef enum { - CURLFILETYPE_FILE = 0, - CURLFILETYPE_DIRECTORY, - CURLFILETYPE_SYMLINK, - CURLFILETYPE_DEVICE_BLOCK, - CURLFILETYPE_DEVICE_CHAR, - CURLFILETYPE_NAMEDPIPE, - CURLFILETYPE_SOCKET, - CURLFILETYPE_DOOR, /* is possible only on Sun Solaris now */ - - CURLFILETYPE_UNKNOWN /* should never occur */ -} curlfiletype; - -#define CURLFINFOFLAG_KNOWN_FILENAME (1<<0) -#define CURLFINFOFLAG_KNOWN_FILETYPE (1<<1) -#define CURLFINFOFLAG_KNOWN_TIME (1<<2) -#define CURLFINFOFLAG_KNOWN_PERM (1<<3) -#define CURLFINFOFLAG_KNOWN_UID (1<<4) -#define CURLFINFOFLAG_KNOWN_GID (1<<5) -#define CURLFINFOFLAG_KNOWN_SIZE (1<<6) -#define CURLFINFOFLAG_KNOWN_HLINKCOUNT (1<<7) - -/* Content of this structure depends on information which is known and is - achievable (e.g. by FTP LIST parsing). Please see the url_easy_setopt(3) man - page for callbacks returning this structure -- some fields are mandatory, - some others are optional. The FLAG field has special meaning. */ -struct curl_fileinfo { - char *filename; - curlfiletype filetype; - time_t time; - unsigned int perm; - int uid; - int gid; - curl_off_t size; - long int hardlinks; - - struct { - /* If some of these fields is not NULL, it is a pointer to b_data. */ - char *time; - char *perm; - char *user; - char *group; - char *target; /* pointer to the target filename of a symlink */ - } strings; - - unsigned int flags; - - /* used internally */ - char * b_data; - size_t b_size; - size_t b_used; -}; - -/* return codes for CURLOPT_CHUNK_BGN_FUNCTION */ -#define CURL_CHUNK_BGN_FUNC_OK 0 -#define CURL_CHUNK_BGN_FUNC_FAIL 1 /* tell the lib to end the task */ -#define CURL_CHUNK_BGN_FUNC_SKIP 2 /* skip this chunk over */ - -/* if splitting of data transfer is enabled, this callback is called before - download of an individual chunk started. 
Note that parameter "remains" works - only for FTP wildcard downloading (for now), otherwise is not used */ -typedef long (*curl_chunk_bgn_callback)(const void *transfer_info, - void *ptr, - int remains); - -/* return codes for CURLOPT_CHUNK_END_FUNCTION */ -#define CURL_CHUNK_END_FUNC_OK 0 -#define CURL_CHUNK_END_FUNC_FAIL 1 /* tell the lib to end the task */ - -/* If splitting of data transfer is enabled this callback is called after - download of an individual chunk finished. - Note! After this callback was set then it have to be called FOR ALL chunks. - Even if downloading of this chunk was skipped in CHUNK_BGN_FUNC. - This is the reason why we don't need "transfer_info" parameter in this - callback and we are not interested in "remains" parameter too. */ -typedef long (*curl_chunk_end_callback)(void *ptr); - -/* return codes for FNMATCHFUNCTION */ -#define CURL_FNMATCHFUNC_MATCH 0 /* string corresponds to the pattern */ -#define CURL_FNMATCHFUNC_NOMATCH 1 /* pattern doesn't match the string */ -#define CURL_FNMATCHFUNC_FAIL 2 /* an error occurred */ - -/* callback type for wildcard downloading pattern matching. If the - string matches the pattern, return CURL_FNMATCHFUNC_MATCH value, etc. */ -typedef int (*curl_fnmatch_callback)(void *ptr, - const char *pattern, - const char *string); - -/* These are the return codes for the seek callbacks */ -#define CURL_SEEKFUNC_OK 0 -#define CURL_SEEKFUNC_FAIL 1 /* fail the entire transfer */ -#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking can't be done, so - libcurl might try other means instead */ -typedef int (*curl_seek_callback)(void *instream, - curl_off_t offset, - int origin); /* 'whence' */ - -/* This is a return code for the read callback that, when returned, will - signal libcurl to immediately abort the current transfer. */ -#define CURL_READFUNC_ABORT 0x10000000 -/* This is a return code for the read callback that, when returned, will - signal libcurl to pause sending data on the current transfer. 
*/ -#define CURL_READFUNC_PAUSE 0x10000001 - -typedef size_t (*curl_read_callback)(char *buffer, - size_t size, - size_t nitems, - void *instream); - -typedef enum { - CURLSOCKTYPE_IPCXN, /* socket created for a specific IP connection */ - CURLSOCKTYPE_ACCEPT, /* socket created by accept() call */ - CURLSOCKTYPE_LAST /* never use */ -} curlsocktype; - -/* The return code from the sockopt_callback can signal information back - to libcurl: */ -#define CURL_SOCKOPT_OK 0 -#define CURL_SOCKOPT_ERROR 1 /* causes libcurl to abort and return - CURLE_ABORTED_BY_CALLBACK */ -#define CURL_SOCKOPT_ALREADY_CONNECTED 2 - -typedef int (*curl_sockopt_callback)(void *clientp, - curl_socket_t curlfd, - curlsocktype purpose); - -struct curl_sockaddr { - int family; - int socktype; - int protocol; - unsigned int addrlen; /* addrlen was a socklen_t type before 7.18.0 but it - turned really ugly and painful on the systems that - lack this type */ - struct sockaddr addr; -}; - -typedef curl_socket_t -(*curl_opensocket_callback)(void *clientp, - curlsocktype purpose, - struct curl_sockaddr *address); - -typedef int -(*curl_closesocket_callback)(void *clientp, curl_socket_t item); - -typedef enum { - CURLIOE_OK, /* I/O operation successful */ - CURLIOE_UNKNOWNCMD, /* command was unknown to callback */ - CURLIOE_FAILRESTART, /* failed to restart the read */ - CURLIOE_LAST /* never use */ -} curlioerr; - -typedef enum { - CURLIOCMD_NOP, /* no operation */ - CURLIOCMD_RESTARTREAD, /* restart the read stream from start */ - CURLIOCMD_LAST /* never use */ -} curliocmd; - -typedef curlioerr (*curl_ioctl_callback)(CURL *handle, - int cmd, - void *clientp); - -/* - * The following typedef's are signatures of malloc, free, realloc, strdup and - * calloc respectively. Function pointers of these types can be passed to the - * curl_global_init_mem() function to set user defined memory management - * callback routines. - */ -typedef void *(*curl_malloc_callback)(size_t size); -typedef void (*curl_free_callback)(void *ptr); -typedef void *(*curl_realloc_callback)(void *ptr, size_t size); -typedef char *(*curl_strdup_callback)(const char *str); -typedef void *(*curl_calloc_callback)(size_t nmemb, size_t size); - -/* the kind of data that is passed to information_callback*/ -typedef enum { - CURLINFO_TEXT = 0, - CURLINFO_HEADER_IN, /* 1 */ - CURLINFO_HEADER_OUT, /* 2 */ - CURLINFO_DATA_IN, /* 3 */ - CURLINFO_DATA_OUT, /* 4 */ - CURLINFO_SSL_DATA_IN, /* 5 */ - CURLINFO_SSL_DATA_OUT, /* 6 */ - CURLINFO_END -} curl_infotype; - -typedef int (*curl_debug_callback) - (CURL *handle, /* the handle/transfer this concerns */ - curl_infotype type, /* what kind of data */ - char *data, /* points to the data */ - size_t size, /* size of the data pointed to */ - void *userptr); /* whatever the user please */ - -/* All possible error codes from all sorts of curl functions. Future versions - may return other values, stay prepared. - - Always add new return codes last. Never *EVER* remove any. The return - codes must remain the same! 
- */ - -typedef enum { - CURLE_OK = 0, - CURLE_UNSUPPORTED_PROTOCOL, /* 1 */ - CURLE_FAILED_INIT, /* 2 */ - CURLE_URL_MALFORMAT, /* 3 */ - CURLE_NOT_BUILT_IN, /* 4 - [was obsoleted in August 2007 for - 7.17.0, reused in April 2011 for 7.21.5] */ - CURLE_COULDNT_RESOLVE_PROXY, /* 5 */ - CURLE_COULDNT_RESOLVE_HOST, /* 6 */ - CURLE_COULDNT_CONNECT, /* 7 */ - CURLE_FTP_WEIRD_SERVER_REPLY, /* 8 */ - CURLE_REMOTE_ACCESS_DENIED, /* 9 a service was denied by the server - due to lack of access - when login fails - this is not returned. */ - CURLE_FTP_ACCEPT_FAILED, /* 10 - [was obsoleted in April 2006 for - 7.15.4, reused in Dec 2011 for 7.24.0]*/ - CURLE_FTP_WEIRD_PASS_REPLY, /* 11 */ - CURLE_FTP_ACCEPT_TIMEOUT, /* 12 - timeout occurred accepting server - [was obsoleted in August 2007 for 7.17.0, - reused in Dec 2011 for 7.24.0]*/ - CURLE_FTP_WEIRD_PASV_REPLY, /* 13 */ - CURLE_FTP_WEIRD_227_FORMAT, /* 14 */ - CURLE_FTP_CANT_GET_HOST, /* 15 */ - CURLE_HTTP2, /* 16 - A problem in the http2 framing layer. - [was obsoleted in August 2007 for 7.17.0, - reused in July 2014 for 7.38.0] */ - CURLE_FTP_COULDNT_SET_TYPE, /* 17 */ - CURLE_PARTIAL_FILE, /* 18 */ - CURLE_FTP_COULDNT_RETR_FILE, /* 19 */ - CURLE_OBSOLETE20, /* 20 - NOT USED */ - CURLE_QUOTE_ERROR, /* 21 - quote command failure */ - CURLE_HTTP_RETURNED_ERROR, /* 22 */ - CURLE_WRITE_ERROR, /* 23 */ - CURLE_OBSOLETE24, /* 24 - NOT USED */ - CURLE_UPLOAD_FAILED, /* 25 - failed upload "command" */ - CURLE_READ_ERROR, /* 26 - couldn't open/read from file */ - CURLE_OUT_OF_MEMORY, /* 27 */ - /* Note: CURLE_OUT_OF_MEMORY may sometimes indicate a conversion error - instead of a memory allocation error if CURL_DOES_CONVERSIONS - is defined - */ - CURLE_OPERATION_TIMEDOUT, /* 28 - the timeout time was reached */ - CURLE_OBSOLETE29, /* 29 - NOT USED */ - CURLE_FTP_PORT_FAILED, /* 30 - FTP PORT operation failed */ - CURLE_FTP_COULDNT_USE_REST, /* 31 - the REST command failed */ - CURLE_OBSOLETE32, /* 32 - NOT USED */ - CURLE_RANGE_ERROR, /* 33 - RANGE "command" didn't work */ - CURLE_HTTP_POST_ERROR, /* 34 */ - CURLE_SSL_CONNECT_ERROR, /* 35 - wrong when connecting with SSL */ - CURLE_BAD_DOWNLOAD_RESUME, /* 36 - couldn't resume download */ - CURLE_FILE_COULDNT_READ_FILE, /* 37 */ - CURLE_LDAP_CANNOT_BIND, /* 38 */ - CURLE_LDAP_SEARCH_FAILED, /* 39 */ - CURLE_OBSOLETE40, /* 40 - NOT USED */ - CURLE_FUNCTION_NOT_FOUND, /* 41 */ - CURLE_ABORTED_BY_CALLBACK, /* 42 */ - CURLE_BAD_FUNCTION_ARGUMENT, /* 43 */ - CURLE_OBSOLETE44, /* 44 - NOT USED */ - CURLE_INTERFACE_FAILED, /* 45 - CURLOPT_INTERFACE failed */ - CURLE_OBSOLETE46, /* 46 - NOT USED */ - CURLE_TOO_MANY_REDIRECTS , /* 47 - catch endless re-direct loops */ - CURLE_UNKNOWN_OPTION, /* 48 - User specified an unknown option */ - CURLE_TELNET_OPTION_SYNTAX , /* 49 - Malformed telnet option */ - CURLE_OBSOLETE50, /* 50 - NOT USED */ - CURLE_PEER_FAILED_VERIFICATION, /* 51 - peer's certificate or fingerprint - wasn't verified fine */ - CURLE_GOT_NOTHING, /* 52 - when this is a specific error */ - CURLE_SSL_ENGINE_NOTFOUND, /* 53 - SSL crypto engine not found */ - CURLE_SSL_ENGINE_SETFAILED, /* 54 - can not set SSL crypto engine as - default */ - CURLE_SEND_ERROR, /* 55 - failed sending network data */ - CURLE_RECV_ERROR, /* 56 - failure in receiving network data */ - CURLE_OBSOLETE57, /* 57 - NOT IN USE */ - CURLE_SSL_CERTPROBLEM, /* 58 - problem with the local certificate */ - CURLE_SSL_CIPHER, /* 59 - couldn't use specified cipher */ - CURLE_SSL_CACERT, /* 60 - problem with the CA cert (path?) 
*/ - CURLE_BAD_CONTENT_ENCODING, /* 61 - Unrecognized/bad encoding */ - CURLE_LDAP_INVALID_URL, /* 62 - Invalid LDAP URL */ - CURLE_FILESIZE_EXCEEDED, /* 63 - Maximum file size exceeded */ - CURLE_USE_SSL_FAILED, /* 64 - Requested FTP SSL level failed */ - CURLE_SEND_FAIL_REWIND, /* 65 - Sending the data requires a rewind - that failed */ - CURLE_SSL_ENGINE_INITFAILED, /* 66 - failed to initialise ENGINE */ - CURLE_LOGIN_DENIED, /* 67 - user, password or similar was not - accepted and we failed to login */ - CURLE_TFTP_NOTFOUND, /* 68 - file not found on server */ - CURLE_TFTP_PERM, /* 69 - permission problem on server */ - CURLE_REMOTE_DISK_FULL, /* 70 - out of disk space on server */ - CURLE_TFTP_ILLEGAL, /* 71 - Illegal TFTP operation */ - CURLE_TFTP_UNKNOWNID, /* 72 - Unknown transfer ID */ - CURLE_REMOTE_FILE_EXISTS, /* 73 - File already exists */ - CURLE_TFTP_NOSUCHUSER, /* 74 - No such user */ - CURLE_CONV_FAILED, /* 75 - conversion failed */ - CURLE_CONV_REQD, /* 76 - caller must register conversion - callbacks using curl_easy_setopt options - CURLOPT_CONV_FROM_NETWORK_FUNCTION, - CURLOPT_CONV_TO_NETWORK_FUNCTION, and - CURLOPT_CONV_FROM_UTF8_FUNCTION */ - CURLE_SSL_CACERT_BADFILE, /* 77 - could not load CACERT file, missing - or wrong format */ - CURLE_REMOTE_FILE_NOT_FOUND, /* 78 - remote file not found */ - CURLE_SSH, /* 79 - error from the SSH layer, somewhat - generic so the error message will be of - interest when this has happened */ - - CURLE_SSL_SHUTDOWN_FAILED, /* 80 - Failed to shut down the SSL - connection */ - CURLE_AGAIN, /* 81 - socket is not ready for send/recv, - wait till it's ready and try again (Added - in 7.18.2) */ - CURLE_SSL_CRL_BADFILE, /* 82 - could not load CRL file, missing or - wrong format (Added in 7.19.0) */ - CURLE_SSL_ISSUER_ERROR, /* 83 - Issuer check failed. (Added in - 7.19.0) */ - CURLE_FTP_PRET_FAILED, /* 84 - a PRET command failed */ - CURLE_RTSP_CSEQ_ERROR, /* 85 - mismatch of RTSP CSeq numbers */ - CURLE_RTSP_SESSION_ERROR, /* 86 - mismatch of RTSP Session Ids */ - CURLE_FTP_BAD_FILE_LIST, /* 87 - unable to parse FTP file list */ - CURLE_CHUNK_FAILED, /* 88 - chunk callback reported error */ - CURLE_NO_CONNECTION_AVAILABLE, /* 89 - No connection available, the - session will be queued */ - CURLE_SSL_PINNEDPUBKEYNOTMATCH, /* 90 - specified pinned public key did not - match */ - CURLE_SSL_INVALIDCERTSTATUS, /* 91 - invalid certificate status */ - CURL_LAST /* never use! */ -} CURLcode; - -#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all - the obsolete stuff removed! */ - -/* Previously obsolete error code re-used in 7.38.0 */ -#define CURLE_OBSOLETE16 CURLE_HTTP2 - -/* Previously obsolete error codes re-used in 7.24.0 */ -#define CURLE_OBSOLETE10 CURLE_FTP_ACCEPT_FAILED -#define CURLE_OBSOLETE12 CURLE_FTP_ACCEPT_TIMEOUT - -/* compatibility with older names */ -#define CURLOPT_ENCODING CURLOPT_ACCEPT_ENCODING - -/* The following were added in 7.21.5, April 2011 */ -#define CURLE_UNKNOWN_TELNET_OPTION CURLE_UNKNOWN_OPTION - -/* The following were added in 7.17.1 */ -/* These are scheduled to disappear by 2009 */ -#define CURLE_SSL_PEER_CERTIFICATE CURLE_PEER_FAILED_VERIFICATION - -/* The following were added in 7.17.0 */ -/* These are scheduled to disappear by 2009 */ -#define CURLE_OBSOLETE CURLE_OBSOLETE50 /* no one should be using this! 
*/ -#define CURLE_BAD_PASSWORD_ENTERED CURLE_OBSOLETE46 -#define CURLE_BAD_CALLING_ORDER CURLE_OBSOLETE44 -#define CURLE_FTP_USER_PASSWORD_INCORRECT CURLE_OBSOLETE10 -#define CURLE_FTP_CANT_RECONNECT CURLE_OBSOLETE16 -#define CURLE_FTP_COULDNT_GET_SIZE CURLE_OBSOLETE32 -#define CURLE_FTP_COULDNT_SET_ASCII CURLE_OBSOLETE29 -#define CURLE_FTP_WEIRD_USER_REPLY CURLE_OBSOLETE12 -#define CURLE_FTP_WRITE_ERROR CURLE_OBSOLETE20 -#define CURLE_LIBRARY_NOT_FOUND CURLE_OBSOLETE40 -#define CURLE_MALFORMAT_USER CURLE_OBSOLETE24 -#define CURLE_SHARE_IN_USE CURLE_OBSOLETE57 -#define CURLE_URL_MALFORMAT_USER CURLE_NOT_BUILT_IN - -#define CURLE_FTP_ACCESS_DENIED CURLE_REMOTE_ACCESS_DENIED -#define CURLE_FTP_COULDNT_SET_BINARY CURLE_FTP_COULDNT_SET_TYPE -#define CURLE_FTP_QUOTE_ERROR CURLE_QUOTE_ERROR -#define CURLE_TFTP_DISKFULL CURLE_REMOTE_DISK_FULL -#define CURLE_TFTP_EXISTS CURLE_REMOTE_FILE_EXISTS -#define CURLE_HTTP_RANGE_ERROR CURLE_RANGE_ERROR -#define CURLE_FTP_SSL_FAILED CURLE_USE_SSL_FAILED - -/* The following were added earlier */ - -#define CURLE_OPERATION_TIMEOUTED CURLE_OPERATION_TIMEDOUT - -#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR -#define CURLE_HTTP_PORT_FAILED CURLE_INTERFACE_FAILED -#define CURLE_FTP_COULDNT_STOR_FILE CURLE_UPLOAD_FAILED - -#define CURLE_FTP_PARTIAL_FILE CURLE_PARTIAL_FILE -#define CURLE_FTP_BAD_DOWNLOAD_RESUME CURLE_BAD_DOWNLOAD_RESUME - -/* This was the error code 50 in 7.7.3 and a few earlier versions, this - is no longer used by libcurl but is instead #defined here only to not - make programs break */ -#define CURLE_ALREADY_COMPLETE 99999 - -/* Provide defines for really old option names */ -#define CURLOPT_FILE CURLOPT_WRITEDATA /* name changed in 7.9.7 */ -#define CURLOPT_INFILE CURLOPT_READDATA /* name changed in 7.9.7 */ -#define CURLOPT_WRITEHEADER CURLOPT_HEADERDATA - -/* Since long deprecated options with no code in the lib that does anything - with them. */ -#define CURLOPT_WRITEINFO CURLOPT_OBSOLETE40 -#define CURLOPT_CLOSEPOLICY CURLOPT_OBSOLETE72 - -#endif /*!CURL_NO_OLDIES*/ - -/* This prototype applies to all conversion callbacks */ -typedef CURLcode (*curl_conv_callback)(char *buffer, size_t length); - -typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl, /* easy handle */ - void *ssl_ctx, /* actually an - OpenSSL SSL_CTX */ - void *userptr); - -typedef enum { - CURLPROXY_HTTP = 0, /* added in 7.10, new in 7.19.4 default is to use - CONNECT HTTP/1.1 */ - CURLPROXY_HTTP_1_0 = 1, /* added in 7.19.4, force to use CONNECT - HTTP/1.0 */ - CURLPROXY_SOCKS4 = 4, /* support added in 7.15.2, enum existed already - in 7.10 */ - CURLPROXY_SOCKS5 = 5, /* added in 7.10 */ - CURLPROXY_SOCKS4A = 6, /* added in 7.18.0 */ - CURLPROXY_SOCKS5_HOSTNAME = 7 /* Use the SOCKS5 protocol but pass along the - host name rather than the IP address. 
added - in 7.18.0 */ -} curl_proxytype; /* this enum was added in 7.10 */ - -/* - * Bitmasks for CURLOPT_HTTPAUTH and CURLOPT_PROXYAUTH options: - * - * CURLAUTH_NONE - No HTTP authentication - * CURLAUTH_BASIC - HTTP Basic authentication (default) - * CURLAUTH_DIGEST - HTTP Digest authentication - * CURLAUTH_NEGOTIATE - HTTP Negotiate (SPNEGO) authentication - * CURLAUTH_GSSNEGOTIATE - Alias for CURLAUTH_NEGOTIATE (deprecated) - * CURLAUTH_NTLM - HTTP NTLM authentication - * CURLAUTH_DIGEST_IE - HTTP Digest authentication with IE flavour - * CURLAUTH_NTLM_WB - HTTP NTLM authentication delegated to winbind helper - * CURLAUTH_ONLY - Use together with a single other type to force no - * authentication or just that single type - * CURLAUTH_ANY - All fine types set - * CURLAUTH_ANYSAFE - All fine types except Basic - */ - -#define CURLAUTH_NONE ((unsigned long)0) -#define CURLAUTH_BASIC (((unsigned long)1)<<0) -#define CURLAUTH_DIGEST (((unsigned long)1)<<1) -#define CURLAUTH_NEGOTIATE (((unsigned long)1)<<2) -/* Deprecated since the advent of CURLAUTH_NEGOTIATE */ -#define CURLAUTH_GSSNEGOTIATE CURLAUTH_NEGOTIATE -#define CURLAUTH_NTLM (((unsigned long)1)<<3) -#define CURLAUTH_DIGEST_IE (((unsigned long)1)<<4) -#define CURLAUTH_NTLM_WB (((unsigned long)1)<<5) -#define CURLAUTH_ONLY (((unsigned long)1)<<31) -#define CURLAUTH_ANY (~CURLAUTH_DIGEST_IE) -#define CURLAUTH_ANYSAFE (~(CURLAUTH_BASIC|CURLAUTH_DIGEST_IE)) - -#define CURLSSH_AUTH_ANY ~0 /* all types supported by the server */ -#define CURLSSH_AUTH_NONE 0 /* none allowed, silly but complete */ -#define CURLSSH_AUTH_PUBLICKEY (1<<0) /* public/private key files */ -#define CURLSSH_AUTH_PASSWORD (1<<1) /* password */ -#define CURLSSH_AUTH_HOST (1<<2) /* host key files */ -#define CURLSSH_AUTH_KEYBOARD (1<<3) /* keyboard interactive */ -#define CURLSSH_AUTH_AGENT (1<<4) /* agent (ssh-agent, pageant...) */ -#define CURLSSH_AUTH_DEFAULT CURLSSH_AUTH_ANY - -#define CURLGSSAPI_DELEGATION_NONE 0 /* no delegation (default) */ -#define CURLGSSAPI_DELEGATION_POLICY_FLAG (1<<0) /* if permitted by policy */ -#define CURLGSSAPI_DELEGATION_FLAG (1<<1) /* delegate always */ - -#define CURL_ERROR_SIZE 256 - -enum curl_khtype { - CURLKHTYPE_UNKNOWN, - CURLKHTYPE_RSA1, - CURLKHTYPE_RSA, - CURLKHTYPE_DSS -}; - -struct curl_khkey { - const char *key; /* points to a zero-terminated string encoded with base64 - if len is zero, otherwise to the "raw" data */ - size_t len; - enum curl_khtype keytype; -}; - -/* this is the set of return values expected from the curl_sshkeycallback - callback */ -enum curl_khstat { - CURLKHSTAT_FINE_ADD_TO_FILE, - CURLKHSTAT_FINE, - CURLKHSTAT_REJECT, /* reject the connection, return an error */ - CURLKHSTAT_DEFER, /* do not accept it, but we can't answer right now so - this causes a CURLE_DEFER error but otherwise the - connection will be left intact etc */ - CURLKHSTAT_LAST /* not for use, only a marker for last-in-list */ -}; - -/* this is the set of status codes pass in to the callback */ -enum curl_khmatch { - CURLKHMATCH_OK, /* match */ - CURLKHMATCH_MISMATCH, /* host found, key mismatch! 
*/ - CURLKHMATCH_MISSING, /* no matching host/key found */ - CURLKHMATCH_LAST /* not for use, only a marker for last-in-list */ -}; - -typedef int - (*curl_sshkeycallback) (CURL *easy, /* easy handle */ - const struct curl_khkey *knownkey, /* known */ - const struct curl_khkey *foundkey, /* found */ - enum curl_khmatch, /* libcurl's view on the keys */ - void *clientp); /* custom pointer passed from app */ - -/* parameter for the CURLOPT_USE_SSL option */ -typedef enum { - CURLUSESSL_NONE, /* do not attempt to use SSL */ - CURLUSESSL_TRY, /* try using SSL, proceed anyway otherwise */ - CURLUSESSL_CONTROL, /* SSL for the control connection or fail */ - CURLUSESSL_ALL, /* SSL for all communication or fail */ - CURLUSESSL_LAST /* not an option, never use */ -} curl_usessl; - -/* Definition of bits for the CURLOPT_SSL_OPTIONS argument: */ - -/* - ALLOW_BEAST tells libcurl to allow the BEAST SSL vulnerability in the - name of improving interoperability with older servers. Some SSL libraries - have introduced work-arounds for this flaw but those work-arounds sometimes - make the SSL communication fail. To regain functionality with those broken - servers, a user can this way allow the vulnerability back. */ -#define CURLSSLOPT_ALLOW_BEAST (1<<0) - -/* - NO_REVOKE tells libcurl to disable certificate revocation checks for those - SSL backends where such behavior is present. */ -#define CURLSSLOPT_NO_REVOKE (1<<1) - -#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all - the obsolete stuff removed! */ - -/* Backwards compatibility with older names */ -/* These are scheduled to disappear by 2009 */ - -#define CURLFTPSSL_NONE CURLUSESSL_NONE -#define CURLFTPSSL_TRY CURLUSESSL_TRY -#define CURLFTPSSL_CONTROL CURLUSESSL_CONTROL -#define CURLFTPSSL_ALL CURLUSESSL_ALL -#define CURLFTPSSL_LAST CURLUSESSL_LAST -#define curl_ftpssl curl_usessl -#endif /*!CURL_NO_OLDIES*/ - -/* parameter for the CURLOPT_FTP_SSL_CCC option */ -typedef enum { - CURLFTPSSL_CCC_NONE, /* do not send CCC */ - CURLFTPSSL_CCC_PASSIVE, /* Let the server initiate the shutdown */ - CURLFTPSSL_CCC_ACTIVE, /* Initiate the shutdown */ - CURLFTPSSL_CCC_LAST /* not an option, never use */ -} curl_ftpccc; - -/* parameter for the CURLOPT_FTPSSLAUTH option */ -typedef enum { - CURLFTPAUTH_DEFAULT, /* let libcurl decide */ - CURLFTPAUTH_SSL, /* use "AUTH SSL" */ - CURLFTPAUTH_TLS, /* use "AUTH TLS" */ - CURLFTPAUTH_LAST /* not an option, never use */ -} curl_ftpauth; - -/* parameter for the CURLOPT_FTP_CREATE_MISSING_DIRS option */ -typedef enum { - CURLFTP_CREATE_DIR_NONE, /* do NOT create missing dirs! */ - CURLFTP_CREATE_DIR, /* (FTP/SFTP) if CWD fails, try MKD and then CWD - again if MKD succeeded, for SFTP this does - similar magic */ - CURLFTP_CREATE_DIR_RETRY, /* (FTP only) if CWD fails, try MKD and then CWD - again even if MKD failed! 
*/ - CURLFTP_CREATE_DIR_LAST /* not an option, never use */ -} curl_ftpcreatedir; - -/* parameter for the CURLOPT_FTP_FILEMETHOD option */ -typedef enum { - CURLFTPMETHOD_DEFAULT, /* let libcurl pick */ - CURLFTPMETHOD_MULTICWD, /* single CWD operation for each path part */ - CURLFTPMETHOD_NOCWD, /* no CWD at all */ - CURLFTPMETHOD_SINGLECWD, /* one CWD to full dir, then work on file */ - CURLFTPMETHOD_LAST /* not an option, never use */ -} curl_ftpmethod; - -/* bitmask defines for CURLOPT_HEADEROPT */ -#define CURLHEADER_UNIFIED 0 -#define CURLHEADER_SEPARATE (1<<0) - -/* CURLPROTO_ defines are for the CURLOPT_*PROTOCOLS options */ -#define CURLPROTO_HTTP (1<<0) -#define CURLPROTO_HTTPS (1<<1) -#define CURLPROTO_FTP (1<<2) -#define CURLPROTO_FTPS (1<<3) -#define CURLPROTO_SCP (1<<4) -#define CURLPROTO_SFTP (1<<5) -#define CURLPROTO_TELNET (1<<6) -#define CURLPROTO_LDAP (1<<7) -#define CURLPROTO_LDAPS (1<<8) -#define CURLPROTO_DICT (1<<9) -#define CURLPROTO_FILE (1<<10) -#define CURLPROTO_TFTP (1<<11) -#define CURLPROTO_IMAP (1<<12) -#define CURLPROTO_IMAPS (1<<13) -#define CURLPROTO_POP3 (1<<14) -#define CURLPROTO_POP3S (1<<15) -#define CURLPROTO_SMTP (1<<16) -#define CURLPROTO_SMTPS (1<<17) -#define CURLPROTO_RTSP (1<<18) -#define CURLPROTO_RTMP (1<<19) -#define CURLPROTO_RTMPT (1<<20) -#define CURLPROTO_RTMPE (1<<21) -#define CURLPROTO_RTMPTE (1<<22) -#define CURLPROTO_RTMPS (1<<23) -#define CURLPROTO_RTMPTS (1<<24) -#define CURLPROTO_GOPHER (1<<25) -#define CURLPROTO_SMB (1<<26) -#define CURLPROTO_SMBS (1<<27) -#define CURLPROTO_ALL (~0) /* enable everything */ - -/* long may be 32 or 64 bits, but we should never depend on anything else - but 32 */ -#define CURLOPTTYPE_LONG 0 -#define CURLOPTTYPE_OBJECTPOINT 10000 -#define CURLOPTTYPE_STRINGPOINT 10000 -#define CURLOPTTYPE_FUNCTIONPOINT 20000 -#define CURLOPTTYPE_OFF_T 30000 - -/* *STRINGPOINT is an alias for OBJECTPOINT to allow tools to extract the - string options from the header file */ - -/* name is uppercase CURLOPT_, - type is one of the defined CURLOPTTYPE_ - number is unique identifier */ -#ifdef CINIT -#undef CINIT -#endif - -#ifdef CURL_ISOCPP -#define CINIT(na,t,nu) CURLOPT_ ## na = CURLOPTTYPE_ ## t + nu -#else -/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */ -#define LONG CURLOPTTYPE_LONG -#define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT -#define STRINGPOINT CURLOPTTYPE_OBJECTPOINT -#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT -#define OFF_T CURLOPTTYPE_OFF_T -#define CINIT(name,type,number) CURLOPT_/**/name = type + number -#endif - -/* - * This macro-mania below setups the CURLOPT_[what] enum, to be used with - * curl_easy_setopt(). The first argument in the CINIT() macro is the [what] - * word. - */ - -typedef enum { - /* This is the FILE * or void * the regular output should be written to. */ - CINIT(WRITEDATA, OBJECTPOINT, 1), - - /* The full URL to get/put */ - CINIT(URL, STRINGPOINT, 2), - - /* Port number to connect to, if other than default. */ - CINIT(PORT, LONG, 3), - - /* Name of proxy to use. */ - CINIT(PROXY, STRINGPOINT, 4), - - /* "user:password;options" to use when fetching. */ - CINIT(USERPWD, STRINGPOINT, 5), - - /* "user:password" to use with proxy. */ - CINIT(PROXYUSERPWD, STRINGPOINT, 6), - - /* Range to get, specified as an ASCII string. */ - CINIT(RANGE, STRINGPOINT, 7), - - /* not used */ - - /* Specified file stream to upload from (use as input): */ - CINIT(READDATA, OBJECTPOINT, 9), - - /* Buffer to receive error messages in, must be at least CURL_ERROR_SIZE - * bytes big. 
If this is not used, error messages go to stderr instead: */ - CINIT(ERRORBUFFER, OBJECTPOINT, 10), - - /* Function that will be called to store the output (instead of fwrite). The - * parameters will use fwrite() syntax, make sure to follow them. */ - CINIT(WRITEFUNCTION, FUNCTIONPOINT, 11), - - /* Function that will be called to read the input (instead of fread). The - * parameters will use fread() syntax, make sure to follow them. */ - CINIT(READFUNCTION, FUNCTIONPOINT, 12), - - /* Time-out the read operation after this amount of seconds */ - CINIT(TIMEOUT, LONG, 13), - - /* If the CURLOPT_INFILE is used, this can be used to inform libcurl about - * how large the file being sent really is. That allows better error - * checking and better verifies that the upload was successful. -1 means - * unknown size. - * - * For large file support, there is also a _LARGE version of the key - * which takes an off_t type, allowing platforms with larger off_t - * sizes to handle larger files. See below for INFILESIZE_LARGE. - */ - CINIT(INFILESIZE, LONG, 14), - - /* POST static input fields. */ - CINIT(POSTFIELDS, OBJECTPOINT, 15), - - /* Set the referrer page (needed by some CGIs) */ - CINIT(REFERER, STRINGPOINT, 16), - - /* Set the FTP PORT string (interface name, named or numerical IP address) - Use i.e '-' to use default address. */ - CINIT(FTPPORT, STRINGPOINT, 17), - - /* Set the User-Agent string (examined by some CGIs) */ - CINIT(USERAGENT, STRINGPOINT, 18), - - /* If the download receives less than "low speed limit" bytes/second - * during "low speed time" seconds, the operations is aborted. - * You could i.e if you have a pretty high speed connection, abort if - * it is less than 2000 bytes/sec during 20 seconds. - */ - - /* Set the "low speed limit" */ - CINIT(LOW_SPEED_LIMIT, LONG, 19), - - /* Set the "low speed time" */ - CINIT(LOW_SPEED_TIME, LONG, 20), - - /* Set the continuation offset. - * - * Note there is also a _LARGE version of this key which uses - * off_t types, allowing for large file offsets on platforms which - * use larger-than-32-bit off_t's. Look below for RESUME_FROM_LARGE. - */ - CINIT(RESUME_FROM, LONG, 21), - - /* Set cookie in request: */ - CINIT(COOKIE, STRINGPOINT, 22), - - /* This points to a linked list of headers, struct curl_slist kind. This - list is also used for RTSP (in spite of its name) */ - CINIT(HTTPHEADER, OBJECTPOINT, 23), - - /* This points to a linked list of post entries, struct curl_httppost */ - CINIT(HTTPPOST, OBJECTPOINT, 24), - - /* name of the file keeping your private SSL-certificate */ - CINIT(SSLCERT, STRINGPOINT, 25), - - /* password for the SSL or SSH private key */ - CINIT(KEYPASSWD, STRINGPOINT, 26), - - /* send TYPE parameter? */ - CINIT(CRLF, LONG, 27), - - /* send linked-list of QUOTE commands */ - CINIT(QUOTE, OBJECTPOINT, 28), - - /* send FILE * or void * to store headers to, if you use a callback it - is simply passed to the callback unmodified */ - CINIT(HEADERDATA, OBJECTPOINT, 29), - - /* point to a file to read the initial cookies from, also enables - "cookie awareness" */ - CINIT(COOKIEFILE, STRINGPOINT, 31), - - /* What version to specifically try to use. - See CURL_SSLVERSION defines below. */ - CINIT(SSLVERSION, LONG, 32), - - /* What kind of HTTP time condition to use, see defines */ - CINIT(TIMECONDITION, LONG, 33), - - /* Time to use with the above condition. 
Specified in number of seconds - since 1 Jan 1970 */ - CINIT(TIMEVALUE, LONG, 34), - - /* 35 = OBSOLETE */ - - /* Custom request, for customizing the get command like - HTTP: DELETE, TRACE and others - FTP: to use a different list command - */ - CINIT(CUSTOMREQUEST, STRINGPOINT, 36), - - /* FILE handle to use instead of stderr */ - CINIT(STDERR, OBJECTPOINT, 37), - - /* 38 is not used */ - - /* send linked-list of post-transfer QUOTE commands */ - CINIT(POSTQUOTE, OBJECTPOINT, 39), - - CINIT(OBSOLETE40, OBJECTPOINT, 40), /* OBSOLETE, do not use! */ - - CINIT(VERBOSE, LONG, 41), /* talk a lot */ - CINIT(HEADER, LONG, 42), /* throw the header out too */ - CINIT(NOPROGRESS, LONG, 43), /* shut off the progress meter */ - CINIT(NOBODY, LONG, 44), /* use HEAD to get http document */ - CINIT(FAILONERROR, LONG, 45), /* no output on http error codes >= 400 */ - CINIT(UPLOAD, LONG, 46), /* this is an upload */ - CINIT(POST, LONG, 47), /* HTTP POST method */ - CINIT(DIRLISTONLY, LONG, 48), /* bare names when listing directories */ - - CINIT(APPEND, LONG, 50), /* Append instead of overwrite on upload! */ - - /* Specify whether to read the user+password from the .netrc or the URL. - * This must be one of the CURL_NETRC_* enums below. */ - CINIT(NETRC, LONG, 51), - - CINIT(FOLLOWLOCATION, LONG, 52), /* use Location: Luke! */ - - CINIT(TRANSFERTEXT, LONG, 53), /* transfer data in text/ASCII format */ - CINIT(PUT, LONG, 54), /* HTTP PUT */ - - /* 55 = OBSOLETE */ - - /* DEPRECATED - * Function that will be called instead of the internal progress display - * function. This function should be defined as the curl_progress_callback - * prototype defines. */ - CINIT(PROGRESSFUNCTION, FUNCTIONPOINT, 56), - - /* Data passed to the CURLOPT_PROGRESSFUNCTION and CURLOPT_XFERINFOFUNCTION - callbacks */ - CINIT(PROGRESSDATA, OBJECTPOINT, 57), -#define CURLOPT_XFERINFODATA CURLOPT_PROGRESSDATA - - /* We want the referrer field set automatically when following locations */ - CINIT(AUTOREFERER, LONG, 58), - - /* Port of the proxy, can be set in the proxy string as well with: - "[host]:[port]" */ - CINIT(PROXYPORT, LONG, 59), - - /* size of the POST input data, if strlen() is not good to use */ - CINIT(POSTFIELDSIZE, LONG, 60), - - /* tunnel non-http operations through a HTTP proxy */ - CINIT(HTTPPROXYTUNNEL, LONG, 61), - - /* Set the interface string to use as outgoing network interface */ - CINIT(INTERFACE, STRINGPOINT, 62), - - /* Set the krb4/5 security level, this also enables krb4/5 awareness. This - * is a string, 'clear', 'safe', 'confidential' or 'private'. If the string - * is set but doesn't match one of these, 'private' will be used. */ - CINIT(KRBLEVEL, STRINGPOINT, 63), - - /* Set if we should verify the peer in ssl handshake, set 1 to verify. */ - CINIT(SSL_VERIFYPEER, LONG, 64), - - /* The CApath or CAfile used to validate the peer certificate - this option is used only if SSL_VERIFYPEER is true */ - CINIT(CAINFO, STRINGPOINT, 65), - - /* 66 = OBSOLETE */ - /* 67 = OBSOLETE */ - - /* Maximum number of http redirects to follow */ - CINIT(MAXREDIRS, LONG, 68), - - /* Pass a long set to 1 to get the date of the requested document (if - possible)! Pass a zero to shut it off. */ - CINIT(FILETIME, LONG, 69), - - /* This points to a linked list of telnet options */ - CINIT(TELNETOPTIONS, OBJECTPOINT, 70), - - /* Max amount of cached alive connections */ - CINIT(MAXCONNECTS, LONG, 71), - - CINIT(OBSOLETE72, LONG, 72), /* OBSOLETE, do not use! 
*/ - - /* 73 = OBSOLETE */ - - /* Set to explicitly use a new connection for the upcoming transfer. - Do not use this unless you're absolutely sure of this, as it makes the - operation slower and is less friendly for the network. */ - CINIT(FRESH_CONNECT, LONG, 74), - - /* Set to explicitly forbid the upcoming transfer's connection to be re-used - when done. Do not use this unless you're absolutely sure of this, as it - makes the operation slower and is less friendly for the network. */ - CINIT(FORBID_REUSE, LONG, 75), - - /* Set to a file name that contains random data for libcurl to use to - seed the random engine when doing SSL connects. */ - CINIT(RANDOM_FILE, STRINGPOINT, 76), - - /* Set to the Entropy Gathering Daemon socket pathname */ - CINIT(EGDSOCKET, STRINGPOINT, 77), - - /* Time-out connect operations after this amount of seconds, if connects are - OK within this time, then fine... This only aborts the connect phase. */ - CINIT(CONNECTTIMEOUT, LONG, 78), - - /* Function that will be called to store headers (instead of fwrite). The - * parameters will use fwrite() syntax, make sure to follow them. */ - CINIT(HEADERFUNCTION, FUNCTIONPOINT, 79), - - /* Set this to force the HTTP request to get back to GET. Only really usable - if POST, PUT or a custom request have been used first. - */ - CINIT(HTTPGET, LONG, 80), - - /* Set if we should verify the Common name from the peer certificate in ssl - * handshake, set 1 to check existence, 2 to ensure that it matches the - * provided hostname. */ - CINIT(SSL_VERIFYHOST, LONG, 81), - - /* Specify which file name to write all known cookies in after completed - operation. Set file name to "-" (dash) to make it go to stdout. */ - CINIT(COOKIEJAR, STRINGPOINT, 82), - - /* Specify which SSL ciphers to use */ - CINIT(SSL_CIPHER_LIST, STRINGPOINT, 83), - - /* Specify which HTTP version to use! This must be set to one of the - CURL_HTTP_VERSION* enums set below. */ - CINIT(HTTP_VERSION, LONG, 84), - - /* Specifically switch on or off the FTP engine's use of the EPSV command. By - default, that one will always be attempted before the more traditional - PASV command. */ - CINIT(FTP_USE_EPSV, LONG, 85), - - /* type of the file keeping your SSL-certificate ("DER", "PEM", "ENG") */ - CINIT(SSLCERTTYPE, STRINGPOINT, 86), - - /* name of the file keeping your private SSL-key */ - CINIT(SSLKEY, STRINGPOINT, 87), - - /* type of the file keeping your private SSL-key ("DER", "PEM", "ENG") */ - CINIT(SSLKEYTYPE, STRINGPOINT, 88), - - /* crypto engine for the SSL-sub system */ - CINIT(SSLENGINE, STRINGPOINT, 89), - - /* set the crypto engine for the SSL-sub system as default - the param has no meaning... - */ - CINIT(SSLENGINE_DEFAULT, LONG, 90), - - /* Non-zero value means to use the global dns cache */ - CINIT(DNS_USE_GLOBAL_CACHE, LONG, 91), /* DEPRECATED, do not use! 
*/ - - /* DNS cache timeout */ - CINIT(DNS_CACHE_TIMEOUT, LONG, 92), - - /* send linked-list of pre-transfer QUOTE commands */ - CINIT(PREQUOTE, OBJECTPOINT, 93), - - /* set the debug function */ - CINIT(DEBUGFUNCTION, FUNCTIONPOINT, 94), - - /* set the data for the debug function */ - CINIT(DEBUGDATA, OBJECTPOINT, 95), - - /* mark this as start of a cookie session */ - CINIT(COOKIESESSION, LONG, 96), - - /* The CApath directory used to validate the peer certificate - this option is used only if SSL_VERIFYPEER is true */ - CINIT(CAPATH, STRINGPOINT, 97), - - /* Instruct libcurl to use a smaller receive buffer */ - CINIT(BUFFERSIZE, LONG, 98), - - /* Instruct libcurl to not use any signal/alarm handlers, even when using - timeouts. This option is useful for multi-threaded applications. - See libcurl-the-guide for more background information. */ - CINIT(NOSIGNAL, LONG, 99), - - /* Provide a CURLShare for mutexing non-ts data */ - CINIT(SHARE, OBJECTPOINT, 100), - - /* indicates type of proxy. accepted values are CURLPROXY_HTTP (default), - CURLPROXY_SOCKS4, CURLPROXY_SOCKS4A and CURLPROXY_SOCKS5. */ - CINIT(PROXYTYPE, LONG, 101), - - /* Set the Accept-Encoding string. Use this to tell a server you would like - the response to be compressed. Before 7.21.6, this was known as - CURLOPT_ENCODING */ - CINIT(ACCEPT_ENCODING, STRINGPOINT, 102), - - /* Set pointer to private data */ - CINIT(PRIVATE, OBJECTPOINT, 103), - - /* Set aliases for HTTP 200 in the HTTP Response header */ - CINIT(HTTP200ALIASES, OBJECTPOINT, 104), - - /* Continue to send authentication (user+password) when following locations, - even when hostname changed. This can potentially send off the name - and password to whatever host the server decides. */ - CINIT(UNRESTRICTED_AUTH, LONG, 105), - - /* Specifically switch on or off the FTP engine's use of the EPRT command ( - it also disables the LPRT attempt). By default, those ones will always be - attempted before the good old traditional PORT command. */ - CINIT(FTP_USE_EPRT, LONG, 106), - - /* Set this to a bitmask value to enable the particular authentications - methods you like. Use this in combination with CURLOPT_USERPWD. - Note that setting multiple bits may cause extra network round-trips. */ - CINIT(HTTPAUTH, LONG, 107), - - /* Set the ssl context callback function, currently only for OpenSSL ssl_ctx - in second argument. The function must be matching the - curl_ssl_ctx_callback proto. */ - CINIT(SSL_CTX_FUNCTION, FUNCTIONPOINT, 108), - - /* Set the userdata for the ssl context callback function's third - argument */ - CINIT(SSL_CTX_DATA, OBJECTPOINT, 109), - - /* FTP Option that causes missing dirs to be created on the remote server. - In 7.19.4 we introduced the convenience enums for this option using the - CURLFTP_CREATE_DIR prefix. - */ - CINIT(FTP_CREATE_MISSING_DIRS, LONG, 110), - - /* Set this to a bitmask value to enable the particular authentications - methods you like. Use this in combination with CURLOPT_PROXYUSERPWD. - Note that setting multiple bits may cause extra network round-trips. */ - CINIT(PROXYAUTH, LONG, 111), - - /* FTP option that changes the timeout, in seconds, associated with - getting a response. This is different from transfer timeout time and - essentially places a demand on the FTP server to acknowledge commands - in a timely manner. 
*/ - CINIT(FTP_RESPONSE_TIMEOUT, LONG, 112), -#define CURLOPT_SERVER_RESPONSE_TIMEOUT CURLOPT_FTP_RESPONSE_TIMEOUT - - /* Set this option to one of the CURL_IPRESOLVE_* defines (see below) to - tell libcurl to resolve names to those IP versions only. This only has - an effect on systems with support for more than one, i.e. IPv4 _and_ IPv6. */ - CINIT(IPRESOLVE, LONG, 113), - - /* Set this option to limit the size of a file that will be downloaded from - an HTTP or FTP server. - - Note there is also a _LARGE version which adds large file support for - platforms which have larger off_t sizes. See MAXFILESIZE_LARGE below. */ - CINIT(MAXFILESIZE, LONG, 114), - - /* See the comment for INFILESIZE above, but in short, specifies - * the size of the file being uploaded. -1 means unknown. - */ - CINIT(INFILESIZE_LARGE, OFF_T, 115), - - /* Sets the continuation offset. There is also a LONG version of this; - * look above for RESUME_FROM. - */ - CINIT(RESUME_FROM_LARGE, OFF_T, 116), - - /* Sets the maximum size of data that will be downloaded from - * an HTTP or FTP server. See MAXFILESIZE above for the LONG version. - */ - CINIT(MAXFILESIZE_LARGE, OFF_T, 117), - - /* Set this option to the file name of your .netrc file you want libcurl - to parse (using the CURLOPT_NETRC option). If not set, libcurl will make - a poor attempt to find the user's home directory and check for a .netrc - file in there. */ - CINIT(NETRC_FILE, STRINGPOINT, 118), - - /* Enable SSL/TLS for FTP, pick one of: - CURLUSESSL_TRY - try using SSL, proceed anyway otherwise - CURLUSESSL_CONTROL - SSL for the control connection or fail - CURLUSESSL_ALL - SSL for all communication or fail - */ - CINIT(USE_SSL, LONG, 119), - - /* The _LARGE version of the standard POSTFIELDSIZE option */ - CINIT(POSTFIELDSIZE_LARGE, OFF_T, 120), - - /* Enable/disable the TCP Nagle algorithm */ - CINIT(TCP_NODELAY, LONG, 121), - - /* 122 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */ - /* 123 OBSOLETE. Gone in 7.16.0 */ - /* 124 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */ - /* 125 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */ - /* 126 OBSOLETE, used in 7.12.3. Gone in 7.13.0 */ - /* 127 OBSOLETE. Gone in 7.16.0 */ - /* 128 OBSOLETE. Gone in 7.16.0 */ - - /* When FTP over SSL/TLS is selected (with CURLOPT_USE_SSL), this option - can be used to change libcurl's default action which is to first try - "AUTH SSL" and then "AUTH TLS" in this order, and proceed when an OK - response has been received. - - Available parameters are: - CURLFTPAUTH_DEFAULT - let libcurl decide - CURLFTPAUTH_SSL - try "AUTH SSL" first, then TLS - CURLFTPAUTH_TLS - try "AUTH TLS" first, then SSL - */ - CINIT(FTPSSLAUTH, LONG, 129), - - CINIT(IOCTLFUNCTION, FUNCTIONPOINT, 130), - CINIT(IOCTLDATA, OBJECTPOINT, 131), - - /* 132 OBSOLETE. Gone in 7.16.0 */ - /* 133 OBSOLETE. Gone in 7.16.0 */ - - /* zero-terminated string to pass on to the FTP server when asked for - "account" info */ - CINIT(FTP_ACCOUNT, STRINGPOINT, 134), - - /* feed cookie into cookie engine */ - CINIT(COOKIELIST, STRINGPOINT, 135), - - /* ignore Content-Length */ - CINIT(IGNORE_CONTENT_LENGTH, LONG, 136), - - /* Set to non-zero to skip the IP address received in a 227 PASV FTP server - response. Typically used for FTP-SSL purposes but is not restricted to - that. libcurl will then instead use the same IP address it used for the - control connection. */ - CINIT(FTP_SKIP_PASV_IP, LONG, 137), - - /* Select "file method" to use when doing FTP, see the curl_ftpmethod - above. 
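
A short sketch of why the OFF_T variants above exist: they take curl_off_t instead of long, so limits beyond 2GB survive on platforms where long is 32 bits. The 3GB cap is an arbitrary example value.

#include <curl/curl.h>

static void cap_download(CURL *curl)
{
  /* curl_off_t arithmetic keeps the value exact past 2GB */
  curl_off_t limit = (curl_off_t)3 * 1024 * 1024 * 1024;   /* example cap */
  curl_easy_setopt(curl, CURLOPT_MAXFILESIZE_LARGE, limit);
  curl_easy_setopt(curl, CURLOPT_IPRESOLVE, (long)CURL_IPRESOLVE_V4);
}
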
*/ - CINIT(FTP_FILEMETHOD, LONG, 138), - - /* Local port number to bind the socket to */ - CINIT(LOCALPORT, LONG, 139), - - /* Number of ports to try, including the first one set with LOCALPORT. - Thus, setting it to 1 will make no additional attempts but the first. - */ - CINIT(LOCALPORTRANGE, LONG, 140), - - /* no transfer, set up connection and let application use the socket by - extracting it with CURLINFO_LASTSOCKET */ - CINIT(CONNECT_ONLY, LONG, 141), - - /* Function that will be called to convert from the - network encoding (instead of using the iconv calls in libcurl) */ - CINIT(CONV_FROM_NETWORK_FUNCTION, FUNCTIONPOINT, 142), - - /* Function that will be called to convert to the - network encoding (instead of using the iconv calls in libcurl) */ - CINIT(CONV_TO_NETWORK_FUNCTION, FUNCTIONPOINT, 143), - - /* Function that will be called to convert from UTF8 - (instead of using the iconv calls in libcurl) - Note that this is used only for SSL certificate processing */ - CINIT(CONV_FROM_UTF8_FUNCTION, FUNCTIONPOINT, 144), - - /* if the connection proceeds too quickly then need to slow it down */ - /* limit-rate: maximum number of bytes per second to send or receive */ - CINIT(MAX_SEND_SPEED_LARGE, OFF_T, 145), - CINIT(MAX_RECV_SPEED_LARGE, OFF_T, 146), - - /* Pointer to command string to send if USER/PASS fails. */ - CINIT(FTP_ALTERNATIVE_TO_USER, STRINGPOINT, 147), - - /* callback function for setting socket options */ - CINIT(SOCKOPTFUNCTION, FUNCTIONPOINT, 148), - CINIT(SOCKOPTDATA, OBJECTPOINT, 149), - - /* set to 0 to disable session ID re-use for this transfer, default is - enabled (== 1) */ - CINIT(SSL_SESSIONID_CACHE, LONG, 150), - - /* allowed SSH authentication methods */ - CINIT(SSH_AUTH_TYPES, LONG, 151), - - /* Used by scp/sftp to do public/private key authentication */ - CINIT(SSH_PUBLIC_KEYFILE, STRINGPOINT, 152), - CINIT(SSH_PRIVATE_KEYFILE, STRINGPOINT, 153), - - /* Send CCC (Clear Command Channel) after authentication */ - CINIT(FTP_SSL_CCC, LONG, 154), - - /* Same as TIMEOUT and CONNECTTIMEOUT, but with ms resolution */ - CINIT(TIMEOUT_MS, LONG, 155), - CINIT(CONNECTTIMEOUT_MS, LONG, 156), - - /* set to zero to disable the libcurl's decoding and thus pass the raw body - data to the application even when it is encoded/compressed */ - CINIT(HTTP_TRANSFER_DECODING, LONG, 157), - CINIT(HTTP_CONTENT_DECODING, LONG, 158), - - /* Permission used when creating new files and directories on the remote - server for protocols that support it, SFTP/SCP/FILE */ - CINIT(NEW_FILE_PERMS, LONG, 159), - CINIT(NEW_DIRECTORY_PERMS, LONG, 160), - - /* Set the behaviour of POST when redirecting. Values must be set to one - of CURL_REDIR* defines below. This used to be called CURLOPT_POST301 */ - CINIT(POSTREDIR, LONG, 161), - - /* used by scp/sftp to verify the host's public key */ - CINIT(SSH_HOST_PUBLIC_KEY_MD5, STRINGPOINT, 162), - - /* Callback function for opening socket (instead of socket(2)). Optionally, - callback is able change the address or refuse to connect returning - CURL_SOCKET_BAD. The callback should have type - curl_opensocket_callback */ - CINIT(OPENSOCKETFUNCTION, FUNCTIONPOINT, 163), - CINIT(OPENSOCKETDATA, OBJECTPOINT, 164), - - /* POST volatile input fields. 
*/ - CINIT(COPYPOSTFIELDS, OBJECTPOINT, 165), - - /* set transfer mode (;type=) when doing FTP via an HTTP proxy */ - CINIT(PROXY_TRANSFER_MODE, LONG, 166), - - /* Callback function for seeking in the input stream */ - CINIT(SEEKFUNCTION, FUNCTIONPOINT, 167), - CINIT(SEEKDATA, OBJECTPOINT, 168), - - /* CRL file */ - CINIT(CRLFILE, STRINGPOINT, 169), - - /* Issuer certificate */ - CINIT(ISSUERCERT, STRINGPOINT, 170), - - /* (IPv6) Address scope */ - CINIT(ADDRESS_SCOPE, LONG, 171), - - /* Collect certificate chain info and allow it to get retrievable with - CURLINFO_CERTINFO after the transfer is complete. */ - CINIT(CERTINFO, LONG, 172), - - /* "name" and "pwd" to use when fetching. */ - CINIT(USERNAME, STRINGPOINT, 173), - CINIT(PASSWORD, STRINGPOINT, 174), - - /* "name" and "pwd" to use with Proxy when fetching. */ - CINIT(PROXYUSERNAME, STRINGPOINT, 175), - CINIT(PROXYPASSWORD, STRINGPOINT, 176), - - /* Comma separated list of hostnames defining no-proxy zones. These should - match both hostnames directly, and hostnames within a domain. For - example, local.com will match local.com and www.local.com, but NOT - notlocal.com or www.notlocal.com. For compatibility with other - implementations of this, .local.com will be considered to be the same as - local.com. A single * is the only valid wildcard, and effectively - disables the use of proxy. */ - CINIT(NOPROXY, STRINGPOINT, 177), - - /* block size for TFTP transfers */ - CINIT(TFTP_BLKSIZE, LONG, 178), - - /* Socks Service */ - CINIT(SOCKS5_GSSAPI_SERVICE, STRINGPOINT, 179), - - /* Socks Service */ - CINIT(SOCKS5_GSSAPI_NEC, LONG, 180), - - /* set the bitmask for the protocols that are allowed to be used for the - transfer, which thus helps the app which takes URLs from users or other - external inputs and want to restrict what protocol(s) to deal - with. Defaults to CURLPROTO_ALL. */ - CINIT(PROTOCOLS, LONG, 181), - - /* set the bitmask for the protocols that libcurl is allowed to follow to, - as a subset of the CURLOPT_PROTOCOLS ones. That means the protocol needs - to be set in both bitmasks to be allowed to get redirected to. Defaults - to all protocols except FILE and SCP. */ - CINIT(REDIR_PROTOCOLS, LONG, 182), - - /* set the SSH knownhost file name to use */ - CINIT(SSH_KNOWNHOSTS, STRINGPOINT, 183), - - /* set the SSH host key callback, must point to a curl_sshkeycallback - function */ - CINIT(SSH_KEYFUNCTION, FUNCTIONPOINT, 184), - - /* set the SSH host key callback custom pointer */ - CINIT(SSH_KEYDATA, OBJECTPOINT, 185), - - /* set the SMTP mail originator */ - CINIT(MAIL_FROM, STRINGPOINT, 186), - - /* set the list of SMTP mail receiver(s) */ - CINIT(MAIL_RCPT, OBJECTPOINT, 187), - - /* FTP: send PRET before PASV */ - CINIT(FTP_USE_PRET, LONG, 188), - - /* RTSP request method (OPTIONS, SETUP, PLAY, etc...) */ - CINIT(RTSP_REQUEST, LONG, 189), - - /* The RTSP session identifier */ - CINIT(RTSP_SESSION_ID, STRINGPOINT, 190), - - /* The RTSP stream URI */ - CINIT(RTSP_STREAM_URI, STRINGPOINT, 191), - - /* The Transport: header to use in RTSP requests */ - CINIT(RTSP_TRANSPORT, STRINGPOINT, 192), - - /* Manually initialize the client RTSP CSeq for this handle */ - CINIT(RTSP_CLIENT_CSEQ, LONG, 193), - - /* Manually initialize the server RTSP CSeq for this handle */ - CINIT(RTSP_SERVER_CSEQ, LONG, 194), - - /* The stream to pass to INTERLEAVEFUNCTION. 
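
A sketch of the protocol allow-list described above, assuming the CURLPROTO_* bitmask constants defined elsewhere in this header; this is useful when the URL comes from untrusted input.

#include <curl/curl.h>

static void fetch_untrusted(CURL *curl, const char *user_url)
{
  curl_easy_setopt(curl, CURLOPT_URL, user_url);
  /* only talk HTTP(S), and only follow redirects to HTTPS */
  curl_easy_setopt(curl, CURLOPT_PROTOCOLS, (long)(CURLPROTO_HTTP | CURLPROTO_HTTPS));
  curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, (long)CURLPROTO_HTTPS);
  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
}
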
*/ - CINIT(INTERLEAVEDATA, OBJECTPOINT, 195), - - /* Let the application define a custom write method for RTP data */ - CINIT(INTERLEAVEFUNCTION, FUNCTIONPOINT, 196), - - /* Turn on wildcard matching */ - CINIT(WILDCARDMATCH, LONG, 197), - - /* Directory matching callback called before downloading of an - individual file (chunk) started */ - CINIT(CHUNK_BGN_FUNCTION, FUNCTIONPOINT, 198), - - /* Directory matching callback called after the file (chunk) - was downloaded, or skipped */ - CINIT(CHUNK_END_FUNCTION, FUNCTIONPOINT, 199), - - /* Change match (fnmatch-like) callback for wildcard matching */ - CINIT(FNMATCH_FUNCTION, FUNCTIONPOINT, 200), - - /* Let the application define custom chunk data pointer */ - CINIT(CHUNK_DATA, OBJECTPOINT, 201), - - /* FNMATCH_FUNCTION user pointer */ - CINIT(FNMATCH_DATA, OBJECTPOINT, 202), - - /* send linked-list of name:port:address sets */ - CINIT(RESOLVE, OBJECTPOINT, 203), - - /* Set a username for authenticated TLS */ - CINIT(TLSAUTH_USERNAME, STRINGPOINT, 204), - - /* Set a password for authenticated TLS */ - CINIT(TLSAUTH_PASSWORD, STRINGPOINT, 205), - - /* Set authentication type for authenticated TLS */ - CINIT(TLSAUTH_TYPE, STRINGPOINT, 206), - - /* Set to 1 to enable the "TE:" header in HTTP requests to ask for - compressed transfer-encoded responses. Set to 0 to disable the use of TE: - in outgoing requests. The current default is 0, but it might change in a - future libcurl release. - - libcurl will ask for the compressed methods it knows of, and if there - are none, it will not ask for transfer-encoding at all even if this - option is set to 1. - - */ - CINIT(TRANSFER_ENCODING, LONG, 207), - - /* Callback function for closing socket (instead of close(2)). The callback - should have type curl_closesocket_callback */ - CINIT(CLOSESOCKETFUNCTION, FUNCTIONPOINT, 208), - CINIT(CLOSESOCKETDATA, OBJECTPOINT, 209), - - /* allow GSSAPI credential delegation */ - CINIT(GSSAPI_DELEGATION, LONG, 210), - - /* Set the name servers to use for DNS resolution */ - CINIT(DNS_SERVERS, STRINGPOINT, 211), - - /* Time-out accept operations (currently for FTP only) after this amount - of milliseconds. */ - CINIT(ACCEPTTIMEOUT_MS, LONG, 212), - - /* Set TCP keepalive */ - CINIT(TCP_KEEPALIVE, LONG, 213), - - /* non-universal keepalive knobs (Linux, AIX, HP-UX, more) */ - CINIT(TCP_KEEPIDLE, LONG, 214), - CINIT(TCP_KEEPINTVL, LONG, 215), - - /* Enable/disable specific SSL features with a bitmask, see CURLSSLOPT_* */ - CINIT(SSL_OPTIONS, LONG, 216), - - /* Set the SMTP auth originator */ - CINIT(MAIL_AUTH, STRINGPOINT, 217), - - /* Enable/disable SASL initial response */ - CINIT(SASL_IR, LONG, 218), - - /* Function that will be called instead of the internal progress display - * function. This function should be defined as the curl_xferinfo_callback - * prototype defines. (Deprecates CURLOPT_PROGRESSFUNCTION) */ - CINIT(XFERINFOFUNCTION, FUNCTIONPOINT, 219), - - /* The XOAUTH2 bearer token */ - CINIT(XOAUTH2_BEARER, STRINGPOINT, 220), - - /* Set the interface string to use as outgoing network - * interface for DNS requests. - * Only supported by the c-ares DNS backend */ - CINIT(DNS_INTERFACE, STRINGPOINT, 221), - - /* Set the local IPv4 address to use for outgoing DNS requests. - * Only supported by the c-ares DNS backend */ - CINIT(DNS_LOCAL_IP4, STRINGPOINT, 222), - - /* Set the local IPv6 address to use for outgoing DNS requests. 
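
A sketch of the xferinfo progress callback mentioned above, assuming the curl_xferinfo_callback prototype (curl_off_t counters; a non-zero return aborts the transfer). CURLOPT_NOPROGRESS must be cleared for it to fire.

#include <stdio.h>
#include <curl/curl.h>

/* assumed curl_xferinfo_callback shape; non-zero return aborts */
static int on_progress(void *clientp, curl_off_t dltotal, curl_off_t dlnow,
                       curl_off_t ultotal, curl_off_t ulnow)
{
  (void)clientp;
  (void)ultotal;
  (void)ulnow;
  if(dltotal > 0)
    fprintf(stderr, "\rdown: %ld of %ld bytes", (long)dlnow, (long)dltotal);
  return 0;
}

static void watch_progress(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, on_progress);
  curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);  /* progress is off by default */
}
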
- * Only supported by the c-ares DNS backend */ - CINIT(DNS_LOCAL_IP6, STRINGPOINT, 223), - - /* Set authentication options directly */ - CINIT(LOGIN_OPTIONS, STRINGPOINT, 224), - - /* Enable/disable TLS NPN extension (http2 over ssl might fail without) */ - CINIT(SSL_ENABLE_NPN, LONG, 225), - - /* Enable/disable TLS ALPN extension (http2 over ssl might fail without) */ - CINIT(SSL_ENABLE_ALPN, LONG, 226), - - /* Time to wait for a response to a HTTP request containing an - * Expect: 100-continue header before sending the data anyway. */ - CINIT(EXPECT_100_TIMEOUT_MS, LONG, 227), - - /* This points to a linked list of headers used for proxy requests only, - struct curl_slist kind */ - CINIT(PROXYHEADER, OBJECTPOINT, 228), - - /* Pass in a bitmask of "header options" */ - CINIT(HEADEROPT, LONG, 229), - - /* The public key in DER form used to validate the peer public key - this option is used only if SSL_VERIFYPEER is true */ - CINIT(PINNEDPUBLICKEY, STRINGPOINT, 230), - - /* Path to Unix domain socket */ - CINIT(UNIX_SOCKET_PATH, STRINGPOINT, 231), - - /* Set if we should verify the certificate status. */ - CINIT(SSL_VERIFYSTATUS, LONG, 232), - - /* Set if we should enable TLS false start. */ - CINIT(SSL_FALSESTART, LONG, 233), - - /* Do not squash dot-dot sequences */ - CINIT(PATH_AS_IS, LONG, 234), - - /* Proxy Service Name */ - CINIT(PROXY_SERVICE_NAME, STRINGPOINT, 235), - - /* Service Name */ - CINIT(SERVICE_NAME, STRINGPOINT, 236), - - /* Wait/don't wait for pipe/mutex to clarify */ - CINIT(PIPEWAIT, LONG, 237), - - /* Set the protocol used when curl is given a URL without a protocol */ - CINIT(DEFAULT_PROTOCOL, STRINGPOINT, 238), - - /* Set stream weight, 1 - 256 (default is 16) */ - CINIT(STREAM_WEIGHT, LONG, 239), - - /* Set stream dependency on another CURL handle */ - CINIT(STREAM_DEPENDS, OBJECTPOINT, 240), - - /* Set E-xclusive stream dependency on another CURL handle */ - CINIT(STREAM_DEPENDS_E, OBJECTPOINT, 241), - - CURLOPT_LASTENTRY /* the last unused */ -} CURLoption; - -#ifndef CURL_NO_OLDIES /* define this to test if your app builds with all - the obsolete stuff removed! */ - -/* Backwards compatibility with older names */ -/* These are scheduled to disappear by 2011 */ - -/* This was added in version 7.19.1 */ -#define CURLOPT_POST301 CURLOPT_POSTREDIR - -/* These are scheduled to disappear by 2009 */ - -/* The following were added in 7.17.0 */ -#define CURLOPT_SSLKEYPASSWD CURLOPT_KEYPASSWD -#define CURLOPT_FTPAPPEND CURLOPT_APPEND -#define CURLOPT_FTPLISTONLY CURLOPT_DIRLISTONLY -#define CURLOPT_FTP_SSL CURLOPT_USE_SSL - -/* The following were added earlier */ - -#define CURLOPT_SSLCERTPASSWD CURLOPT_KEYPASSWD -#define CURLOPT_KRB4LEVEL CURLOPT_KRBLEVEL - -#else -/* This is set if CURL_NO_OLDIES is defined at compile-time */ -#undef CURLOPT_DNS_USE_GLOBAL_CACHE /* soon obsolete */ -#endif - - - /* Below here follows defines for the CURLOPT_IPRESOLVE option. If a host - name resolves addresses using more than one IP protocol version, this - option might be handy to force libcurl to use a specific IP version. */ -#define CURL_IPRESOLVE_WHATEVER 0 /* default, resolves addresses to all IP - versions that your system allows */ -#define CURL_IPRESOLVE_V4 1 /* resolve to IPv4 addresses */ -#define CURL_IPRESOLVE_V6 2 /* resolve to IPv6 addresses */ - - /* three convenient "aliases" that follow the name scheme better */ -#define CURLOPT_RTSPHEADER CURLOPT_HTTPHEADER - - /* These enums are for use with the CURLOPT_HTTP_VERSION option. 
*/ -enum { - CURL_HTTP_VERSION_NONE, /* setting this means we don't care, and that we'd - like the library to choose the best possible - for us! */ - CURL_HTTP_VERSION_1_0, /* please use HTTP 1.0 in the request */ - CURL_HTTP_VERSION_1_1, /* please use HTTP 1.1 in the request */ - CURL_HTTP_VERSION_2_0, /* please use HTTP 2 in the request */ - CURL_HTTP_VERSION_2TLS, /* use version 2 for HTTPS, version 1.1 for HTTP */ - - CURL_HTTP_VERSION_LAST /* *ILLEGAL* http version */ -}; - -/* Convenience definition simply because the name of the version is HTTP/2 and - not 2.0. The 2_0 version of the enum name was set while the version was - still planned to be 2.0 and we stick to it for compatibility. */ -#define CURL_HTTP_VERSION_2 CURL_HTTP_VERSION_2_0 - -/* - * Public API enums for RTSP requests - */ -enum { - CURL_RTSPREQ_NONE, /* first in list */ - CURL_RTSPREQ_OPTIONS, - CURL_RTSPREQ_DESCRIBE, - CURL_RTSPREQ_ANNOUNCE, - CURL_RTSPREQ_SETUP, - CURL_RTSPREQ_PLAY, - CURL_RTSPREQ_PAUSE, - CURL_RTSPREQ_TEARDOWN, - CURL_RTSPREQ_GET_PARAMETER, - CURL_RTSPREQ_SET_PARAMETER, - CURL_RTSPREQ_RECORD, - CURL_RTSPREQ_RECEIVE, - CURL_RTSPREQ_LAST /* last in list */ -}; - - /* These enums are for use with the CURLOPT_NETRC option. */ -enum CURL_NETRC_OPTION { - CURL_NETRC_IGNORED, /* The .netrc will never be read. - * This is the default. */ - CURL_NETRC_OPTIONAL, /* A user:password in the URL will be preferred - * to one in the .netrc. */ - CURL_NETRC_REQUIRED, /* A user:password in the URL will be ignored. - * Unless one is set programmatically, the .netrc - * will be queried. */ - CURL_NETRC_LAST -}; - -enum { - CURL_SSLVERSION_DEFAULT, - CURL_SSLVERSION_TLSv1, /* TLS 1.x */ - CURL_SSLVERSION_SSLv2, - CURL_SSLVERSION_SSLv3, - CURL_SSLVERSION_TLSv1_0, - CURL_SSLVERSION_TLSv1_1, - CURL_SSLVERSION_TLSv1_2, - - CURL_SSLVERSION_LAST /* never use, keep last */ -}; - -enum CURL_TLSAUTH { - CURL_TLSAUTH_NONE, - CURL_TLSAUTH_SRP, - CURL_TLSAUTH_LAST /* never use, keep last */ -}; - -/* symbols to use with CURLOPT_POSTREDIR. - CURL_REDIR_POST_301, CURL_REDIR_POST_302 and CURL_REDIR_POST_303 - can be bitwise ORed so that CURL_REDIR_POST_301 | CURL_REDIR_POST_302 - | CURL_REDIR_POST_303 == CURL_REDIR_POST_ALL */ - -#define CURL_REDIR_GET_ALL 0 -#define CURL_REDIR_POST_301 1 -#define CURL_REDIR_POST_302 2 -#define CURL_REDIR_POST_303 4 -#define CURL_REDIR_POST_ALL \ - (CURL_REDIR_POST_301|CURL_REDIR_POST_302|CURL_REDIR_POST_303) - -typedef enum { - CURL_TIMECOND_NONE, - - CURL_TIMECOND_IFMODSINCE, - CURL_TIMECOND_IFUNMODSINCE, - CURL_TIMECOND_LASTMOD, - - CURL_TIMECOND_LAST -} curl_TimeCond; - - -/* curl_strequal() and curl_strnequal() are subject for removal in a future - libcurl, see lib/README.curlx for details */ -CURL_EXTERN int (curl_strequal)(const char *s1, const char *s2); -CURL_EXTERN int (curl_strnequal)(const char *s1, const char *s2, size_t n); - -/* name is uppercase CURLFORM_ */ -#ifdef CFINIT -#undef CFINIT -#endif - -#ifdef CURL_ISOCPP -#define CFINIT(name) CURLFORM_ ## name -#else -/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. 
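
A one-line sketch of the HTTP version enum above: CURL_HTTP_VERSION_2TLS negotiates HTTP/2 only over TLS and stays on 1.1 for cleartext connections.

#include <curl/curl.h>

static void prefer_http2(CURL *curl)
{
  /* HTTP/2 over TLS where the server supports it, HTTP/1.1 otherwise */
  curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, (long)CURL_HTTP_VERSION_2TLS);
}
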
*/ -#define CFINIT(name) CURLFORM_/**/name -#endif - -typedef enum { - CFINIT(NOTHING), /********* the first one is unused ************/ - - /* */ - CFINIT(COPYNAME), - CFINIT(PTRNAME), - CFINIT(NAMELENGTH), - CFINIT(COPYCONTENTS), - CFINIT(PTRCONTENTS), - CFINIT(CONTENTSLENGTH), - CFINIT(FILECONTENT), - CFINIT(ARRAY), - CFINIT(OBSOLETE), - CFINIT(FILE), - - CFINIT(BUFFER), - CFINIT(BUFFERPTR), - CFINIT(BUFFERLENGTH), - - CFINIT(CONTENTTYPE), - CFINIT(CONTENTHEADER), - CFINIT(FILENAME), - CFINIT(END), - CFINIT(OBSOLETE2), - - CFINIT(STREAM), - CFINIT(CONTENTLEN), /* added in 7.46.0, provide a curl_off_t length */ - - CURLFORM_LASTENTRY /* the last unused */ -} CURLformoption; - -#undef CFINIT /* done */ - -/* structure to be used as parameter for CURLFORM_ARRAY */ -struct curl_forms { - CURLformoption option; - const char *value; -}; - -/* use this for multipart formpost building */ -/* Returns code for curl_formadd() - * - * Returns: - * CURL_FORMADD_OK on success - * CURL_FORMADD_MEMORY if the FormInfo allocation fails - * CURL_FORMADD_OPTION_TWICE if one option is given twice for one Form - * CURL_FORMADD_NULL if a null pointer was given for a char - * CURL_FORMADD_MEMORY if the allocation of a FormInfo struct failed - * CURL_FORMADD_UNKNOWN_OPTION if an unknown option was used - * CURL_FORMADD_INCOMPLETE if some FormInfo is not complete (or error) - * CURL_FORMADD_MEMORY if a curl_httppost struct cannot be allocated - * CURL_FORMADD_MEMORY if some allocation for string copying failed. - * CURL_FORMADD_ILLEGAL_ARRAY if an illegal option is used in an array - * - ***************************************************************************/ -typedef enum { - CURL_FORMADD_OK, /* first, no error */ - - CURL_FORMADD_MEMORY, - CURL_FORMADD_OPTION_TWICE, - CURL_FORMADD_NULL, - CURL_FORMADD_UNKNOWN_OPTION, - CURL_FORMADD_INCOMPLETE, - CURL_FORMADD_ILLEGAL_ARRAY, - CURL_FORMADD_DISABLED, /* libcurl was built with this disabled */ - - CURL_FORMADD_LAST /* last */ -} CURLFORMcode; - -/* - * NAME curl_formadd() - * - * DESCRIPTION - * - * Pretty advanced function for building multi-part formposts. Each invocation - * adds one part; together the parts construct a full post. Then use - * CURLOPT_HTTPPOST to send it off to libcurl. - */ -CURL_EXTERN CURLFORMcode curl_formadd(struct curl_httppost **httppost, - struct curl_httppost **last_post, - ...); - -/* - * callback function for curl_formget() - * The void *arg pointer will be the one passed as second argument to - * curl_formget(). - * The character buffer passed to it must not be freed. - * Should return the buffer length passed to it as the argument "len" on - * success. - */ -typedef size_t (*curl_formget_callback)(void *arg, const char *buf, - size_t len); - -/* - * NAME curl_formget() - * - * DESCRIPTION - * - * Serialize a curl_httppost struct built with curl_formadd(). - * Accepts a void pointer as second argument which will be passed to - * the curl_formget_callback function. - * Returns 0 on success. - */ -CURL_EXTERN int curl_formget(struct curl_httppost *form, void *arg, - curl_formget_callback append); -/* - * NAME curl_formfree() - * - * DESCRIPTION - * - * Free a multipart formpost previously built with curl_formadd(). - */ -CURL_EXTERN void curl_formfree(struct curl_httppost *form); - -/* - * NAME curl_getenv() - * - * DESCRIPTION - * - * Returns a malloc()'ed string that MUST be curl_free()ed after usage is - * complete. 
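
A sketch of the curl_formadd()/curl_formfree() cycle documented above; the field names and file path are placeholders.

#include <curl/curl.h>

static CURLcode post_form(CURL *curl)
{
  struct curl_httppost *post = NULL;
  struct curl_httppost *last = NULL;
  CURLcode rc;

  /* part 1: a file part, read from disk at perform time */
  curl_formadd(&post, &last,
               CURLFORM_COPYNAME, "logfile",        /* placeholder field name */
               CURLFORM_FILE, "upload.txt",         /* placeholder path */
               CURLFORM_CONTENTTYPE, "text/plain",
               CURLFORM_END);
  /* part 2: an inline text field, copied by libcurl */
  curl_formadd(&post, &last,
               CURLFORM_COPYNAME, "note",
               CURLFORM_COPYCONTENTS, "nightly import",
               CURLFORM_END);

  curl_easy_setopt(curl, CURLOPT_HTTPPOST, post);
  rc = curl_easy_perform(curl);
  curl_formfree(post);                /* frees every part added above */
  return rc;
}
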
DEPRECATED - see lib/README.curlx - */ -CURL_EXTERN char *curl_getenv(const char *variable); - -/* - * NAME curl_version() - * - * DESCRIPTION - * - * Returns a static ASCII string of the libcurl version. - */ -CURL_EXTERN char *curl_version(void); - -/* - * NAME curl_easy_escape() - * - * DESCRIPTION - * - * Escapes URL strings (converts all letters considered illegal in URLs to their - * %XX versions). This function returns a newly allocated string or NULL if an - * error occurred. - */ -CURL_EXTERN char *curl_easy_escape(CURL *handle, - const char *string, - int length); - -/* the previous version: */ -CURL_EXTERN char *curl_escape(const char *string, - int length); - - -/* - * NAME curl_easy_unescape() - * - * DESCRIPTION - * - * Unescapes URL encoding in strings (converts all %XX codes to their 8bit - * versions). This function returns a newly allocated string or NULL if an error - * occurred. - * Conversion Note: On non-ASCII platforms the ASCII %XX codes are - * converted into the host encoding. - */ -CURL_EXTERN char *curl_easy_unescape(CURL *handle, - const char *string, - int length, - int *outlength); - -/* the previous version */ -CURL_EXTERN char *curl_unescape(const char *string, - int length); - -/* - * NAME curl_free() - * - * DESCRIPTION - * - * Provided for de-allocation in the same translation unit that did the - * allocation. Added in libcurl 7.10 - */ -CURL_EXTERN void curl_free(void *p); - -/* - * NAME curl_global_init() - * - * DESCRIPTION - * - * curl_global_init() should be invoked exactly once for each application that - * uses libcurl and before any call of other libcurl functions. - * - * This function is not thread-safe! - */ -CURL_EXTERN CURLcode curl_global_init(long flags); - -/* - * NAME curl_global_init_mem() - * - * DESCRIPTION - * - * curl_global_init() or curl_global_init_mem() should be invoked exactly once - * for each application that uses libcurl. This function can be used to - * initialize libcurl and set user defined memory management callback - * functions. Users can implement memory management routines to check for - * memory leaks, check for mis-use of the curl library etc. User registered - * callback routines will be invoked by this library instead of the system - * memory management routines like malloc, free etc. - */ -CURL_EXTERN CURLcode curl_global_init_mem(long flags, - curl_malloc_callback m, - curl_free_callback f, - curl_realloc_callback r, - curl_strdup_callback s, - curl_calloc_callback c); - -/* - * NAME curl_global_cleanup() - * - * DESCRIPTION - * - * curl_global_cleanup() should be invoked exactly once for each application - * that uses libcurl - */ -CURL_EXTERN void curl_global_cleanup(void); - -/* linked-list structure for the CURLOPT_QUOTE option (and other) */ -struct curl_slist { - char *data; - struct curl_slist *next; -}; - -/* - * NAME curl_slist_append() - * - * DESCRIPTION - * - * Appends a string to a linked list. If no list exists, it will be created - * first. Returns the new list, after appending. - */ -CURL_EXTERN struct curl_slist *curl_slist_append(struct curl_slist *, - const char *); - -/* - * NAME curl_slist_free_all() - * - * DESCRIPTION - * - * free a previously built curl_slist. - */ -CURL_EXTERN void curl_slist_free_all(struct curl_slist *); - -/* - * NAME curl_getdate() - * - * DESCRIPTION - * - * Returns the time, in seconds since 1 Jan 1970 of the time string given in - * the first argument. The time argument in the second parameter is unused - * and should be set to NULL. 
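
A sketch tying together curl_global_init(), curl_slist_append() and the cleanup calls above; the URL and header values are placeholders. Note the list must stay alive until the transfer finishes.

#include <curl/curl.h>

int main(void)
{
  struct curl_slist *headers = NULL;
  CURL *curl;

  curl_global_init(CURL_GLOBAL_DEFAULT);     /* once, before anything else */

  headers = curl_slist_append(headers, "Accept: application/json");
  headers = curl_slist_append(headers, "X-Trace-Id: demo");    /* placeholder */

  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");  /* placeholder */
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }

  curl_slist_free_all(headers);   /* only after the transfer is done */
  curl_global_cleanup();          /* once, at program end */
  return 0;
}
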
- */ -CURL_EXTERN time_t curl_getdate(const char *p, const time_t *unused); - -/* info about the certificate chain, only for OpenSSL builds. Asked - for with CURLOPT_CERTINFO / CURLINFO_CERTINFO */ -struct curl_certinfo { - int num_of_certs; /* number of certificates with information */ - struct curl_slist **certinfo; /* for each index in this array, there's a - linked list with textual information in the - format "name: value" */ -}; - -/* enum for the different supported SSL backends */ -typedef enum { - CURLSSLBACKEND_NONE = 0, - CURLSSLBACKEND_OPENSSL = 1, - CURLSSLBACKEND_GNUTLS = 2, - CURLSSLBACKEND_NSS = 3, - CURLSSLBACKEND_OBSOLETE4 = 4, /* Was QSOSSL. */ - CURLSSLBACKEND_GSKIT = 5, - CURLSSLBACKEND_POLARSSL = 6, - CURLSSLBACKEND_CYASSL = 7, - CURLSSLBACKEND_SCHANNEL = 8, - CURLSSLBACKEND_DARWINSSL = 9, - CURLSSLBACKEND_AXTLS = 10, - CURLSSLBACKEND_MBEDTLS = 11 -} curl_sslbackend; - -/* Information about the SSL library used and the respective internal SSL - handle, which can be used to obtain further information regarding the - connection. Asked for with CURLINFO_TLS_SESSION. */ -struct curl_tlssessioninfo { - curl_sslbackend backend; - void *internals; -}; - -#define CURLINFO_STRING 0x100000 -#define CURLINFO_LONG 0x200000 -#define CURLINFO_DOUBLE 0x300000 -#define CURLINFO_SLIST 0x400000 -#define CURLINFO_SOCKET 0x500000 -#define CURLINFO_MASK 0x0fffff -#define CURLINFO_TYPEMASK 0xf00000 - -typedef enum { - CURLINFO_NONE, /* first, never use this */ - CURLINFO_EFFECTIVE_URL = CURLINFO_STRING + 1, - CURLINFO_RESPONSE_CODE = CURLINFO_LONG + 2, - CURLINFO_TOTAL_TIME = CURLINFO_DOUBLE + 3, - CURLINFO_NAMELOOKUP_TIME = CURLINFO_DOUBLE + 4, - CURLINFO_CONNECT_TIME = CURLINFO_DOUBLE + 5, - CURLINFO_PRETRANSFER_TIME = CURLINFO_DOUBLE + 6, - CURLINFO_SIZE_UPLOAD = CURLINFO_DOUBLE + 7, - CURLINFO_SIZE_DOWNLOAD = CURLINFO_DOUBLE + 8, - CURLINFO_SPEED_DOWNLOAD = CURLINFO_DOUBLE + 9, - CURLINFO_SPEED_UPLOAD = CURLINFO_DOUBLE + 10, - CURLINFO_HEADER_SIZE = CURLINFO_LONG + 11, - CURLINFO_REQUEST_SIZE = CURLINFO_LONG + 12, - CURLINFO_SSL_VERIFYRESULT = CURLINFO_LONG + 13, - CURLINFO_FILETIME = CURLINFO_LONG + 14, - CURLINFO_CONTENT_LENGTH_DOWNLOAD = CURLINFO_DOUBLE + 15, - CURLINFO_CONTENT_LENGTH_UPLOAD = CURLINFO_DOUBLE + 16, - CURLINFO_STARTTRANSFER_TIME = CURLINFO_DOUBLE + 17, - CURLINFO_CONTENT_TYPE = CURLINFO_STRING + 18, - CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19, - CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20, - CURLINFO_PRIVATE = CURLINFO_STRING + 21, - CURLINFO_HTTP_CONNECTCODE = CURLINFO_LONG + 22, - CURLINFO_HTTPAUTH_AVAIL = CURLINFO_LONG + 23, - CURLINFO_PROXYAUTH_AVAIL = CURLINFO_LONG + 24, - CURLINFO_OS_ERRNO = CURLINFO_LONG + 25, - CURLINFO_NUM_CONNECTS = CURLINFO_LONG + 26, - CURLINFO_SSL_ENGINES = CURLINFO_SLIST + 27, - CURLINFO_COOKIELIST = CURLINFO_SLIST + 28, - CURLINFO_LASTSOCKET = CURLINFO_LONG + 29, - CURLINFO_FTP_ENTRY_PATH = CURLINFO_STRING + 30, - CURLINFO_REDIRECT_URL = CURLINFO_STRING + 31, - CURLINFO_PRIMARY_IP = CURLINFO_STRING + 32, - CURLINFO_APPCONNECT_TIME = CURLINFO_DOUBLE + 33, - CURLINFO_CERTINFO = CURLINFO_SLIST + 34, - CURLINFO_CONDITION_UNMET = CURLINFO_LONG + 35, - CURLINFO_RTSP_SESSION_ID = CURLINFO_STRING + 36, - CURLINFO_RTSP_CLIENT_CSEQ = CURLINFO_LONG + 37, - CURLINFO_RTSP_SERVER_CSEQ = CURLINFO_LONG + 38, - CURLINFO_RTSP_CSEQ_RECV = CURLINFO_LONG + 39, - CURLINFO_PRIMARY_PORT = CURLINFO_LONG + 40, - CURLINFO_LOCAL_IP = CURLINFO_STRING + 41, - CURLINFO_LOCAL_PORT = CURLINFO_LONG + 42, - CURLINFO_TLS_SESSION = CURLINFO_SLIST + 43, - 
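
A sketch of curl_easy_getinfo() against the typed CURLINFO_* constants above; per the documentation, the results are meaningful only after curl_easy_perform() returned CURLE_OK.

#include <stdio.h>
#include <curl/curl.h>

/* call only after a successful curl_easy_perform() */
static void report(CURL *curl)
{
  long code = 0;
  double total = 0.0;
  char *ctype = NULL;

  if(curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code) == CURLE_OK)
    printf("status: %ld\n", code);
  if(curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total) == CURLE_OK)
    printf("elapsed: %.3f s\n", total);
  if(curl_easy_getinfo(curl, CURLINFO_CONTENT_TYPE, &ctype) == CURLE_OK && ctype)
    printf("type: %s\n", ctype);    /* owned by the handle, do not free */
}
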
CURLINFO_ACTIVESOCKET = CURLINFO_SOCKET + 44, - /* Fill in new entries below here! */ - - CURLINFO_LASTONE = 44 -} CURLINFO; - -/* CURLINFO_RESPONSE_CODE is the new name for the option previously known as - CURLINFO_HTTP_CODE */ -#define CURLINFO_HTTP_CODE CURLINFO_RESPONSE_CODE - -typedef enum { - CURLCLOSEPOLICY_NONE, /* first, never use this */ - - CURLCLOSEPOLICY_OLDEST, - CURLCLOSEPOLICY_LEAST_RECENTLY_USED, - CURLCLOSEPOLICY_LEAST_TRAFFIC, - CURLCLOSEPOLICY_SLOWEST, - CURLCLOSEPOLICY_CALLBACK, - - CURLCLOSEPOLICY_LAST /* last, never use this */ -} curl_closepolicy; - -#define CURL_GLOBAL_SSL (1<<0) -#define CURL_GLOBAL_WIN32 (1<<1) -#define CURL_GLOBAL_ALL (CURL_GLOBAL_SSL|CURL_GLOBAL_WIN32) -#define CURL_GLOBAL_NOTHING 0 -#define CURL_GLOBAL_DEFAULT CURL_GLOBAL_ALL -#define CURL_GLOBAL_ACK_EINTR (1<<2) - - -/***************************************************************************** - * Setup defines, protos etc for the sharing stuff. - */ - -/* Different data locks for a single share */ -typedef enum { - CURL_LOCK_DATA_NONE = 0, - /* CURL_LOCK_DATA_SHARE is used internally to say that - * the locking is just made to change the internal state of the share - * itself. - */ - CURL_LOCK_DATA_SHARE, - CURL_LOCK_DATA_COOKIE, - CURL_LOCK_DATA_DNS, - CURL_LOCK_DATA_SSL_SESSION, - CURL_LOCK_DATA_CONNECT, - CURL_LOCK_DATA_LAST -} curl_lock_data; - -/* Different lock access types */ -typedef enum { - CURL_LOCK_ACCESS_NONE = 0, /* unspecified action */ - CURL_LOCK_ACCESS_SHARED = 1, /* for read perhaps */ - CURL_LOCK_ACCESS_SINGLE = 2, /* for write perhaps */ - CURL_LOCK_ACCESS_LAST /* never use */ -} curl_lock_access; - -typedef void (*curl_lock_function)(CURL *handle, - curl_lock_data data, - curl_lock_access locktype, - void *userptr); -typedef void (*curl_unlock_function)(CURL *handle, - curl_lock_data data, - void *userptr); - -typedef void CURLSH; - -typedef enum { - CURLSHE_OK, /* all is fine */ - CURLSHE_BAD_OPTION, /* 1 */ - CURLSHE_IN_USE, /* 2 */ - CURLSHE_INVALID, /* 3 */ - CURLSHE_NOMEM, /* 4 out of memory */ - CURLSHE_NOT_BUILT_IN, /* 5 feature not present in lib */ - CURLSHE_LAST /* never use */ -} CURLSHcode; - -typedef enum { - CURLSHOPT_NONE, /* don't use */ - CURLSHOPT_SHARE, /* specify a data type to share */ - CURLSHOPT_UNSHARE, /* specify which data type to stop sharing */ - CURLSHOPT_LOCKFUNC, /* pass in a 'curl_lock_function' pointer */ - CURLSHOPT_UNLOCKFUNC, /* pass in a 'curl_unlock_function' pointer */ - CURLSHOPT_USERDATA, /* pass in a user data pointer used in the lock/unlock - callback functions */ - CURLSHOPT_LAST /* never use */ -} CURLSHoption; - -CURL_EXTERN CURLSH *curl_share_init(void); -CURL_EXTERN CURLSHcode curl_share_setopt(CURLSH *, CURLSHoption option, ...); -CURL_EXTERN CURLSHcode curl_share_cleanup(CURLSH *); - -/**************************************************************************** - * Structures for querying information about the curl library at runtime. - */ - -typedef enum { - CURLVERSION_FIRST, - CURLVERSION_SECOND, - CURLVERSION_THIRD, - CURLVERSION_FOURTH, - CURLVERSION_LAST /* never actually use this */ -} CURLversion; - -/* The 'CURLVERSION_NOW' is the symbolic name meant to be used by - basically all programs ever that want to get version information. It is - meant to be a built-in version number for what kind of struct the caller - expects. If the struct ever changes, we redefine the NOW to another enum - from above. 
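
A sketch of the share interface above: two sequential transfers reusing one DNS cache. In a threaded program the CURLSHOPT_LOCKFUNC/UNLOCKFUNC callbacks would also be required.

#include <curl/curl.h>

static void shared_dns(const char *url)
{
  int i;
  CURLSH *share = curl_share_init();
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);

  for(i = 0; i < 2; i++) {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, url);
      curl_easy_setopt(curl, CURLOPT_SHARE, share);
      curl_easy_perform(curl);   /* the second run reuses the cached lookup */
      curl_easy_cleanup(curl);
    }
  }
  curl_share_cleanup(share);     /* only after every user handle is gone */
}
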
*/ -#define CURLVERSION_NOW CURLVERSION_FOURTH - -typedef struct { - CURLversion age; /* age of the returned struct */ - const char *version; /* LIBCURL_VERSION */ - unsigned int version_num; /* LIBCURL_VERSION_NUM */ - const char *host; /* OS/host/cpu/machine when configured */ - int features; /* bitmask, see defines below */ - const char *ssl_version; /* human readable string */ - long ssl_version_num; /* not used anymore, always 0 */ - const char *libz_version; /* human readable string */ - /* protocols is terminated by an entry with a NULL protoname */ - const char * const *protocols; - - /* The fields below this were added in CURLVERSION_SECOND */ - const char *ares; - int ares_num; - - /* This field was added in CURLVERSION_THIRD */ - const char *libidn; - - /* These fields were added in CURLVERSION_FOURTH */ - - /* Same as '_libiconv_version' if built with HAVE_ICONV */ - int iconv_ver_num; - - const char *libssh_version; /* human readable string */ - -} curl_version_info_data; - -#define CURL_VERSION_IPV6 (1<<0) /* IPv6-enabled */ -#define CURL_VERSION_KERBEROS4 (1<<1) /* Kerberos V4 auth is supported - (deprecated) */ -#define CURL_VERSION_SSL (1<<2) /* SSL options are present */ -#define CURL_VERSION_LIBZ (1<<3) /* libz features are present */ -#define CURL_VERSION_NTLM (1<<4) /* NTLM auth is supported */ -#define CURL_VERSION_GSSNEGOTIATE (1<<5) /* Negotiate auth is supported - (deprecated) */ -#define CURL_VERSION_DEBUG (1<<6) /* Built with debug capabilities */ -#define CURL_VERSION_ASYNCHDNS (1<<7) /* Asynchronous DNS resolves */ -#define CURL_VERSION_SPNEGO (1<<8) /* SPNEGO auth is supported */ -#define CURL_VERSION_LARGEFILE (1<<9) /* Supports files larger than 2GB */ -#define CURL_VERSION_IDN (1<<10) /* Internationalized Domain Names are - supported */ -#define CURL_VERSION_SSPI (1<<11) /* Built against Windows SSPI */ -#define CURL_VERSION_CONV (1<<12) /* Character conversions supported */ -#define CURL_VERSION_CURLDEBUG (1<<13) /* Debug memory tracking supported */ -#define CURL_VERSION_TLSAUTH_SRP (1<<14) /* TLS-SRP auth is supported */ -#define CURL_VERSION_NTLM_WB (1<<15) /* NTLM delegation to winbind helper - is supported */ -#define CURL_VERSION_HTTP2 (1<<16) /* HTTP2 support built-in */ -#define CURL_VERSION_GSSAPI (1<<17) /* Built against a GSS-API library */ -#define CURL_VERSION_KERBEROS5 (1<<18) /* Kerberos V5 auth is supported */ -#define CURL_VERSION_UNIX_SOCKETS (1<<19) /* Unix domain sockets support */ -#define CURL_VERSION_PSL (1<<20) /* Mozilla's Public Suffix List, used - for cookie domain verification */ - - /* - * NAME curl_version_info() - * - * DESCRIPTION - * - * This function returns a pointer to a static copy of the version info - * struct. See above. - */ -CURL_EXTERN curl_version_info_data *curl_version_info(CURLversion); - -/* - * NAME curl_easy_strerror() - * - * DESCRIPTION - * - * The curl_easy_strerror function may be used to turn a CURLcode value - * into the equivalent human readable error string. This is useful - * for printing meaningful error messages. - */ -CURL_EXTERN const char *curl_easy_strerror(CURLcode); - -/* - * NAME curl_share_strerror() - * - * DESCRIPTION - * - * The curl_share_strerror function may be used to turn a CURLSHcode value - * into the equivalent human readable error string. This is useful - * for printing meaningful error messages. - */ -CURL_EXTERN const char *curl_share_strerror(CURLSHcode); - -/* - * NAME curl_easy_pause() - * - * DESCRIPTION - * - * The curl_easy_pause function pauses or unpauses transfers. 
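
A sketch probing the feature bits above at run time via curl_version_info(); this reports on the library actually loaded, not the headers built against.

#include <stdio.h>
#include <curl/curl.h>

static void show_features(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
  printf("libcurl %s\n", info->version);
  printf("  SSL:    %s\n", (info->features & CURL_VERSION_SSL) ? "yes" : "no");
  printf("  HTTP/2: %s\n", (info->features & CURL_VERSION_HTTP2) ? "yes" : "no");
  printf("  IPv6:   %s\n", (info->features & CURL_VERSION_IPV6) ? "yes" : "no");
}
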
Select the new - * state by setting the bitmask, use the convenience defines below. - * - */ -CURL_EXTERN CURLcode curl_easy_pause(CURL *handle, int bitmask); - -#define CURLPAUSE_RECV (1<<0) -#define CURLPAUSE_RECV_CONT (0) - -#define CURLPAUSE_SEND (1<<2) -#define CURLPAUSE_SEND_CONT (0) - -#define CURLPAUSE_ALL (CURLPAUSE_RECV|CURLPAUSE_SEND) -#define CURLPAUSE_CONT (CURLPAUSE_RECV_CONT|CURLPAUSE_SEND_CONT) - -#ifdef __cplusplus -} -#endif - -/* unfortunately, the easy.h and multi.h include files need options and info - stuff before they can be included! */ -#include "easy.h" /* nothing in curl is fun without the easy stuff */ -#include "multi.h" - -/* the typechecker doesn't work in C++ (yet) */ -#if defined(__GNUC__) && defined(__GNUC_MINOR__) && \ - ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && \ - !defined(__cplusplus) && !defined(CURL_DISABLE_TYPECHECK) -#include "typecheck-gcc.h" -#else -#if defined(__STDC__) && (__STDC__ >= 1) -/* This preprocessor magic that replaces a call with the exact same call is - only done to make sure application authors pass exactly three arguments - to these functions. */ -#define curl_easy_setopt(handle,opt,param) curl_easy_setopt(handle,opt,param) -#define curl_easy_getinfo(handle,info,arg) curl_easy_getinfo(handle,info,arg) -#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param) -#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param) -#endif /* __STDC__ >= 1 */ -#endif /* gcc >= 4.3 && !__cplusplus */ - -#endif /* __CURL_CURL_H */ diff --git a/deps/libcurl/include/curl/curlbuild.h b/deps/libcurl/include/curl/curlbuild.h deleted file mode 100644 index bdca52b5355feec5f80f279bea6c5799bcf1041e..0000000000000000000000000000000000000000 --- a/deps/libcurl/include/curl/curlbuild.h +++ /dev/null @@ -1,198 +0,0 @@ -/* include/curl/curlbuild.h. Generated from curlbuild.h.in by configure. */ -#ifndef __CURL_CURLBUILD_H -#define __CURL_CURLBUILD_H -/*************************************************************************** - * _ _ ____ _ - * Project ___| | | | _ \| | - * / __| | | | |_) | | - * | (__| |_| | _ <| |___ - * \___|\___/|_| \_\_____| - * - * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. - * - * This software is licensed as described in the file COPYING, which - * you should have received as part of this distribution. The terms - * are also available at http://curl.haxx.se/docs/copyright.html. - * - * You may opt to use, copy, modify, merge, publish, distribute and/or sell - * copies of the Software, and permit persons to whom the Software is - * furnished to do so, under the terms of the COPYING file. - * - * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY - * KIND, either express or implied. - * - ***************************************************************************/ - -/* ================================================================ */ -/* NOTES FOR CONFIGURE CAPABLE SYSTEMS */ -/* ================================================================ */ - -/* - * NOTE 1: - * ------- - * - * Nothing in this file is intended to be modified or adjusted by the - * curl library user nor by the curl library builder. 
- * - * If you think that something actually needs to be changed, adjusted - * or fixed in this file, then, report it on the libcurl development - * mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/ - * - * This header file shall only export symbols which are 'curl' or 'CURL' - * prefixed, otherwise public name space would be polluted. - * - * NOTE 2: - * ------- - * - * Right now you might be staring at file include/curl/curlbuild.h.in or - * at file include/curl/curlbuild.h, this is due to the following reason: - * - * On systems capable of running the configure script, the configure process - * will overwrite the distributed include/curl/curlbuild.h file with one that - * is suitable and specific to the library being configured and built, which - * is generated from the include/curl/curlbuild.h.in template file. - * - */ - -/* ================================================================ */ -/* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */ -/* ================================================================ */ - -#ifdef CURL_SIZEOF_LONG -#error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined -#endif - -#ifdef CURL_TYPEOF_CURL_SOCKLEN_T -#error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined -#endif - -#ifdef CURL_SIZEOF_CURL_SOCKLEN_T -#error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined -#endif - -#ifdef CURL_TYPEOF_CURL_OFF_T -#error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_FORMAT_CURL_OFF_T -#error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_FORMAT_CURL_OFF_TU -#error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined -#endif - -#ifdef CURL_FORMAT_OFF_T -#error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined -#endif - -#ifdef CURL_SIZEOF_CURL_OFF_T -#error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_SUFFIX_CURL_OFF_T -#error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined -#endif - -#ifdef CURL_SUFFIX_CURL_OFF_TU -#error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h" - Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined -#endif - -/* ================================================================ */ -/* EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY */ -/* ================================================================ */ - -/* Configure process defines this to 1 when it finds out that system */ -/* header file ws2tcpip.h must be included by the external interface. 
*/ -/* #undef CURL_PULL_WS2TCPIP_H */ -#ifdef CURL_PULL_WS2TCPIP_H -# ifndef WIN32_LEAN_AND_MEAN -# define WIN32_LEAN_AND_MEAN -# endif -# include <windows.h> -# include <winsock2.h> -# include <ws2tcpip.h> -#endif - -/* Configure process defines this to 1 when it finds out that system */ -/* header file sys/types.h must be included by the external interface. */ -#define CURL_PULL_SYS_TYPES_H 1 -#ifdef CURL_PULL_SYS_TYPES_H -# include <sys/types.h> -#endif - -/* Configure process defines this to 1 when it finds out that system */ -/* header file stdint.h must be included by the external interface. */ -/* #undef CURL_PULL_STDINT_H */ -#ifdef CURL_PULL_STDINT_H -# include <stdint.h> -#endif - -/* Configure process defines this to 1 when it finds out that system */ -/* header file inttypes.h must be included by the external interface. */ -/* #undef CURL_PULL_INTTYPES_H */ -#ifdef CURL_PULL_INTTYPES_H -# include <inttypes.h> -#endif - -/* Configure process defines this to 1 when it finds out that system */ -/* header file sys/socket.h must be included by the external interface. */ -#define CURL_PULL_SYS_SOCKET_H 1 -#ifdef CURL_PULL_SYS_SOCKET_H -# include <sys/socket.h> -#endif - -/* Configure process defines this to 1 when it finds out that system */ -/* header file sys/poll.h must be included by the external interface. */ -/* #undef CURL_PULL_SYS_POLL_H */ -#ifdef CURL_PULL_SYS_POLL_H -# include <sys/poll.h> -#endif - -/* The size of `long', as computed by sizeof. */ -#define CURL_SIZEOF_LONG 8 - -/* Integral data type used for curl_socklen_t. */ -#define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t - -/* The size of `curl_socklen_t', as computed by sizeof. */ -#define CURL_SIZEOF_CURL_SOCKLEN_T 4 - -/* Data type definition of curl_socklen_t. */ -typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t; - -/* Signed integral data type used for curl_off_t. */ -#define CURL_TYPEOF_CURL_OFF_T long - -/* Data type definition of curl_off_t. */ -typedef CURL_TYPEOF_CURL_OFF_T curl_off_t; - -/* curl_off_t formatting string directive without "%" conversion specifier. */ -#define CURL_FORMAT_CURL_OFF_T "ld" - -/* unsigned curl_off_t formatting string without "%" conversion specifier. */ -#define CURL_FORMAT_CURL_OFF_TU "lu" - -/* curl_off_t formatting string directive with "%" conversion specifier. */ -#define CURL_FORMAT_OFF_T "%ld" - -/* The size of `curl_off_t', as computed by sizeof. */ -#define CURL_SIZEOF_CURL_OFF_T 8 - -/* curl_off_t constant suffix. */ -#define CURL_SUFFIX_CURL_OFF_T L - -/* unsigned curl_off_t constant suffix. */ -#define CURL_SUFFIX_CURL_OFF_TU UL - -#endif /* __CURL_CURLBUILD_H */ diff --git a/deps/libcurl/include/curl/curlrules.h b/deps/libcurl/include/curl/curlrules.h deleted file mode 100644 index 7c2ede35b63a7540acd042ce5c2b27a70c982d02..0000000000000000000000000000000000000000 --- a/deps/libcurl/include/curl/curlrules.h +++ /dev/null @@ -1,262 +0,0 @@ -#ifndef __CURL_CURLRULES_H -#define __CURL_CURLRULES_H -/*************************************************************************** - * _ _ ____ _ - * Project ___| | | | _ \| | - * / __| | | | |_) | | - * | (__| |_| | _ <| |___ - * \___|\___/|_| \_\_____| - * - * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al. - * - * This software is licensed as described in the file COPYING, which - * you should have received as part of this distribution. The terms - * are also available at http://curl.haxx.se/docs/copyright.html. - * - * You may opt to use, copy, modify, merge, publish, distribute and/or sell - * copies of the Software, and permit persons to whom the Software is - * furnished to do so, under the terms of the COPYING file. 
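
A sketch of the CURL_FORMAT_CURL_OFF_T directive above, which deliberately omits the "%" so it splices into a printf format string; this stays correct whether curl_off_t is a 32- or 64-bit type.

#include <stdio.h>
#include <curl/curl.h>

static void print_size(curl_off_t size)
{
  /* the macro expands to the right length modifier for this build */
  printf("size: %" CURL_FORMAT_CURL_OFF_T " bytes\n", size);
}
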
- * - * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY - * KIND, either express or implied. - * - ***************************************************************************/ - -/* ================================================================ */ -/* COMPILE TIME SANITY CHECKS */ -/* ================================================================ */ - -/* - * NOTE 1: - * ------- - * - * All checks done in this file are intentionally placed in a public - * header file which is pulled by curl/curl.h when an application is - * being built using an already built libcurl library. Additionally - * this file is also included and used when building the library. - * - * If compilation fails on this file it is certainly sure that the - * problem is elsewhere. It could be a problem in the curlbuild.h - * header file, or simply that you are using different compilation - * settings than those used to build the library. - * - * Nothing in this file is intended to be modified or adjusted by the - * curl library user nor by the curl library builder. - * - * Do not deactivate any check, these are done to make sure that the - * library is properly built and used. - * - * You can find further help on the libcurl development mailing list: - * http://cool.haxx.se/mailman/listinfo/curl-library/ - * - * NOTE 2 - * ------ - * - * Some of the following compile time checks are based on the fact - * that the dimension of a constant array can not be a negative one. - * In this way if the compile time verification fails, the compilation - * will fail issuing an error. The error description wording is compiler - * dependent but it will be quite similar to one of the following: - * - * "negative subscript or subscript is too large" - * "array must have at least one element" - * "-1 is an illegal array size" - * "size of array is negative" - * - * If you are building an application which tries to use an already - * built libcurl library and you are getting this kind of errors on - * this file, it is a clear indication that there is a mismatch between - * how the library was built and how you are trying to use it for your - * application. Your already compiled or binary library provider is the - * only one who can give you the details you need to properly use it. - */ - -/* - * Verify that some macros are actually defined. - */ - -#ifndef CURL_SIZEOF_LONG -# error "CURL_SIZEOF_LONG definition is missing!" - Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing -#endif - -#ifndef CURL_TYPEOF_CURL_SOCKLEN_T -# error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!" - Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing -#endif - -#ifndef CURL_SIZEOF_CURL_SOCKLEN_T -# error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!" - Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing -#endif - -#ifndef CURL_TYPEOF_CURL_OFF_T -# error "CURL_TYPEOF_CURL_OFF_T definition is missing!" - Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing -#endif - -#ifndef CURL_FORMAT_CURL_OFF_T -# error "CURL_FORMAT_CURL_OFF_T definition is missing!" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing -#endif - -#ifndef CURL_FORMAT_CURL_OFF_TU -# error "CURL_FORMAT_CURL_OFF_TU definition is missing!" - Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing -#endif - -#ifndef CURL_FORMAT_OFF_T -# error "CURL_FORMAT_OFF_T definition is missing!" 
- Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing -#endif - -#ifndef CURL_SIZEOF_CURL_OFF_T -# error "CURL_SIZEOF_CURL_OFF_T definition is missing!" - Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing -#endif - -#ifndef CURL_SUFFIX_CURL_OFF_T -# error "CURL_SUFFIX_CURL_OFF_T definition is missing!" - Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing -#endif - -#ifndef CURL_SUFFIX_CURL_OFF_TU -# error "CURL_SUFFIX_CURL_OFF_TU definition is missing!" - Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing -#endif - -/* - * Macros private to this header file. - */ - -#define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1 - -#define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1 - -/* - * Verify that the size previously defined and expected for long - * is the same as the one reported by sizeof() at compile time. - */ - -typedef char - __curl_rule_01__ - [CurlchkszEQ(long, CURL_SIZEOF_LONG)]; - -/* - * Verify that the size previously defined and expected for - * curl_off_t is actually the same as the one reported - * by sizeof() at compile time. - */ - -typedef char - __curl_rule_02__ - [CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)]; - -/* - * Verify at compile time that the size of curl_off_t as reported - * by sizeof() is greater than or equal to the one reported for long - * for the current compilation. - */ - -typedef char - __curl_rule_03__ - [CurlchkszGE(curl_off_t, long)]; - -/* - * Verify that the size previously defined and expected for - * curl_socklen_t is actually the same as the one reported - * by sizeof() at compile time. - */ - -typedef char - __curl_rule_04__ - [CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)]; - -/* - * Verify at compile time that the size of curl_socklen_t as reported - * by sizeof() is greater than or equal to the one reported for int for - * the current compilation. - */ - -typedef char - __curl_rule_05__ - [CurlchkszGE(curl_socklen_t, int)]; - -/* ================================================================ */ -/* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */ -/* ================================================================ */ - -/* - * CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow - * these to be visible and exported by the external libcurl interface API, - * while also making them visible to the library internals, simply including - * curl_setup.h, without actually needing to include curl.h internally. - * If some day this section would grow big enough, all this should be moved - * to its own header file. - */ - -/* - * Figure out if we can use the ## preprocessor operator, which is supported - * by ISO/ANSI C and C++. Some compilers support it without setting __STDC__ - * or __cplusplus so we need to carefully check for them too. - */ - -#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \ - defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \ - defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \ - defined(__ILEC400__) - /* This compiler is believed to have an ISO compatible preprocessor */ -#define CURL_ISOCPP -#else - /* This compiler is believed NOT to have an ISO compatible preprocessor */ -#undef CURL_ISOCPP -#endif - -/* - * Macros for minimum-width signed and unsigned curl_off_t integer constants. 
- */ -#if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551) -# define __CURL_OFF_T_C_HLPR2(x) x -# define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x) -# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ - __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T) -# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ - __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU) -#else -# ifdef CURL_ISOCPP -# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix -# else -# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix -# endif -# define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix) -# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T) -# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU) -#endif - -/* - * Get rid of macros private to this header file. - */ - -#undef CurlchkszEQ -#undef CurlchkszGE - -/* - * Get rid of macros not intended to exist beyond this point. - */ - -#undef CURL_PULL_WS2TCPIP_H -#undef CURL_PULL_SYS_TYPES_H -#undef CURL_PULL_SYS_SOCKET_H -#undef CURL_PULL_SYS_POLL_H -#undef CURL_PULL_STDINT_H -#undef CURL_PULL_INTTYPES_H - -#undef CURL_TYPEOF_CURL_SOCKLEN_T -#undef CURL_TYPEOF_CURL_OFF_T - -#ifdef CURL_NO_OLDIES -#undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */ -#endif - -#endif /* __CURL_CURLRULES_H */ diff --git a/deps/libcurl/include/curl/curlver.h b/deps/libcurl/include/curl/curlver.h deleted file mode 100644 index 17906764c898e69ce28204bda7d74573f4c365ee..0000000000000000000000000000000000000000 --- a/deps/libcurl/include/curl/curlver.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef __CURL_CURLVER_H -#define __CURL_CURLVER_H -/*************************************************************************** - * _ _ ____ _ - * Project ___| | | | _ \| | - * / __| | | | |_) | | - * | (__| |_| | _ <| |___ - * \___|\___/|_| \_\_____| - * - * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al. - * - * This software is licensed as described in the file COPYING, which - * you should have received as part of this distribution. The terms - * are also available at http://curl.haxx.se/docs/copyright.html. - * - * You may opt to use, copy, modify, merge, publish, distribute and/or sell - * copies of the Software, and permit persons to whom the Software is - * furnished to do so, under the terms of the COPYING file. - * - * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY - * KIND, either express or implied. - * - ***************************************************************************/ - -/* This header file contains nothing but libcurl version info, generated by - a script at release-time. This was made its own header file in 7.11.2 */ - -/* This is the global package copyright */ -#define LIBCURL_COPYRIGHT "1996 - 2015 Daniel Stenberg, <daniel@haxx.se>." - -/* This is the version number of the libcurl package from which this header - file originates: */ -#define LIBCURL_VERSION "7.47.0" - -/* The numeric version number is also available "in parts" by using these - defines: */ -#define LIBCURL_VERSION_MAJOR 7 -#define LIBCURL_VERSION_MINOR 47 -#define LIBCURL_VERSION_PATCH 0 - -/* This is the numeric version of the libcurl version number, meant for easier - parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will - always follow this syntax: - - 0xXXYYZZ - - Where XX, YY and ZZ are the main version, release and patch numbers in - hexadecimal (using 8 bits each). All three numbers are always represented - using two digits. 
diff --git a/deps/libcurl/include/curl/easy.h b/deps/libcurl/include/curl/easy.h
deleted file mode 100644
index c1e3e76096e3925821da279a2f2eae67ae61e0e8..0000000000000000000000000000000000000000
--- a/deps/libcurl/include/curl/easy.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __CURL_EASY_H
-#define __CURL_EASY_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at http://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-CURL_EXTERN CURL *curl_easy_init(void);
-CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
-CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
-CURL_EXTERN void curl_easy_cleanup(CURL *curl);
-
-/*
- * NAME curl_easy_getinfo()
- *
- * DESCRIPTION
- *
- * Request internal information from the curl session with this function. The
- * third argument MUST be a pointer to a long, a pointer to a char * or a
- * pointer to a double (as the documentation describes elsewhere). The data
- * pointed to will be filled in accordingly and can be relied upon only if the
- * function returns CURLE_OK. This function is intended to be used *AFTER* a
- * performed transfer; all results from this function are undefined until the
- * transfer has completed.
- */
-CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
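The getinfo contract described above (a pointer of the exact documented type, meaningful only after a successful transfer) is easiest to see in a complete easy-interface round trip. A minimal sketch with error handling trimmed and a placeholder URL:

```c
/* Easy-interface lifecycle: init, setopt, perform, getinfo, cleanup. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if(!curl)
    return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
  if(curl_easy_perform(curl) == CURLE_OK) {
    long status = 0;
    /* CURLINFO_RESPONSE_CODE requires a pointer to long */
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
    printf("HTTP status: %ld\n", status);
  }
  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}
```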
-
-
-/*
- * NAME curl_easy_duphandle()
- *
- * DESCRIPTION
- *
- * Creates a new curl session handle with the same options set for the handle
- * passed in. Duplicating a handle can only be a matter of cloning data and
- * options; internal state info and things like persistent connections cannot
- * be transferred. It is useful in multithreaded applications when you can run
- * curl_easy_duphandle() for each new thread to avoid a series of identical
- * curl_easy_setopt() invocations in every thread.
- */
-CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl);
-
-/*
- * NAME curl_easy_reset()
- *
- * DESCRIPTION
- *
- * Re-initializes a CURL handle to the default values. This puts the handle
- * back in the same state as it was in when it was just created.
- *
- * It does keep: live connections, the Session ID cache, the DNS cache and the
- * cookies.
- */
-CURL_EXTERN void curl_easy_reset(CURL *curl);
-
-/*
- * NAME curl_easy_recv()
- *
- * DESCRIPTION
- *
- * Receives data from the connected socket. Use after a successful
- * curl_easy_perform() with the CURLOPT_CONNECT_ONLY option.
- */
-CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen,
-                                    size_t *n);
-
-/*
- * NAME curl_easy_send()
- *
- * DESCRIPTION
- *
- * Sends data over the connected socket. Use after a successful
- * curl_easy_perform() with the CURLOPT_CONNECT_ONLY option.
- */
-CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer,
-                                    size_t buflen, size_t *n);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
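curl_easy_recv() and curl_easy_send() only work on a handle performed with CURLOPT_CONNECT_ONLY, which makes perform() stop right after the connection is established. A trimmed sketch of that pattern, with a placeholder host and most error handling omitted; both calls may return CURLE_AGAIN on a socket that is not ready:

```c
/* Raw send/recv over a connection libcurl set up (CURLOPT_CONNECT_ONLY). */
#include <string.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  size_t sent = 0, got = 0;
  char buf[1024];
  const char *req = "HEAD / HTTP/1.1\r\nHost: example.com\r\n\r\n";

  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
  curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1L);  /* connect, then stop */
  if(curl_easy_perform(curl) == CURLE_OK) {
    curl_easy_send(curl, req, strlen(req), &sent);
    curl_easy_recv(curl, buf, sizeof(buf), &got);    /* may be CURLE_AGAIN */
  }
  curl_easy_cleanup(curl);
  return 0;
}
```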
diff --git a/deps/libcurl/include/curl/mprintf.h b/deps/libcurl/include/curl/mprintf.h
deleted file mode 100644
index c6b0d7679a6cf970a7960af9f47a7485729598dc..0000000000000000000000000000000000000000
--- a/deps/libcurl/include/curl/mprintf.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __CURL_MPRINTF_H
-#define __CURL_MPRINTF_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at http://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-#include <stdarg.h>
-#include <stdio.h> /* needed for FILE */
-
-#include "curl.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-CURL_EXTERN int curl_mprintf(const char *format, ...);
-CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...);
-CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...);
-CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength,
-                               const char *format, ...);
-CURL_EXTERN int curl_mvprintf(const char *format, va_list args);
-CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args);
-CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args);
-CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength,
-                                const char *format, va_list args);
-CURL_EXTERN char *curl_maprintf(const char *format, ...);
-CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args);
-
-#ifdef _MPRINTF_REPLACE
-# undef printf
-# undef fprintf
-# undef sprintf
-# undef vsprintf
-# undef snprintf
-# undef vprintf
-# undef vfprintf
-# undef vsnprintf
-# undef aprintf
-# undef vaprintf
-# define printf curl_mprintf
-# define fprintf curl_mfprintf
-# define sprintf curl_msprintf
-# define vsprintf curl_mvsprintf
-# define snprintf curl_msnprintf
-# define vprintf curl_mvprintf
-# define vfprintf curl_mvfprintf
-# define vsnprintf curl_mvsnprintf
-# define aprintf curl_maprintf
-# define vaprintf curl_mvaprintf
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __CURL_MPRINTF_H */
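The curl_m* functions are plain printf clones backed by libcurl's own, platform-independent implementation, so they behave identically everywhere. A tiny usage sketch (link against libcurl):

```c
/* Bounded, portable formatting with libcurl's printf clones. */
#include <curl/mprintf.h>

int main(void)
{
  char line[64];
  /* curl_msnprintf() always NUL-terminates within the given size */
  curl_msnprintf(line, sizeof(line), "retrying in %d ms", 250);
  curl_mprintf("%s\n", line);
  return 0;
}
```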
diff --git a/deps/libcurl/include/curl/multi.h b/deps/libcurl/include/curl/multi.h
deleted file mode 100644
index 36e2e940ecd440c0a14aadc0dc3eba1d5d53f186..0000000000000000000000000000000000000000
--- a/deps/libcurl/include/curl/multi.h
+++ /dev/null
@@ -1,435 +0,0 @@
-#ifndef __CURL_MULTI_H
-#define __CURL_MULTI_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at http://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-/*
-  This is an "external" header file. Don't give away any internals here!
-
-  GOALS
-
-  o Enable a "pull" interface. The application that uses libcurl decides where
-    and when to ask libcurl to get/send data.
-
-  o Enable multiple simultaneous transfers in the same thread without making it
-    complicated for the application.
-
-  o Enable the application to select() on its own file descriptors and curl's
-    file descriptors simultaneously and easily.
-
-*/
-
-/*
- * This header file should not really need to include "curl.h" since curl.h
- * itself includes this file and we expect user applications to do #include
- * <curl/curl.h> without the need for especially including multi.h.
- *
- * For some reason we added this include here at one point, and rather than to
- * break existing (wrongly written) libcurl applications, we leave it as-is
- * but with this warning attached.
- */
-#include "curl.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef void CURLM;
-
-typedef enum {
-  CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or
-                                    curl_multi_socket*() soon */
-  CURLM_OK,
-  CURLM_BAD_HANDLE,      /* the passed-in handle is not a valid CURLM handle */
-  CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
-  CURLM_OUT_OF_MEMORY,   /* if you ever get this, you're in deep sh*t */
-  CURLM_INTERNAL_ERROR,  /* this is a libcurl bug */
-  CURLM_BAD_SOCKET,      /* the passed in socket argument did not match */
-  CURLM_UNKNOWN_OPTION,  /* curl_multi_setopt() with unsupported option */
-  CURLM_ADDED_ALREADY,   /* an easy handle already added to a multi handle was
-                            attempted to get added - again */
-  CURLM_LAST
-} CURLMcode;
-
-/* just to make code nicer when using curl_multi_socket() you can now check
-   for CURLM_CALL_MULTI_SOCKET too in the same style it works for
-   curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */
-#define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM
-
-/* bitmask bits for CURLMOPT_PIPELINING */
-#define CURLPIPE_NOTHING   0L
-#define CURLPIPE_HTTP1     1L
-#define CURLPIPE_MULTIPLEX 2L
-
-typedef enum {
-  CURLMSG_NONE, /* first, not used */
-  CURLMSG_DONE, /* This easy handle has completed. 'result' contains
-                   the CURLcode of the transfer */
-  CURLMSG_LAST  /* last, not used */
-} CURLMSG;
-
-struct CURLMsg {
-  CURLMSG msg;       /* what this message means */
-  CURL *easy_handle; /* the handle it concerns */
-  union {
-    void *whatever;    /* message-specific data */
-    CURLcode result;   /* return code for transfer */
-  } data;
-};
-typedef struct CURLMsg CURLMsg;
-
-/* Based on poll(2) structure and values.
- * We don't use pollfd and POLL* constants explicitly
- * to cover platforms without poll(). */
-#define CURL_WAIT_POLLIN    0x0001
-#define CURL_WAIT_POLLPRI   0x0002
-#define CURL_WAIT_POLLOUT   0x0004
-
-struct curl_waitfd {
-  curl_socket_t fd;
-  short events;
-  short revents; /* not supported yet */
-};
-
-/*
- * Name:    curl_multi_init()
- *
- * Desc:    initialize multi-style curl usage
- *
- * Returns: a new CURLM handle to use in all 'curl_multi' functions.
- */
-CURL_EXTERN CURLM *curl_multi_init(void);
-
-/*
- * Name:    curl_multi_add_handle()
- *
- * Desc:    add a standard curl handle to the multi stack
- *
- * Returns: CURLMcode type, general multi error code.
- */
-CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle,
-                                            CURL *curl_handle);
-
- /*
-  * Name:    curl_multi_remove_handle()
-  *
-  * Desc:    removes a curl handle from the multi stack again
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
-                                               CURL *curl_handle);
-
- /*
-  * Name:    curl_multi_fdset()
-  *
-  * Desc:    Ask curl for its fd_set sets. The app can use these to select() or
-  *          poll() on. We want curl_multi_perform() called as soon as one of
-  *          them is ready.
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle,
-                                       fd_set *read_fd_set,
-                                       fd_set *write_fd_set,
-                                       fd_set *exc_fd_set,
-                                       int *max_fd);
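A classic way to drive the multi interface with curl_multi_fdset() is a select() loop. A trimmed sketch, assuming easy is an already-initialised easy handle; a production loop would also consult curl_multi_timeout() instead of the fixed 1-second timeout used here:

```c
/* select()-driven multi loop built on curl_multi_fdset(). */
#include <sys/select.h>
#include <curl/curl.h>

void drive(CURL *easy)
{
  CURLM *multi = curl_multi_init();
  int running = 1;

  curl_multi_add_handle(multi, easy);
  while(running) {
    fd_set rd, wr, ex;
    int maxfd = -1;
    struct timeval tv = { 1, 0 };

    FD_ZERO(&rd); FD_ZERO(&wr); FD_ZERO(&ex);
    curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
    /* if maxfd is -1, libcurl has nothing to watch yet and the empty
       select() below simply sleeps for the timeout */
    select(maxfd + 1, &rd, &wr, &ex, &tv);
    curl_multi_perform(multi, &running);
  }
  curl_multi_remove_handle(multi, easy);
  curl_multi_cleanup(multi);
}
```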
-/*
- * Name:    curl_multi_wait()
- *
- * Desc:    Poll on all fds within a CURLM set as well as any
- *          additional fds passed to the function.
- *
- * Returns: CURLMcode type, general multi error code.
- */
-CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle,
-                                      struct curl_waitfd extra_fds[],
-                                      unsigned int extra_nfds,
-                                      int timeout_ms,
-                                      int *ret);
-
- /*
-  * Name:    curl_multi_perform()
-  *
-  * Desc:    When the app thinks there's data available for curl it calls this
-  *          function to read/write whatever there is right now. This returns
-  *          as soon as the reads and writes are done. This function does not
-  *          require that there actually is data available for reading or that
-  *          data can be written; it can be called just in case. It returns
-  *          the number of handles that still transfer data in the second
-  *          argument's integer-pointer.
-  *
-  * Returns: CURLMcode type, general multi error code. *NOTE* that this only
-  *          returns errors etc regarding the whole multi stack. Problems may
-  *          still have occurred on individual transfers even when this
-  *          returns OK.
-  */
-CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle,
-                                         int *running_handles);
-
- /*
-  * Name:    curl_multi_cleanup()
-  *
-  * Desc:    Cleans up and removes a whole multi stack. It does not free or
-  *          touch any individual easy handles in any way. We need to define
-  *          in what state those handles will be if this function is called
-  *          in the middle of a transfer.
-  *
-  * Returns: CURLMcode type, general multi error code.
-  */
-CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
-
-/*
- * Name:    curl_multi_info_read()
- *
- * Desc:    Ask the multi handle if there are any messages/informationals from
- *          the individual transfers. Messages include informationals such as
- *          an error code from the transfer or just the fact that a transfer
- *          has completed. More details on these should be written down as
- *          well.
- *
- *          Repeated calls to this function will return a new struct each
- *          time, until a special "end of msgs" struct is returned as a signal
- *          that there is no more to get at this point.
- *
- *          The data the returned pointer points to will not survive calling
- *          curl_multi_cleanup().
- *
- *          The 'CURLMsg' struct is meant to be very simple and only contain
- *          very basic information. If more involved information is wanted,
- *          we will provide the particular "transfer handle" in that struct
- *          and that should/could/would be used in subsequent
- *          curl_easy_getinfo() calls (or similar). The point being that we
- *          must never expose complex structs to applications, as then we'll
- *          undoubtedly get backwards compatibility problems in the future.
- *
- * Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
- *          of structs. It also writes the number of messages left in the
- *          queue (after this read) in the integer the second argument points
- *          to.
- */
-CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
-                                          int *msgs_in_queue);
-
-/*
- * Name:    curl_multi_strerror()
- *
- * Desc:    The curl_multi_strerror function may be used to turn a CURLMcode
- *          value into the equivalent human readable error string. This is
- *          useful for printing meaningful error messages.
- *
- * Returns: A pointer to a zero-terminated error message.
- */
-CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
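Put together, curl_multi_wait(), curl_multi_perform() and curl_multi_info_read() form the canonical polling loop the comments above describe: wait, perform, then drain completion messages. A compact sketch, again assuming easy is an initialised easy handle:

```c
/* Canonical multi loop: wait, perform, then drain CURLMSG_DONE messages. */
#include <stdio.h>
#include <curl/curl.h>

void run_and_report(CURL *easy)
{
  CURLM *multi = curl_multi_init();
  CURLMsg *msg;
  int running = 1, left = 0;

  curl_multi_add_handle(multi, easy);
  while(running) {
    int numfds = 0;
    curl_multi_wait(multi, NULL, 0, 1000, &numfds);  /* no extra fds */
    curl_multi_perform(multi, &running);
  }

  /* only CURLMSG_DONE is defined today; 'result' is the transfer's CURLcode */
  while((msg = curl_multi_info_read(multi, &left)) != NULL)
    if(msg->msg == CURLMSG_DONE)
      printf("transfer finished: %s\n", curl_easy_strerror(msg->data.result));

  curl_multi_remove_handle(multi, easy);
  curl_multi_cleanup(multi);
}
```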
-/*
- * Name:    curl_multi_socket() and
- *          curl_multi_socket_all()
- *
- * Desc:    An alternative version of curl_multi_perform() that allows the
- *          application to pass in one of the file descriptors that have been
- *          detected to have "action" on them and let libcurl perform.
- *          See man page for details.
- */
-#define CURL_POLL_NONE   0
-#define CURL_POLL_IN     1
-#define CURL_POLL_OUT    2
-#define CURL_POLL_INOUT  3
-#define CURL_POLL_REMOVE 4
-
-#define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD
-
-#define CURL_CSELECT_IN   0x01
-#define CURL_CSELECT_OUT  0x02
-#define CURL_CSELECT_ERR  0x04
-
-typedef int (*curl_socket_callback)(CURL *easy,      /* easy handle */
-                                    curl_socket_t s, /* socket */
-                                    int what,        /* see above */
-                                    void *userp,     /* private callback
-                                                        pointer */
-                                    void *socketp);  /* private socket
-                                                        pointer */
-/*
- * Name:    curl_multi_timer_callback
- *
- * Desc:    Called by libcurl whenever the library detects a change in the
- *          maximum number of milliseconds the app is allowed to wait before
- *          curl_multi_socket() or curl_multi_perform() must be called
- *          (to allow libcurl's timed events to take place).
- *
- * Returns: The callback should return zero.
- */
-typedef int (*curl_multi_timer_callback)(CURLM *multi,    /* multi handle */
-                                         long timeout_ms, /* see above */
-                                         void *userp);    /* private callback
-                                                             pointer */
-
-CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s,
-                                        int *running_handles);
-
-CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle,
-                                               curl_socket_t s,
-                                               int ev_bitmask,
-                                               int *running_handles);
-
-CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle,
-                                            int *running_handles);
-
-#ifndef CURL_ALLOW_OLD_MULTI_SOCKET
-/* This macro below was added in 7.16.3 to push users who recompile to use
-   the new curl_multi_socket_action() instead of the old curl_multi_socket()
-*/
-#define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z)
-#endif
-
-/*
- * Name:    curl_multi_timeout()
- *
- * Desc:    Returns the maximum number of milliseconds the app is allowed to
- *          wait before curl_multi_socket() or curl_multi_perform() must be
- *          called (to allow libcurl's timed events to take place).
- *
- * Returns: CURLM error code.
- */
-CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle,
-                                         long *milliseconds);
-
-#undef CINIT /* re-using the same name as in curl.h */
-
-#ifdef CURL_ISOCPP
-#define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num
-#else
-/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it.
*/ -#define LONG CURLOPTTYPE_LONG -#define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT -#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT -#define OFF_T CURLOPTTYPE_OFF_T -#define CINIT(name,type,number) CURLMOPT_/**/name = type + number -#endif - -typedef enum { - /* This is the socket callback function pointer */ - CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1), - - /* This is the argument passed to the socket callback */ - CINIT(SOCKETDATA, OBJECTPOINT, 2), - - /* set to 1 to enable pipelining for this multi handle */ - CINIT(PIPELINING, LONG, 3), - - /* This is the timer callback function pointer */ - CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4), - - /* This is the argument passed to the timer callback */ - CINIT(TIMERDATA, OBJECTPOINT, 5), - - /* maximum number of entries in the connection cache */ - CINIT(MAXCONNECTS, LONG, 6), - - /* maximum number of (pipelining) connections to one host */ - CINIT(MAX_HOST_CONNECTIONS, LONG, 7), - - /* maximum number of requests in a pipeline */ - CINIT(MAX_PIPELINE_LENGTH, LONG, 8), - - /* a connection with a content-length longer than this - will not be considered for pipelining */ - CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9), - - /* a connection with a chunk length longer than this - will not be considered for pipelining */ - CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10), - - /* a list of site names(+port) that are blacklisted from - pipelining */ - CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11), - - /* a list of server types that are blacklisted from - pipelining */ - CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12), - - /* maximum number of open connections in total */ - CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13), - - /* This is the server push callback function pointer */ - CINIT(PUSHFUNCTION, FUNCTIONPOINT, 14), - - /* This is the argument passed to the server push callback */ - CINIT(PUSHDATA, OBJECTPOINT, 15), - - CURLMOPT_LASTENTRY /* the last unused */ -} CURLMoption; - - -/* - * Name: curl_multi_setopt() - * - * Desc: Sets options for the multi handle. - * - * Returns: CURLM error code. - */ -CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle, - CURLMoption option, ...); - - -/* - * Name: curl_multi_assign() - * - * Desc: This function sets an association in the multi handle between the - * given socket and a private pointer of the application. This is - * (only) useful for curl_multi_socket uses. - * - * Returns: CURLM error code. - */ -CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle, - curl_socket_t sockfd, void *sockp); - - -/* - * Name: curl_push_callback - * - * Desc: This callback gets called when a new stream is being pushed by the - * server. It approves or denies the new stream. - * - * Returns: CURL_PUSH_OK or CURL_PUSH_DENY. 
- */
-#define CURL_PUSH_OK   0
-#define CURL_PUSH_DENY 1
-
-struct curl_pushheaders;  /* forward declaration only */
-
-CURL_EXTERN char *curl_pushheader_bynum(struct curl_pushheaders *h,
-                                        size_t num);
-CURL_EXTERN char *curl_pushheader_byname(struct curl_pushheaders *h,
-                                         const char *name);
-
-typedef int (*curl_push_callback)(CURL *parent,
-                                  CURL *easy,
-                                  size_t num_headers,
-                                  struct curl_pushheaders *headers,
-                                  void *userp);
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif
diff --git a/deps/libcurl/include/curl/stdcheaders.h b/deps/libcurl/include/curl/stdcheaders.h
deleted file mode 100644
index ad82ef6335d6167aecf8b9e49d68c462ea028bfd..0000000000000000000000000000000000000000
--- a/deps/libcurl/include/curl/stdcheaders.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __STDC_HEADERS_H
-#define __STDC_HEADERS_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at http://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ***************************************************************************/
-
-#include <sys/types.h>
-
-size_t fread (void *, size_t, size_t, FILE *);
-size_t fwrite (const void *, size_t, size_t, FILE *);
-
-int strcasecmp(const char *, const char *);
-int strncasecmp(const char *, const char *, size_t);
-
-#endif /* __STDC_HEADERS_H */
diff --git a/deps/libcurl/include/curl/typecheck-gcc.h b/deps/libcurl/include/curl/typecheck-gcc.h
deleted file mode 100644
index 2e24db0ff59c40b137ec89a9d8a3f571ed4791b2..0000000000000000000000000000000000000000
--- a/deps/libcurl/include/curl/typecheck-gcc.h
+++ /dev/null
@@ -1,622 +0,0 @@
-#ifndef __CURL_TYPECHECK_GCC_H
-#define __CURL_TYPECHECK_GCC_H
-/***************************************************************************
- *                                  _   _ ____  _
- *  Project                     ___| | | |  _ \| |
- *                             / __| | | | |_) | |
- *                            | (__| |_| |  _ <| |___
- *                             \___|\___/|_| \_\_____|
- *
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at http://curl.haxx.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- * - ***************************************************************************/ - -/* wraps curl_easy_setopt() with typechecking */ - -/* To add a new kind of warning, add an - * if(_curl_is_sometype_option(_curl_opt)) - * if(!_curl_is_sometype(value)) - * _curl_easy_setopt_err_sometype(); - * block and define _curl_is_sometype_option, _curl_is_sometype and - * _curl_easy_setopt_err_sometype below - * - * NOTE: We use two nested 'if' statements here instead of the && operator, in - * order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x - * when compiling with -Wlogical-op. - * - * To add an option that uses the same type as an existing option, you'll just - * need to extend the appropriate _curl_*_option macro - */ -#define curl_easy_setopt(handle, option, value) \ -__extension__ ({ \ - __typeof__ (option) _curl_opt = option; \ - if(__builtin_constant_p(_curl_opt)) { \ - if(_curl_is_long_option(_curl_opt)) \ - if(!_curl_is_long(value)) \ - _curl_easy_setopt_err_long(); \ - if(_curl_is_off_t_option(_curl_opt)) \ - if(!_curl_is_off_t(value)) \ - _curl_easy_setopt_err_curl_off_t(); \ - if(_curl_is_string_option(_curl_opt)) \ - if(!_curl_is_string(value)) \ - _curl_easy_setopt_err_string(); \ - if(_curl_is_write_cb_option(_curl_opt)) \ - if(!_curl_is_write_cb(value)) \ - _curl_easy_setopt_err_write_callback(); \ - if((_curl_opt) == CURLOPT_READFUNCTION) \ - if(!_curl_is_read_cb(value)) \ - _curl_easy_setopt_err_read_cb(); \ - if((_curl_opt) == CURLOPT_IOCTLFUNCTION) \ - if(!_curl_is_ioctl_cb(value)) \ - _curl_easy_setopt_err_ioctl_cb(); \ - if((_curl_opt) == CURLOPT_SOCKOPTFUNCTION) \ - if(!_curl_is_sockopt_cb(value)) \ - _curl_easy_setopt_err_sockopt_cb(); \ - if((_curl_opt) == CURLOPT_OPENSOCKETFUNCTION) \ - if(!_curl_is_opensocket_cb(value)) \ - _curl_easy_setopt_err_opensocket_cb(); \ - if((_curl_opt) == CURLOPT_PROGRESSFUNCTION) \ - if(!_curl_is_progress_cb(value)) \ - _curl_easy_setopt_err_progress_cb(); \ - if((_curl_opt) == CURLOPT_DEBUGFUNCTION) \ - if(!_curl_is_debug_cb(value)) \ - _curl_easy_setopt_err_debug_cb(); \ - if((_curl_opt) == CURLOPT_SSL_CTX_FUNCTION) \ - if(!_curl_is_ssl_ctx_cb(value)) \ - _curl_easy_setopt_err_ssl_ctx_cb(); \ - if(_curl_is_conv_cb_option(_curl_opt)) \ - if(!_curl_is_conv_cb(value)) \ - _curl_easy_setopt_err_conv_cb(); \ - if((_curl_opt) == CURLOPT_SEEKFUNCTION) \ - if(!_curl_is_seek_cb(value)) \ - _curl_easy_setopt_err_seek_cb(); \ - if(_curl_is_cb_data_option(_curl_opt)) \ - if(!_curl_is_cb_data(value)) \ - _curl_easy_setopt_err_cb_data(); \ - if((_curl_opt) == CURLOPT_ERRORBUFFER) \ - if(!_curl_is_error_buffer(value)) \ - _curl_easy_setopt_err_error_buffer(); \ - if((_curl_opt) == CURLOPT_STDERR) \ - if(!_curl_is_FILE(value)) \ - _curl_easy_setopt_err_FILE(); \ - if(_curl_is_postfields_option(_curl_opt)) \ - if(!_curl_is_postfields(value)) \ - _curl_easy_setopt_err_postfields(); \ - if((_curl_opt) == CURLOPT_HTTPPOST) \ - if(!_curl_is_arr((value), struct curl_httppost)) \ - _curl_easy_setopt_err_curl_httpost(); \ - if(_curl_is_slist_option(_curl_opt)) \ - if(!_curl_is_arr((value), struct curl_slist)) \ - _curl_easy_setopt_err_curl_slist(); \ - if((_curl_opt) == CURLOPT_SHARE) \ - if(!_curl_is_ptr((value), CURLSH)) \ - _curl_easy_setopt_err_CURLSH(); \ - } \ - curl_easy_setopt(handle, _curl_opt, value); \ -}) - -/* wraps curl_easy_getinfo() with typechecking */ -/* FIXME: don't allow const pointers */ -#define curl_easy_getinfo(handle, info, arg) \ -__extension__ ({ \ - __typeof__ (info) _curl_info = info; \ - 
if(__builtin_constant_p(_curl_info)) { \ - if(_curl_is_string_info(_curl_info)) \ - if(!_curl_is_arr((arg), char *)) \ - _curl_easy_getinfo_err_string(); \ - if(_curl_is_long_info(_curl_info)) \ - if(!_curl_is_arr((arg), long)) \ - _curl_easy_getinfo_err_long(); \ - if(_curl_is_double_info(_curl_info)) \ - if(!_curl_is_arr((arg), double)) \ - _curl_easy_getinfo_err_double(); \ - if(_curl_is_slist_info(_curl_info)) \ - if(!_curl_is_arr((arg), struct curl_slist *)) \ - _curl_easy_getinfo_err_curl_slist(); \ - } \ - curl_easy_getinfo(handle, _curl_info, arg); \ -}) - -/* TODO: typechecking for curl_share_setopt() and curl_multi_setopt(), - * for now just make sure that the functions are called with three - * arguments - */ -#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param) -#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param) - - -/* the actual warnings, triggered by calling the _curl_easy_setopt_err* - * functions */ - -/* To define a new warning, use _CURL_WARNING(identifier, "message") */ -#define _CURL_WARNING(id, message) \ - static void __attribute__((__warning__(message))) \ - __attribute__((__unused__)) __attribute__((__noinline__)) \ - id(void) { __asm__(""); } - -_CURL_WARNING(_curl_easy_setopt_err_long, - "curl_easy_setopt expects a long argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_curl_off_t, - "curl_easy_setopt expects a curl_off_t argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_string, - "curl_easy_setopt expects a " - "string (char* or char[]) argument for this option" - ) -_CURL_WARNING(_curl_easy_setopt_err_write_callback, - "curl_easy_setopt expects a curl_write_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_read_cb, - "curl_easy_setopt expects a curl_read_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_ioctl_cb, - "curl_easy_setopt expects a curl_ioctl_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_sockopt_cb, - "curl_easy_setopt expects a curl_sockopt_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_opensocket_cb, - "curl_easy_setopt expects a " - "curl_opensocket_callback argument for this option" - ) -_CURL_WARNING(_curl_easy_setopt_err_progress_cb, - "curl_easy_setopt expects a curl_progress_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_debug_cb, - "curl_easy_setopt expects a curl_debug_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_ssl_ctx_cb, - "curl_easy_setopt expects a curl_ssl_ctx_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_conv_cb, - "curl_easy_setopt expects a curl_conv_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_seek_cb, - "curl_easy_setopt expects a curl_seek_callback argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_cb_data, - "curl_easy_setopt expects a " - "private data pointer as argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_error_buffer, - "curl_easy_setopt expects a " - "char buffer of CURL_ERROR_SIZE as argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_FILE, - "curl_easy_setopt expects a FILE* argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_postfields, - "curl_easy_setopt expects a void* or char* argument for this option") -_CURL_WARNING(_curl_easy_setopt_err_curl_httpost, - "curl_easy_setopt expects a struct curl_httppost* argument for this option") 
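What these warning stubs buy in practice: when the typecheck header is active (a sufficiently recent gcc, with optimization enabled so the guarded stub calls survive constant folding), a mistyped curl_easy_setopt() argument produces one of the messages above at compile time. An illustrative sketch; CURLOPT_TIMEOUT takes a long:

```c
/* With gcc and the typecheck wrapper active, the first setopt call below
 * emits "curl_easy_setopt expects a long argument for this option" at
 * compile time, because an int* is not a long. */
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  int timeout = 30;                                  /* wrong type */
  curl_easy_setopt(curl, CURLOPT_TIMEOUT, &timeout); /* warns */
  curl_easy_setopt(curl, CURLOPT_TIMEOUT, 30L);      /* correct */
  curl_easy_cleanup(curl);
  return 0;
}
```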
-_CURL_WARNING(_curl_easy_setopt_err_curl_slist,
-  "curl_easy_setopt expects a struct curl_slist* argument for this option")
-_CURL_WARNING(_curl_easy_setopt_err_CURLSH,
-  "curl_easy_setopt expects a CURLSH* argument for this option")
-
-_CURL_WARNING(_curl_easy_getinfo_err_string,
-  "curl_easy_getinfo expects a pointer to char * for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_long,
-  "curl_easy_getinfo expects a pointer to long for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_double,
-  "curl_easy_getinfo expects a pointer to double for this info")
-_CURL_WARNING(_curl_easy_getinfo_err_curl_slist,
-  "curl_easy_getinfo expects a pointer to struct curl_slist * for this info")
-
-/* groups of curl_easy_setopt options that take the same type of argument */
-
-/* To add a new option to one of the groups, just add
- *   (option) == CURLOPT_SOMETHING
- * to the or-expression. If the option takes a long or curl_off_t, you don't
- * have to do anything
- */
-
-/* evaluates to true if option takes a long argument */
-#define _curl_is_long_option(option) \
-  (0 < (option) && (option) < CURLOPTTYPE_OBJECTPOINT)
-
-#define _curl_is_off_t_option(option) \
-  ((option) > CURLOPTTYPE_OFF_T)
-
-/* evaluates to true if option takes a char* argument */
-#define _curl_is_string_option(option) \
-  ((option) == CURLOPT_ACCEPT_ENCODING || \
-   (option) == CURLOPT_CAINFO || \
-   (option) == CURLOPT_CAPATH || \
-   (option) == CURLOPT_COOKIE || \
-   (option) == CURLOPT_COOKIEFILE || \
-   (option) == CURLOPT_COOKIEJAR || \
-   (option) == CURLOPT_COOKIELIST || \
-   (option) == CURLOPT_CRLFILE || \
-   (option) == CURLOPT_CUSTOMREQUEST || \
-   (option) == CURLOPT_DEFAULT_PROTOCOL || \
-   (option) == CURLOPT_DNS_INTERFACE || \
-   (option) == CURLOPT_DNS_LOCAL_IP4 || \
-   (option) == CURLOPT_DNS_LOCAL_IP6 || \
-   (option) == CURLOPT_DNS_SERVERS || \
-   (option) == CURLOPT_EGDSOCKET || \
-   (option) == CURLOPT_FTPPORT || \
-   (option) == CURLOPT_FTP_ACCOUNT || \
-   (option) == CURLOPT_FTP_ALTERNATIVE_TO_USER || \
-   (option) == CURLOPT_INTERFACE || \
-   (option) == CURLOPT_ISSUERCERT || \
-   (option) == CURLOPT_KEYPASSWD || \
-   (option) == CURLOPT_KRBLEVEL || \
-   (option) == CURLOPT_LOGIN_OPTIONS || \
-   (option) == CURLOPT_MAIL_AUTH || \
-   (option) == CURLOPT_MAIL_FROM || \
-   (option) == CURLOPT_NETRC_FILE || \
-   (option) == CURLOPT_NOPROXY || \
-   (option) == CURLOPT_PASSWORD || \
-   (option) == CURLOPT_PINNEDPUBLICKEY || \
-   (option) == CURLOPT_PROXY || \
-   (option) == CURLOPT_PROXYPASSWORD || \
-   (option) == CURLOPT_PROXYUSERNAME || \
-   (option) == CURLOPT_PROXYUSERPWD || \
-   (option) == CURLOPT_PROXY_SERVICE_NAME || \
-   (option) == CURLOPT_RANDOM_FILE || \
-   (option) == CURLOPT_RANGE || \
-   (option) == CURLOPT_REFERER || \
-   (option) == CURLOPT_RTSP_SESSION_ID || \
-   (option) == CURLOPT_RTSP_STREAM_URI || \
-   (option) == CURLOPT_RTSP_TRANSPORT || \
-   (option) == CURLOPT_SERVICE_NAME || \
-   (option) == CURLOPT_SOCKS5_GSSAPI_SERVICE || \
-   (option) == CURLOPT_SSH_HOST_PUBLIC_KEY_MD5 || \
-   (option) == CURLOPT_SSH_KNOWNHOSTS || \
-   (option) == CURLOPT_SSH_PRIVATE_KEYFILE || \
-   (option) == CURLOPT_SSH_PUBLIC_KEYFILE || \
-   (option) == CURLOPT_SSLCERT || \
-   (option) == CURLOPT_SSLCERTTYPE || \
-   (option) == CURLOPT_SSLENGINE || \
-   (option) == CURLOPT_SSLKEY || \
-   (option) == CURLOPT_SSLKEYTYPE || \
-   (option) == CURLOPT_SSL_CIPHER_LIST || \
-   (option) == CURLOPT_TLSAUTH_PASSWORD || \
-   (option) == CURLOPT_TLSAUTH_TYPE || \
-   (option) == CURLOPT_TLSAUTH_USERNAME || \
-   (option) == CURLOPT_UNIX_SOCKET_PATH || \
-   (option) ==
CURLOPT_URL || \ - (option) == CURLOPT_USERAGENT || \ - (option) == CURLOPT_USERNAME || \ - (option) == CURLOPT_USERPWD || \ - (option) == CURLOPT_XOAUTH2_BEARER || \ - 0) - -/* evaluates to true if option takes a curl_write_callback argument */ -#define _curl_is_write_cb_option(option) \ - ((option) == CURLOPT_HEADERFUNCTION || \ - (option) == CURLOPT_WRITEFUNCTION) - -/* evaluates to true if option takes a curl_conv_callback argument */ -#define _curl_is_conv_cb_option(option) \ - ((option) == CURLOPT_CONV_TO_NETWORK_FUNCTION || \ - (option) == CURLOPT_CONV_FROM_NETWORK_FUNCTION || \ - (option) == CURLOPT_CONV_FROM_UTF8_FUNCTION) - -/* evaluates to true if option takes a data argument to pass to a callback */ -#define _curl_is_cb_data_option(option) \ - ((option) == CURLOPT_CHUNK_DATA || \ - (option) == CURLOPT_CLOSESOCKETDATA || \ - (option) == CURLOPT_DEBUGDATA || \ - (option) == CURLOPT_FNMATCH_DATA || \ - (option) == CURLOPT_HEADERDATA || \ - (option) == CURLOPT_INTERLEAVEDATA || \ - (option) == CURLOPT_IOCTLDATA || \ - (option) == CURLOPT_OPENSOCKETDATA || \ - (option) == CURLOPT_PRIVATE || \ - (option) == CURLOPT_PROGRESSDATA || \ - (option) == CURLOPT_READDATA || \ - (option) == CURLOPT_SEEKDATA || \ - (option) == CURLOPT_SOCKOPTDATA || \ - (option) == CURLOPT_SSH_KEYDATA || \ - (option) == CURLOPT_SSL_CTX_DATA || \ - (option) == CURLOPT_WRITEDATA || \ - 0) - -/* evaluates to true if option takes a POST data argument (void* or char*) */ -#define _curl_is_postfields_option(option) \ - ((option) == CURLOPT_POSTFIELDS || \ - (option) == CURLOPT_COPYPOSTFIELDS || \ - 0) - -/* evaluates to true if option takes a struct curl_slist * argument */ -#define _curl_is_slist_option(option) \ - ((option) == CURLOPT_HTTP200ALIASES || \ - (option) == CURLOPT_HTTPHEADER || \ - (option) == CURLOPT_MAIL_RCPT || \ - (option) == CURLOPT_POSTQUOTE || \ - (option) == CURLOPT_PREQUOTE || \ - (option) == CURLOPT_PROXYHEADER || \ - (option) == CURLOPT_QUOTE || \ - (option) == CURLOPT_RESOLVE || \ - (option) == CURLOPT_TELNETOPTIONS || \ - 0) - -/* groups of curl_easy_getinfo infos that take the same type of argument */ - -/* evaluates to true if info expects a pointer to char * argument */ -#define _curl_is_string_info(info) \ - (CURLINFO_STRING < (info) && (info) < CURLINFO_LONG) - -/* evaluates to true if info expects a pointer to long argument */ -#define _curl_is_long_info(info) \ - (CURLINFO_LONG < (info) && (info) < CURLINFO_DOUBLE) - -/* evaluates to true if info expects a pointer to double argument */ -#define _curl_is_double_info(info) \ - (CURLINFO_DOUBLE < (info) && (info) < CURLINFO_SLIST) - -/* true if info expects a pointer to struct curl_slist * argument */ -#define _curl_is_slist_info(info) \ - (CURLINFO_SLIST < (info)) - - -/* typecheck helpers -- check whether given expression has requested type*/ - -/* For pointers, you can use the _curl_is_ptr/_curl_is_arr macros, - * otherwise define a new macro. Search for __builtin_types_compatible_p - * in the GCC manual. - * NOTE: these macros MUST NOT EVALUATE their arguments! The argument is - * the actual expression passed to the curl_easy_setopt macro. This - * means that you can only apply the sizeof and __typeof__ operators, no - * == or whatsoever. 
- */
-
-/* XXX: should evaluate to true iff expr is a pointer */
-#define _curl_is_any_ptr(expr) \
-  (sizeof(expr) == sizeof(void*))
-
-/* evaluates to true if expr is NULL */
-/* XXX: must not evaluate expr, so this check is not accurate */
-#define _curl_is_NULL(expr) \
-  (__builtin_types_compatible_p(__typeof__(expr), __typeof__(NULL)))
-
-/* evaluates to true if expr is type*, const type* or NULL */
-#define _curl_is_ptr(expr, type) \
-  (_curl_is_NULL(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), type *) || \
-   __builtin_types_compatible_p(__typeof__(expr), const type *))
-
-/* evaluates to true if expr is one of type[], type*, NULL or const type* */
-#define _curl_is_arr(expr, type) \
-  (_curl_is_ptr((expr), type) || \
-   __builtin_types_compatible_p(__typeof__(expr), type []))
-
-/* evaluates to true if expr is a string */
-#define _curl_is_string(expr) \
-  (_curl_is_arr((expr), char) || \
-   _curl_is_arr((expr), signed char) || \
-   _curl_is_arr((expr), unsigned char))
-
-/* evaluates to true if expr is a long (no matter the signedness)
- * XXX: for now, int is also accepted (and therefore short and char, which
- * are promoted to int when passed to a variadic function) */
-#define _curl_is_long(expr) \
-  (__builtin_types_compatible_p(__typeof__(expr), long) || \
-   __builtin_types_compatible_p(__typeof__(expr), signed long) || \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned long) || \
-   __builtin_types_compatible_p(__typeof__(expr), int) || \
-   __builtin_types_compatible_p(__typeof__(expr), signed int) || \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned int) || \
-   __builtin_types_compatible_p(__typeof__(expr), short) || \
-   __builtin_types_compatible_p(__typeof__(expr), signed short) || \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned short) || \
-   __builtin_types_compatible_p(__typeof__(expr), char) || \
-   __builtin_types_compatible_p(__typeof__(expr), signed char) || \
-   __builtin_types_compatible_p(__typeof__(expr), unsigned char))
-
-/* evaluates to true if expr is of type curl_off_t */
-#define _curl_is_off_t(expr) \
-  (__builtin_types_compatible_p(__typeof__(expr), curl_off_t))
-
-/* evaluates to true if expr is a buffer suitable for CURLOPT_ERRORBUFFER */
-/* XXX: also check size of a char[] array? */
-#define _curl_is_error_buffer(expr) \
-  (_curl_is_NULL(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), char *) || \
-   __builtin_types_compatible_p(__typeof__(expr), char[]))
-
-/* evaluates to true if expr is of type (const) void* or (const) FILE* */
-#if 0
-#define _curl_is_cb_data(expr) \
-  (_curl_is_ptr((expr), void) || \
-   _curl_is_ptr((expr), FILE))
-#else /* be less strict */
-#define _curl_is_cb_data(expr) \
-  _curl_is_any_ptr(expr)
-#endif
-
-/* evaluates to true if expr is of type FILE* */
-#define _curl_is_FILE(expr) \
-  (__builtin_types_compatible_p(__typeof__(expr), FILE *))
-
-/* evaluates to true if expr can be passed as POST data (void* or char*) */
-#define _curl_is_postfields(expr) \
-  (_curl_is_ptr((expr), void) || \
-   _curl_is_arr((expr), char))
-
-/* FIXME: the whole callback checking is messy...
- * The idea is to tolerate char vs. void and const vs. not const
- * pointers in arguments at least
- */
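All of these helpers lean on GCC's __builtin_types_compatible_p(), which inspects only the type of its operands and never evaluates them; that is what keeps the macros safe on arbitrary expressions. A standalone sketch (gcc or clang required; the IS_LONG macro is illustrative):

```c
/* Type inspection without evaluation, the core trick of typecheck-gcc.h. */
#include <stdio.h>

#define IS_LONG(expr) \
  __builtin_types_compatible_p(__typeof__(expr), long)

int main(void)
{
  long l = 0;
  int i = 0;
  printf("%d %d\n", IS_LONG(l), IS_LONG(i));  /* prints: 1 0 */
  (void)IS_LONG(l++);  /* type-only: l++ is never evaluated, l stays 0 */
  return 0;
}
```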
-/* helper: __builtin_types_compatible_p distinguishes between functions and
- * function pointers, hide it */
-#define _curl_callback_compatible(func, type) \
-  (__builtin_types_compatible_p(__typeof__(func), type) || \
-   __builtin_types_compatible_p(__typeof__(func), type*))
-
-/* evaluates to true if expr is of type curl_read_callback or "similar" */
-#define _curl_is_read_cb(expr) \
-  (_curl_is_NULL(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), __typeof__(fread)) || \
-   __builtin_types_compatible_p(__typeof__(expr), curl_read_callback) || \
-   _curl_callback_compatible((expr), _curl_read_callback1) || \
-   _curl_callback_compatible((expr), _curl_read_callback2) || \
-   _curl_callback_compatible((expr), _curl_read_callback3) || \
-   _curl_callback_compatible((expr), _curl_read_callback4) || \
-   _curl_callback_compatible((expr), _curl_read_callback5) || \
-   _curl_callback_compatible((expr), _curl_read_callback6))
-typedef size_t (_curl_read_callback1)(char *, size_t, size_t, void*);
-typedef size_t (_curl_read_callback2)(char *, size_t, size_t, const void*);
-typedef size_t (_curl_read_callback3)(char *, size_t, size_t, FILE*);
-typedef size_t (_curl_read_callback4)(void *, size_t, size_t, void*);
-typedef size_t (_curl_read_callback5)(void *, size_t, size_t, const void*);
-typedef size_t (_curl_read_callback6)(void *, size_t, size_t, FILE*);
-
-/* evaluates to true if expr is of type curl_write_callback or "similar" */
-#define _curl_is_write_cb(expr) \
-  (_curl_is_read_cb(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), __typeof__(fwrite)) || \
-   __builtin_types_compatible_p(__typeof__(expr), curl_write_callback) || \
-   _curl_callback_compatible((expr), _curl_write_callback1) || \
-   _curl_callback_compatible((expr), _curl_write_callback2) || \
-   _curl_callback_compatible((expr), _curl_write_callback3) || \
-   _curl_callback_compatible((expr), _curl_write_callback4) || \
-   _curl_callback_compatible((expr), _curl_write_callback5) || \
-   _curl_callback_compatible((expr), _curl_write_callback6))
-typedef size_t (_curl_write_callback1)(const char *, size_t, size_t, void*);
-typedef size_t (_curl_write_callback2)(const char *, size_t, size_t,
-                                       const void*);
-typedef size_t (_curl_write_callback3)(const char *, size_t, size_t, FILE*);
-typedef size_t (_curl_write_callback4)(const void *, size_t, size_t, void*);
-typedef size_t (_curl_write_callback5)(const void *, size_t, size_t,
-                                       const void*);
-typedef size_t (_curl_write_callback6)(const void *, size_t, size_t, FILE*);
-
-/* evaluates to true if expr is of type curl_ioctl_callback or "similar" */
-#define _curl_is_ioctl_cb(expr) \
-  (_curl_is_NULL(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), curl_ioctl_callback) || \
-   _curl_callback_compatible((expr), _curl_ioctl_callback1) || \
-   _curl_callback_compatible((expr), _curl_ioctl_callback2) || \
-   _curl_callback_compatible((expr), _curl_ioctl_callback3) || \
-   _curl_callback_compatible((expr), _curl_ioctl_callback4))
-typedef curlioerr (_curl_ioctl_callback1)(CURL *, int, void*);
-typedef curlioerr (_curl_ioctl_callback2)(CURL *, int, const void*);
-typedef curlioerr (_curl_ioctl_callback3)(CURL *, curliocmd, void*);
-typedef curlioerr (_curl_ioctl_callback4)(CURL *, curliocmd, const void*);
-
-/* evaluates to true if expr is of type curl_sockopt_callback or "similar" */
-#define _curl_is_sockopt_cb(expr) \
-  (_curl_is_NULL(expr) || \
-   __builtin_types_compatible_p(__typeof__(expr), curl_sockopt_callback) ||
\ - _curl_callback_compatible((expr), _curl_sockopt_callback1) || \ - _curl_callback_compatible((expr), _curl_sockopt_callback2)) -typedef int (_curl_sockopt_callback1)(void *, curl_socket_t, curlsocktype); -typedef int (_curl_sockopt_callback2)(const void *, curl_socket_t, - curlsocktype); - -/* evaluates to true if expr is of type curl_opensocket_callback or - "similar" */ -#define _curl_is_opensocket_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_opensocket_callback) ||\ - _curl_callback_compatible((expr), _curl_opensocket_callback1) || \ - _curl_callback_compatible((expr), _curl_opensocket_callback2) || \ - _curl_callback_compatible((expr), _curl_opensocket_callback3) || \ - _curl_callback_compatible((expr), _curl_opensocket_callback4)) -typedef curl_socket_t (_curl_opensocket_callback1) - (void *, curlsocktype, struct curl_sockaddr *); -typedef curl_socket_t (_curl_opensocket_callback2) - (void *, curlsocktype, const struct curl_sockaddr *); -typedef curl_socket_t (_curl_opensocket_callback3) - (const void *, curlsocktype, struct curl_sockaddr *); -typedef curl_socket_t (_curl_opensocket_callback4) - (const void *, curlsocktype, const struct curl_sockaddr *); - -/* evaluates to true if expr is of type curl_progress_callback or "similar" */ -#define _curl_is_progress_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_progress_callback) || \ - _curl_callback_compatible((expr), _curl_progress_callback1) || \ - _curl_callback_compatible((expr), _curl_progress_callback2)) -typedef int (_curl_progress_callback1)(void *, - double, double, double, double); -typedef int (_curl_progress_callback2)(const void *, - double, double, double, double); - -/* evaluates to true if expr is of type curl_debug_callback or "similar" */ -#define _curl_is_debug_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_debug_callback) || \ - _curl_callback_compatible((expr), _curl_debug_callback1) || \ - _curl_callback_compatible((expr), _curl_debug_callback2) || \ - _curl_callback_compatible((expr), _curl_debug_callback3) || \ - _curl_callback_compatible((expr), _curl_debug_callback4) || \ - _curl_callback_compatible((expr), _curl_debug_callback5) || \ - _curl_callback_compatible((expr), _curl_debug_callback6) || \ - _curl_callback_compatible((expr), _curl_debug_callback7) || \ - _curl_callback_compatible((expr), _curl_debug_callback8)) -typedef int (_curl_debug_callback1) (CURL *, - curl_infotype, char *, size_t, void *); -typedef int (_curl_debug_callback2) (CURL *, - curl_infotype, char *, size_t, const void *); -typedef int (_curl_debug_callback3) (CURL *, - curl_infotype, const char *, size_t, void *); -typedef int (_curl_debug_callback4) (CURL *, - curl_infotype, const char *, size_t, const void *); -typedef int (_curl_debug_callback5) (CURL *, - curl_infotype, unsigned char *, size_t, void *); -typedef int (_curl_debug_callback6) (CURL *, - curl_infotype, unsigned char *, size_t, const void *); -typedef int (_curl_debug_callback7) (CURL *, - curl_infotype, const unsigned char *, size_t, void *); -typedef int (_curl_debug_callback8) (CURL *, - curl_infotype, const unsigned char *, size_t, const void *); - -/* evaluates to true if expr is of type curl_ssl_ctx_callback or "similar" */ -/* this is getting even messier... 
*/ -#define _curl_is_ssl_ctx_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_ssl_ctx_callback) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback1) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback2) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback3) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback4) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback5) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback6) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback7) || \ - _curl_callback_compatible((expr), _curl_ssl_ctx_callback8)) -typedef CURLcode (_curl_ssl_ctx_callback1)(CURL *, void *, void *); -typedef CURLcode (_curl_ssl_ctx_callback2)(CURL *, void *, const void *); -typedef CURLcode (_curl_ssl_ctx_callback3)(CURL *, const void *, void *); -typedef CURLcode (_curl_ssl_ctx_callback4)(CURL *, const void *, const void *); -#ifdef HEADER_SSL_H -/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX - * this will of course break if we're included before OpenSSL headers... - */ -typedef CURLcode (_curl_ssl_ctx_callback5)(CURL *, SSL_CTX, void *); -typedef CURLcode (_curl_ssl_ctx_callback6)(CURL *, SSL_CTX, const void *); -typedef CURLcode (_curl_ssl_ctx_callback7)(CURL *, const SSL_CTX, void *); -typedef CURLcode (_curl_ssl_ctx_callback8)(CURL *, const SSL_CTX, - const void *); -#else -typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback5; -typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback6; -typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback7; -typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback8; -#endif - -/* evaluates to true if expr is of type curl_conv_callback or "similar" */ -#define _curl_is_conv_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_conv_callback) || \ - _curl_callback_compatible((expr), _curl_conv_callback1) || \ - _curl_callback_compatible((expr), _curl_conv_callback2) || \ - _curl_callback_compatible((expr), _curl_conv_callback3) || \ - _curl_callback_compatible((expr), _curl_conv_callback4)) -typedef CURLcode (*_curl_conv_callback1)(char *, size_t length); -typedef CURLcode (*_curl_conv_callback2)(const char *, size_t length); -typedef CURLcode (*_curl_conv_callback3)(void *, size_t length); -typedef CURLcode (*_curl_conv_callback4)(const void *, size_t length); - -/* evaluates to true if expr is of type curl_seek_callback or "similar" */ -#define _curl_is_seek_cb(expr) \ - (_curl_is_NULL(expr) || \ - __builtin_types_compatible_p(__typeof__(expr), curl_seek_callback) || \ - _curl_callback_compatible((expr), _curl_seek_callback1) || \ - _curl_callback_compatible((expr), _curl_seek_callback2)) -typedef CURLcode (*_curl_seek_callback1)(void *, curl_off_t, int); -typedef CURLcode (*_curl_seek_callback2)(const void *, curl_off_t, int); - - -#endif /* __CURL_TYPECHECK_GCC_H */ diff --git a/deps/libcurl/lib/libcurl.a b/deps/libcurl/lib/libcurl.a deleted file mode 100644 index c63ac70946dddefb5b3b5c55cf82d693b731cad6..0000000000000000000000000000000000000000 Binary files a/deps/libcurl/lib/libcurl.a and /dev/null differ diff --git a/deps/pthread/CMakeLists.txt b/deps/pthread/CMakeLists.txt index dcd9ed035814ac9efd66340b58b287ccbabe61df..04e5be7472a9b8cbdb384348697b919bf2dd0ece 100644 --- a/deps/pthread/CMakeLists.txt +++ b/deps/pthread/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff 
--git a/deps/regex/CMakeLists.txt b/deps/regex/CMakeLists.txt index c7e983b992fe8a2dd21b8420e652358976663a37..054b093d07c386d7ff9b0ffc4c05909d79b33129 100644 --- a/deps/regex/CMakeLists.txt +++ b/deps/regex/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/wepoll/CMakeLists.txt b/deps/wepoll/CMakeLists.txt index a8b34112215fae0df7bcac034622015fd72d337b..a81fd782bbc4b05a1158273a7fcc6701bc4d980d 100644 --- a/deps/wepoll/CMakeLists.txt +++ b/deps/wepoll/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt index a8750471d694bcfa98b711348861449e3fc7b23a..f83aa70085491fb6575c0a6bf93252192cddd040 100644 --- a/deps/zlib-1.2.11/CMakeLists.txt +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/documentation/tdenginedocs-en/faq/index.html b/documentation/tdenginedocs-en/faq/index.html index a2fb8ba68ba909c2ad8273e6c5b8aa9e4a1d109b..2d716cd6dc1b4bc233dbe3a680fa17286f651c91 100644 --- a/documentation/tdenginedocs-en/faq/index.html +++ b/documentation/tdenginedocs-en/faq/index.html @@ -1,5 +1,5 @@ Documentation | Taos Data
 Back
 FAQ
-1. When encoutered with the error "failed to connect to server", what can I do?
+1. When encountered with the error "failed to connect to server", what can I do?
 The client may encounter connection errors. Please follow the steps below for troubleshooting:
 1. On the server side, execute systemctl status taosd to check the status of taosd service. If taosd is not running, start it and retry connecting.
 2.
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..c16673cba59d42ab00aa2975cfa2341ede9e5fbb
--- /dev/null
+++ b/documentation20/cn/00.index/docs.md
@@ -0,0 +1,153 @@
+# TDengine Documentation
+
+TDengine is a high-performance platform for storing, querying, and analyzing time-series big data, designed and optimized for IoT, connected vehicles, the industrial internet, operations monitoring, and similar scenarios. You can use it much like the relational database MySQL, but we recommend reading the documentation below carefully before you start, especially [Data Model](/architecture) and [Data Modeling](/model). Besides this documentation, feel free to [download the product white paper](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf). For the TDengine 1.6 documentation, click [here](https://www.taosdata.com/cn/documentation16/).
+
+## [Introducing TDengine](/evaluation)
+
+* [TDengine introduction and highlights](/evaluation#intro)
+* [Scenarios TDengine is suited for](/evaluation#scenes)
+* [TDengine performance metrics and how to verify them](/evaluation#)
+
+## [Getting Started](/getting-started)
+
+* [Quick install](/getting-started#install): install from source, packages, or Docker, done in seconds
+* [Easy launch](/getting-started#start): start and stop TDengine with systemctl
+* [Command-line program TAOS](/getting-started#console): a convenient way to access TDengine
+* [Quick experience](/getting-started#demo): run the sample programs to see high-speed inserts and queries
+* [Supported platforms](/getting-started#platforms): platforms supported by the TDengine server and client
+
+## [Overall Architecture](/architecture)
+
+* [Data model](/architecture#model): a relational model, but each data collection point gets its own table
+* [Cluster and basic logical units](/architecture#cluster): borrows NoSQL strengths, supports horizontal scaling and high reliability
+* [Storage model, partitioning and sharding](/architecture#sharding): tag data fully separated from time-series data, partitioned by vnode and by time
+* [Data writing and replication flow](/architecture#replication): write the WAL first, then the cache, then acknowledge the application; multiple replicas supported
+* [Caching and persistence](/architecture#persistence): the latest data is cached in memory, but persisted in columnar form with very high compression
+* [Data queries](/architecture#query): functions, time-axis aggregation, interpolation, multi-table aggregation
+
+## [Data Modeling](/model)
+
+* [Create a database](/model#create-db): one database for data collection points with similar data characteristics
+* [Create a super table (STable)](/model#create-stable): one STable for each type of data collection point
+* [Create tables](/model#create-table): use an STable as the template and create one table per data collection point
+
+## [TAOS SQL](/taos-sql)
+
+* [Supported data types](/taos-sql#data-type): timestamp, integer, float, boolean, string, and more
+* [Database management](/taos-sql#management): create, drop, and list databases
+* [Table management](/taos-sql#table): create, drop, list, and alter tables
+* [STable management](/taos-sql#super-table): create, drop, list, and alter super tables
+* [Tag management](/taos-sql#tags): add, drop, and modify tags
+* [Data writing](/taos-sql#insert): single or multiple records, single or multiple tables, historical data supported
+* [Data querying](/taos-sql#select): time ranges, value filters, sorting, manual paging of results, and more
+* [SQL functions](/taos-sql#functions): aggregation, selection, and computation functions such as avg, min, and diff
+* [Time-dimension aggregation](/taos-sql#aggregation): split table data into time windows, then aggregate for downsampling
+* [Limits](/taos-sql#limitation): bounds on databases, tables, SQL, and more
+* [Error codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal equivalents
+
+## [Efficient Data Writing](/insert)
+
+* [SQL writing](/insert#sql): use the SQL insert command to write one or more records into one or more tables
+* [Prometheus writing](/insert#prometheus): configure Prometheus to write data directly, no code required
+* [Telegraf writing](/insert#telegraf): configure Telegraf to write collected data directly, no code required
+* [EMQ X Broker](/insert#emq): configure EMQ X to write MQTT data directly, no code required
+* [HiveMQ Broker](/insert#hivemq): configure HiveMQ to write MQTT data directly, no code required
+
+## [Efficient Data Querying](/queries)
+
+* [Main query features](/queries#queries): standard functions, filter conditions, time-range queries
+* [Multi-table aggregation](/queries#aggregation): use super tables with tag filters for efficient aggregation
+* [Downsampling queries](/queries#sampling): aggregate by time window, with interpolation support
+
+## [Advanced Features](/advanced-features)
+
+* [Continuous Query](/advanced-features#continuous-query): sliding-window queries computed automatically on a schedule
+* [Data Subscription (Publisher/Subscriber)](/advanced-features#subscribe): like a typical message queue, applications can subscribe to newly arrived data
+* [Cache](/advanced-features#cache): the latest data of every device is cached in memory for fast retrieval
+* [Alert monitoring](/advanced-features#alert): detect out-of-bound values by configured rules and push alerts proactively
+
+## [Connectors](/connector)
+
+* [C/C++ Connector](/connector#c-cpp): the primary way to connect to a TDengine server, through the libtaos client library
+* [Java Connector (JDBC)](/connector/java): connect Java applications to TDengine through the standard JDBC API
+* [Python Connector](/connector#python): a driver connecting Python applications to a TDengine server
+* [RESTful Connector](/connector#restful): the simplest way to connect to a TDengine server
+* [Go Connector](/connector#go): a driver connecting Go applications to a TDengine server
+* [Node.js Connector](/connector#nodejs): a driver connecting Node.js applications to a TDengine server
Connector](/connector#nodejs):给node应用提供一个连接TDengine服务器的驱动 +* [C# Connector](/connector#csharp):给C#应用提供一个连接TDengine服务器的驱动 +* [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它 + +## [与其他工具的连接](/connections) + +* [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据 +* [Matlab](/connections#matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据 +* [R](/connections#r):通过配置R的JDBC数据源访问保存在TDengine的数据 +* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过IDEA 数据库管理工具可视化使用 TDengine + +## [TDengine集群的安装、管理](/cluster) + +* [准备工作](/cluster#prepare):部署环境前的几点注意事项 +* [创建第一个节点](/cluster#node-one):与快捷安装完全一样,非常简单 +* [创建后续节点](/cluster#node-other):配置新节点的taos.cfg, 在现有集群添加新的节点 +* [节点管理](/cluster#management):增加、删除、查看集群的节点 +* [Vnode 的高可用性](/cluster#high-availability):通过多副本的机制来提供 Vnode 的高可用性 +* [Mnode 的管理](/cluster#mnode):系统自动创建、无需任何人工干预 +* [负载均衡](/cluster#load-balancing):一旦节点个数或负载有变化,自动进行 +* [节点离线处理](/cluster#offline):节点离线超过一定时长,将从集群中剔除 +* [Arbitrator](/cluster#arbitrator):对于偶数个副本的情形,使用它可以防止split brain + +## [TDengine的运营和维护](/administrator) + +* [容量规划](/administrator#planning):根据场景,估算硬件资源 +* [容错和灾备](/administrator#tolerance):设置正确的WAL和数据副本数 +* [系统配置](/administrator#config):端口,缓存大小,文件块大小和其他系统配置 +* [用户管理](/administrator#user):添加、删除TDengine用户,修改用户密码 +* [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入 +* [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出 +* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等 +* [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录 +* [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表 + +## TDengine的技术设计 + +* [系统模块](/architecture/taosd):taosd的功能和模块划分 +* [数据复制](/architecture/replica):支持实时同步、异步复制,保证系统的High Availibility +* [技术博客](https://www.taosdata.com/cn/blog/?categories=3):更多的技术分析和架构设计文章 + +## 常用工具 + +* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html) +* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html) +* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html) +* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI) +* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/alibaba/DataX) + +## TDengine与其他数据库的对比测试 + +* [用InfluxDB开源的性能测试工具对比InfluxDB和TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) +* [TDengine与OpenTSDB对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) +* [TDengine与Cassandra对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) +* [TDengine与InfluxDB对比测试](https://www.taosdata.com/blog/2019/07/19/419.html) +* [TDengine与InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) + +## 物联网大数据 + +* [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html) +* [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html) +* [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html) +* [物联网、车联网、工业互联网大数据平台,为什么推荐使用TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html) + +## 培训和FAQ + +* [FAQ:常见问题与答案](/faq) +* [技术公开课:开源、高效的物联网大数据平台,TDengine内核技术剖析](https://www.taosdata.com/blog/2020/12/25/2126.html) +* [TDengine视频教程-快速上手](https://www.taosdata.com/blog/2020/11/11/1941.html) +* [TDengine视频教程-数据建模](https://www.taosdata.com/blog/2020/11/11/1945.html) +* [TDengine视频教程-集群搭建](https://www.taosdata.com/blog/2020/11/11/1961.html) +* [TDengine视频教程-Go Connector](https://www.taosdata.com/blog/2020/11/11/1951.html) +* [TDengine视频教程-JDBC Connector](https://www.taosdata.com/blog/2020/11/11/1955.html) +* 
[TDengine视频教程-NodeJS Connector](https://www.taosdata.com/blog/2020/11/11/1957.html) +* [TDengine视频教程-Python Connector](https://www.taosdata.com/blog/2020/11/11/1963.html) +* [TDengine视频教程-RESTful Connector](https://www.taosdata.com/blog/2020/11/11/1965.html) +* [TDengine视频教程-“零”代码运维监控](https://www.taosdata.com/blog/2020/11/11/1959.html) +* [应用案例:一些使用实例来解释如何使用TDengine](https://www.taosdata.com/cn/blog/?categories=4) diff --git a/documentation20/webdocs/markdowndocs/Evaluation-ch.md b/documentation20/cn/01.evaluation/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/Evaluation-ch.md rename to documentation20/cn/01.evaluation/docs.md index fa6cec6e488d144a7009dc52772987380d5065da..0ae2106ff2a63696dc8bbc51d25bbf5e811ef561 100644 --- a/documentation20/webdocs/markdowndocs/Evaluation-ch.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -1,6 +1,6 @@ # TDengine 介绍 -## TDengine 简介 +## TDengine 简介 TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。 @@ -15,10 +15,11 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 -
    +![TDengine技术生态图](page://images/eco_system.png)
    图 1. TDengine技术生态图
    -## TDengine 总体适用场景 + +## TDengine 总体适用场景 作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。 diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/cn/02.getting-started/docs.md similarity index 91% rename from documentation20/webdocs/markdowndocs/Getting Started-ch.md rename to documentation20/cn/02.getting-started/docs.md index b8b298b9501a43396e134eb1efa9ba6e6a029b79..c9c82942f3bd47fa774636dd5cf7c5b33f879209 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -1,10 +1,10 @@ # 立即开始 -## 快捷安装 +## 快捷安装 TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 -### 通过源码安装 +### 通过源码安装 请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装. @@ -12,17 +12,15 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html) -### 通过安装包安装 +### 通过安装包安装 TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择: -- TDengine-server-2.0.10.0-Linux-x64.rpm (4.2M) -- TDengine-server-2.0.10.0-Linux-x64.deb (2.7M) -- TDengine-server-2.0.10.0-Linux-x64.tar.gz (4.5M) +安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。 -具体的安装过程,请参见TDengine多种安装包的安装和卸载以及视频教程。 +具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。 -## 轻松启动 +## 轻松启动 安装成功后,用户可使用`systemctl`命令来启动TDengine的服务进程。 @@ -52,8 +50,7 @@ $ systemctl status taosd 如果系统中不支持systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。 - -## TDengine命令行程序 +## TDengine命令行程序 执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。 @@ -61,7 +58,7 @@ $ systemctl status taosd $ taos ``` -如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: +如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: ```cmd taos> @@ -117,7 +114,8 @@ taos> source ; - ctrl+c 中止正在进行中的查询 - 执行`RESET QUERY CACHE`清空本地缓存的表的schema -## TDengine 极速体验 + +## TDengine 极速体验 启动TDengine的服务,在Linux终端执行taosdemo @@ -164,7 +162,6 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); **Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。 - ## 客户端和报警模块 如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下: @@ -178,8 +175,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); - TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M) - -## 支持平台列表 +## 支持平台列表 ### TDengine服务器支持的平台列表 @@ -220,5 +216,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 -请跳转到 [连接器 ](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 +请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 diff --git a/documentation20/webdocs/markdowndocs/taosd-ch.md b/documentation20/cn/03.architecture/01.taosd/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/taosd-ch.md rename to documentation20/cn/03.architecture/01.taosd/docs.md index 08be0c163e7076b58f03ff8ea3165e902a80fe64..66d51ed2dc2ea1546ab167cad680c20b3fa9729c 100644 --- 
a/documentation20/webdocs/markdowndocs/taosd-ch.md +++ b/documentation20/cn/03.architecture/01.taosd/docs.md @@ -3,9 +3,10 @@ 逻辑上,TDengine系统包含dnode, taosc和App,dnode是服务器侧执行代码taosd的一个运行实例,因此taosd是TDengine的核心,本文对taosd的设计做一简单的介绍,模块内的实现细节请见其他文档。 ## 系统模块图 + taosd包含rpc, dnode, vnode, tsdb, query, cq, sync, wal, mnode, http, monitor等模块,具体如下图: -
    +![modules.png](page://images/architecture/modules.png) taosd的启动入口是dnode模块,dnode然后启动其他模块,包括可选配置的http, monitor模块。taosc或dnode之间交互的消息都是通过rpc模块进行,dnode模块根据接收到的消息类型,将消息分发到vnode或mnode的消息队列,或由dnode模块自己消费。dnode的工作线程(worker)消费消息队列里的消息,交给mnode或vnode进行处理。下面对各个模块做简要说明。 @@ -40,13 +41,14 @@ RPC模块还提供数据压缩功能,如果数据包的字节数超过系统 taosd的消息消费由dnode通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下: -
    +![dnode.png](page://images/architecture/dnode.png) ## VNODE模块 vnode是一独立的数据存储查询逻辑单元,但因为一个vnode只能容许一个DB,因此vnode内部没有account, DB, user等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的TSDB,负责查询的Query, 负责数据复制的sync,负责数据库日志的wal, 负责连续查询的cq(continuous query), 负责事件触发的流计算的event等模块,这些子模块只与vnode模块发生关系,与其他模块没有任何调用关系。模块图如下: -
    +![vnode.png](page://images/architecture/vnode.png) + vnode模块向下,与dnodeVRead,dnodeVWrite发生互动,向上,与子模块发生互动。它主要的功能有: - 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过vnode模块进行; @@ -68,30 +70,37 @@ mnode是整个系统的大脑,负责整个系统的资源调度,负责meta d mnode里还负责account, user, DB, stable, table, vgroup, dnode的创建、删除与更新。mnode不仅把这些entity的meta data保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在mnode(保存在vnode),而且子表不维护自己的schema, 而是与stable共享。为减小mnode的查询压力,taosc会缓存table、stable的schema。对于查询类的操作,各个slave mnode也可以提供,以减轻master压力。 ## TSDB模块 + TSDB模块是VNODE中的负责快速高并发地存储和读取属于该VNODE的表的元数据及采集的时序数据的引擎。除此之外,TSDB还提供了表结构的修改、表标签值的修改等功能。TSDB提供API供VNODE和Query等模块调用。TSDB中存储了两类数据,1:元数据信息;2:时序数据 ### 元数据信息 + TSDB中存储的元数据包含属于其所在的VNODE中表的类型,schema的定义等。对于超级表和超级表下的子表而言,又包含了tag的schema定义以及子表的tag值等。对于元数据信息而言,TSDB就相当于一个全内存的KV型数据库,属于该VNODE的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB还对其中的子表,按照tag的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到meta文件中。meta文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB也提供了对于元数据的修改操作,如表schema的修改,tag schema的修改以及tag值的修改等。 ### 时序数据 + 每个TSDB在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入TSDB时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。 而时序数据在写入到TSDB的数据文件时,是以列(column)的形式存储的。TSDB中的数据文件包含多个数据文件组,每个数据文件组中又包含.head、.data和.last三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB中的数据文件组是按照时间跨度进行分片的,默认是10天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在TSDB的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在.head文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.head和.last文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入.data文件中,否则,会写入.last文件中,等待下次落盘时合并数据写入.data文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。 ## Query模块 + 该模块负责整体系统的查询处理。客户端调用该该模块进行SQL语法解析,并将查询或写入请求发送到vnode,同时负责针对超级表的查询进行二阶段的聚合操作。在Vnode端,该模块调用TSDB模块读取系统中存储的数据进行查询处理。Query模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0查询模块设计》。 ## SYNC模块 + 该模块实现数据的多副本复制,包括vnode与mnode的数据复制,支持异步和同步两种复制方式,以满足meta data与时序数据不同复制的需求。因为它为mnode与vnode共享,系统为mnode副本预留了一个特殊的vgroup ID:1。因此vnode group的ID是从2开始的。 -每个vnode/mnode模块实例会有一对应的sync模块实例,他们是一一对应的。详细设计请见TDengine 2.0 数据复制模块设计 +每个vnode/mnode模块实例会有一对应的sync模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](https://www.taosdata.com/cn/documentation/architecture/replica/) ## WAL模块 + 该模块负责将新插入的数据写入write ahead log(WAL), 为vnode, mnode共享。以保证服务器crash或其他故障,能从WAL中恢复数据。 每个vnode/mnode模块实例会有一对应的wal模块实例,是完全一一对应的。WAL的落盘操作由两个参数walLevel, fsync控制。看具体场景,如果要100%保证数据不会丢失,需要将walLevel配置为2,fsync设置为0,每条数据插入请求,都会实时落盘后,才会给应用确认 ## HTTP模块 + 该模块负责处理系统对外的RESTful接口,可以通过配置,由dnode启动或停止。 该模块将接收到的RESTful请求,做了各种合法性检查后,将其变成标准的SQL语句,通过taosc的异步接口,将请求发往整个系统中的任一dnode。收到处理后的结果后,再翻译成HTTP协议,返回给应用。 @@ -99,6 +108,7 @@ TSDB中存储的元数据包含属于其所在的VNODE中表的类型,schema 如果HTTP模块启动,就意味着启动了一个taosc的实例。任一一个dnode都可以启动该模块,以实现对RESTful请求的分布式处理。 ## Monitor模块 + 该模块负责检测一个dnode的运行状态,可以通过配置,由dnode启动或停止。原则上,每个dnode都应该启动一个monitor实例。 Monitor采集TDengine里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集CPU、内存、网络等资源的使用情况(采集周期由系统配置参数monitorInterval控制)。获得这些数据后,monitor模块将采集的数据写入系统的日志库(DB名字由系统配置参数monitorDbName控制)。 diff --git a/documentation20/webdocs/markdowndocs/replica-ch.md b/documentation20/cn/03.architecture/02.replica/docs.md similarity index 95% rename from documentation20/webdocs/markdowndocs/replica-ch.md rename to documentation20/cn/03.architecture/02.replica/docs.md index 4d714fb5502534521e47d761e24af67b31248f51..8e1b1e3ab1513fbeaa5b9b805263485a13483b9b 100644 --- a/documentation20/webdocs/markdowndocs/replica-ch.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -10,13 +10,13 @@ 
TDengine面向的是物联网场景,需要支持数据的实时复制,来最 数据复制是与数据存储(写入、读取)密切相关的,但两者又是相对独立,可以完全脱耦的。在TDengine系统中,有两种不同类型的数据,一种是时序数据,由TSDB模块负责;一种是元数据(Meta Data), 由MNODE负责。这两种性质不同的数据都需要同步功能。数据复制模块通过不同的实例启动配置参数,为这两种类型数据都提供同步功能。 -在阅读本文之前,请先阅读《TDengine 2.0 整体架构》,了解TDengine的集群设计和基本概念 +在阅读本文之前,请先阅读《[TDengine 2.0 整体架构](https://www.taosdata.com/cn/documentation/architecture/)》,了解TDengine的集群设计和基本概念 特别注明:本文中提到数据更新操作包括数据的增加、删除与修改。 ## 基本概念和定义 -TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgoup也指mnode group, 除非特别注明。 +TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgroup也指mnode group, 除非特别注明。 **版本(version)**: @@ -90,7 +90,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 具体的流程图如下: -
    +![replica-master.png](page://images/architecture/replica-master.png) 选择Master的具体规则如下: @@ -105,7 +105,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 如果vnode A是master, vnode B是slave, vnode A能接受客户端的写请求,而vnode B不能。当vnode A收到写的请求后,遵循下面的流程: -
    +![replica-forward.png](page://images/architecture/replica-forward.png) 1. 应用对写请求做基本的合法性检查,通过,则给改请求包打上一个版本号(version, 单调递增) 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) @@ -128,19 +128,19 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。 3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。 4. 当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则 - 1. index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。 - 2. 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用 - 3. 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用 - 4. 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。 + * index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。 + * 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用 + * 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用 + * 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。 5. 当sync模块调用回调函数getWalInfo获得wal信息时,有如下规则 - 1. index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字 - 2. 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件 - 3. 返回的文件名为空,那表示没有WAL文件 + * index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字 + * 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件 + * 返回的文件名为空,那表示没有WAL文件 6. 无论是getFileInfo, 还是getWalInfo, 只要获取出错(不是文件不存在),返回-1即可,系统会报错,停止同步 整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下: -
    +![replica-forward.png](page://images/architecture/replica-forward.png) 1. 通过已经建立的TCP连接,发送sync req给master节点 2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/cn/03.architecture/docs.md similarity index 92% rename from documentation20/webdocs/markdowndocs/architecture-ch.md rename to documentation20/cn/03.architecture/docs.md index 8921633c8d31f0fc2c2ab3f00d36e8e57d5dbb9a..dc710704e50c5f6b3504a8427ae7088a15b12e77 100644 --- a/documentation20/webdocs/markdowndocs/architecture-ch.md +++ b/documentation20/cn/03.architecture/docs.md @@ -1,7 +1,9 @@ # 数据模型和整体架构 -## 数据模型 +## 数据模型 + ### 物联网典型场景 + 在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格:
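(The sample table itself is untouched by this patch, so the hunk elides it. As a minimal sketch of what such smart-meter data looks like — the column names and the d1001 values are taken from the CREATE/INSERT examples later in this document; the d1002 tag values are illustrative:)

| Device ID | Time Stamp | current | voltage | phase | location | groupId |
| --------- | ------------- | ------- | ------- | ----- | ---------------- | ------- |
| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 |
| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 |
| d1002 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 3 |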
    @@ -103,6 +105,7 @@ 每一条记录都有设备ID,时间戳,采集的物理量(如上图中的电流、电压、相位),还有与每个设备相关的静态标签(如上述表一中的位置Location和分组groupId)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。 ### 数据特征 + 除时序特征外,仔细研究发现,物联网、车联网、运维监测类数据还具有很多其他明显的特征: 1. 数据高度结构化; @@ -119,9 +122,11 @@ 充分利用上述特征,TDengine 采取了经特殊优化的存储和计算设计来处理时序数据,它将系统处理能力显著提高,同时大幅降低了系统运维的复杂度。 ### 关系型数据库模型 + 因为采集的数据一般是结构化数据,同时为降低学习门槛,TDengine采用传统的关系型数据库模型管理数据。因此用户需要先创建库,然后创建表,之后才能插入或查询数据。TDengine采用的是结构化存储,而不是NoSQL的key-value存储。 ### 一个数据采集点一张表 + 为充分利用其数据的时序性和其他数据特点,TDengine要求**对每个数据采集点单独建表**(比如有一千万个智能电表,就需创建一千万张表,上述表格中的d1001, d1002, d1003, d1004都需单独建表),用来存储这个采集点所采集的时序数据。这种设计有几大优点: 1. 能保证一个采集点的数据在存储介质上是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。 @@ -133,6 +138,7 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。每个数据采集点可能同时采集多个物理量(如上表中的curent, voltage, phase),每个物理量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集的数据,TDengine将自动按照时间戳建立索引,但对采集的物理量不建任何索引。数据用列式存储方式保存。 ### 超级表:同一类型数据采集点的集合 + 由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine引入超级表(Super Table,简称为STable)的概念。 超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的结合),除需要定义采集量的表结构之外,还需要定义其标签的schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。 如果整个系统有N个不同类型的数据采集点,就需要建立N个超级表。 @@ -141,16 +147,20 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。 当对多个具有相同数据类型的数据采集点进行聚合操作时,TDengine将先把满足标签过滤条件的表从超级表的中查找出来,然后再扫描这些表的时序数据,进行聚合操作,这样能将需要扫描的数据集大幅减少,从而大幅提高聚合计算的性能。 -## 集群与基本逻辑单元 +## 集群与基本逻辑单元 + TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以自动化负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。 ### 主要逻辑单元 + TDengine 分布式架构的逻辑结构图如下: -
    + +![TDengine架构示意图](page://images/architecture/structure.png)
    图 1 TDengine架构示意图
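A minimal taos.cfg sketch tying the units in the figure above to concrete settings (hostnames and values are illustrative; fqdn, serverPort, firstEp and secondEp are the parameters discussed in the paragraphs that follow):

```
# identity of this dnode: EP = FQDN + serverPort
fqdn       node1.example.com
serverPort 6030

# EPs probed at startup to locate the mnode
firstEp    node1.example.com:6030
secondEp   node2.example.com:6030
```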
    + 一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。 -**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文《一篇文章说清楚TDengine的FQDN》。 +**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 **数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 @@ -163,26 +173,30 @@ TDengine 分布式架构的逻辑结构图如下: **TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC, C/C++/C#/Python/Go/Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。 ### 节点之间的通讯 + **通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。 **FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。 -**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。 +**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 
为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。 **集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。 -**集群内部通讯**: 各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步,1:检查mnodeEpList文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;2:检查系统配置文件taos.cfg, 获取mnode EP配置参数first, second,如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;3:将自己的EP设为mnode EP, 并独立运行起来。获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 +**集群内部通讯**: 各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步,1:检查mnodeEpSet文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;2:检查系统配置文件taos.cfg, 获取节点配置参数firstEp, secondEp,(这两个参数指定的节点可以是不带mnode的普通节点,这样的话,节点被连接时会尝试重定向到mnode节点)如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;3:将自己的EP设为mnode EP, 并独立运行起来。获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 **MNODE的选择:** TDengine逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的End Point, 并与获取的mnode EP List进行比对,如果在其中,该数据节点认为自己应该启动mnode模块,成为mnode。如果自己的EP不在mnode EP List里,则不启动mnode模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode有可能迁移至新的dnode,但一切都是透明的,无需人工干预,配置参数的修改,是mnode自己根据资源做出的决定。 -**新数据节点的加入**:系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用TDengine CLI连接到现有工作的数据节点,然后用命令”create dnode"将新的数据节点的End Point添加进去; 第二步:在新的数据节点的系统配置参数文件taos.cfg里,将first, second参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 +**新数据节点的加入**:系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用TDengine CLI连接到现有工作的数据节点,然后用命令”create dnode"将新的数据节点的End Point添加进去; 第二步:在新的数据节点的系统配置参数文件taos.cfg里,将firstEp, secondEp参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 **重定向**:无论是dnode还是taosc,最先都是要发起与mnode的连接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起连接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的连接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立连接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。 ### 一个典型的消息流程 + 为解释vnode, mnode, taosc和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 -
    + +![TDengine典型的操作流程](page://images/architecture/message.png)
    图 2 TDengine典型的操作流程
    + 1. 应用通过JDBC、ODBC或其他API接口发起插入数据的请求。 2. taosc会检查缓存,看是否保存有该表的meta data。如果有,直接到第4步。如果没有,taosc将向mnode发出get meta-data请求。 3. mnode将该表的meta-data返回给taosc。Meta-data包含有该表的schema, 而且还有该表所属的vgroup信息(vnode ID以及所在的dnode的End Point,如果副本数为N,就有N组End Point)。如果taosc迟迟得不到mnode回应,而且存在多个mnode, taosc将向下一个mnode发出请求。 @@ -198,14 +212,15 @@ TDengine 分布式架构的逻辑结构图如下: 通过taosc缓存机制,只有在第一次对一张表操作时,才需要访问mnode,因此mnode不会成为系统瓶颈。但因为schema有可能变化,而且vgroup有可能发生改变(比如负载均衡发生),因此taosc会定时和mnode交互,自动更新缓存。 -## 存储模型与数据分区、分片 +## 存储模型与数据分区、分片 ### 存储模型 + TDengine存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: -- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除和更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在update参数设置为1时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 - 标签数据:存放于vnode里的meta文件,支持增删改查四个标准操作。数据量不大,有N张表,就有N条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此TDengine支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。 -- 其他元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等等,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 +- 元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 与典型的NoSQL存储模型相比,TDengine将标签数据与时序数据完全分离存储,它具有两大优势: @@ -213,6 +228,7 @@ TDengine存储的数据包括采集的时序数据以及库、表相关的元数 - 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。而且标签数据采用全内存的结构进行管理和维护,千万级别规模的标签数据查询可以在毫秒级别返回。 ### 数据分片 + 对于海量的数据管理,为实现水平扩展,一般都需要采取分片(Sharding)分区(Partitioning)策略。TDengine是通过vnode来实现数据分片的,通过一个时间段一个数据文件来实现时序数据分区的。 vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和计算功能。为便于负载均衡、数据恢复、支持异构环境,TDengine将一个数据节点根据其计算和存储资源切分为多个vnode。这些vnode的管理是TDengine自动完成的,对应用完全透明。 @@ -224,11 +240,13 @@ vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和 每张表的meda data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 ### 数据分区 + TDengine除vnode分片之外,还对时序数据按照时间段进行分区。每个数据文件只包含一个时间段的时序数据,时间段的长度由DB的配置参数days决定。这种按时间段分区的方法还便于高效实现数据的保留策略,只要数据文件超过规定的天数(系统配置参数keep),将被自动删除。而且不同的时间段可以存放于不同的路径和存储介质,以便于大数据的冷热管理,实现多级存储。 总的来说,**TDengine是通过vnode以及时间两个维度,对大数据进行切分**,便于并行高效的管理,实现水平扩展。 ### 负载均衡 + 每个dnode都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此mnode了解整个集群的状态。基于整体状态,当mnode发现某个dnode负载过重,它会将dnode上的一个或多个vnode挪到其他dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。 如果mnode一段时间没有收到dnode的状态报告,mnode会认为这个dnode已经离线。如果离线时间超过一定时长(时长由配置参数offlineThreshold决定),该dnode将被mnode强制剔除出集群。该dnode上的vnodes如果副本数大于一,系统将自动在其他dnode上创建新的副本,以保证数据的副本数。如果该dnode上还有mnode, 而且mnode的副本数大于一,系统也将自动在其他dnode上创建新的mnode, 以保证mnode的副本数。 @@ -238,13 +256,17 @@ TDengine除vnode分片之外,还对时序数据按照时间段进行分区。 负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 **提示:负载均衡由参数balance控制,决定开启/关闭自动负载均衡。** -## 数据写入与复制流程 +## 数据写入与复制流程 + 如果一个数据库有N个副本,那一个虚拟节点组就有N个虚拟节点,但是只有一个是Master,其他都是slave。当应用将新的记录写入系统时,只有Master vnode能接受写的请求。如果slave vnode收到写的请求,系统将通知taosc需要重新定向。 + ### Master vnode写入流程 + Master Vnode遵循下面的写入流程: -
    +![TDengine Master写入流程](page://images/architecture/write_master.png)
    图 3 TDengine Master写入流程
    + 1. Master vnode收到应用的数据插入请求,验证OK,进入下一步; 2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; 3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version); @@ -253,10 +275,12 @@ Master Vnode遵循下面的写入流程: 6. 如果第2,3,4步中任何一步失败,将直接返回错误给应用。 ### Slave vnode写入流程 + 对于slave vnode, 写入流程是: -
    +![TDengine Slave写入流程](page://images/architecture/write_slave.png)
    图 4 TDengine Slave写入流程
    + 1. Slave vnode收到Master vnode转发了的数据插入请求。 2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; 3. 写入内存,更新内存中的skip list。 @@ -264,6 +288,7 @@ Master Vnode遵循下面的写入流程: 与Master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。 ### 异地容灾、IDC迁移 + 从上述Master和Slave流程可以看出,TDengine采用的是异步复制的方式进行数据同步。这种方式能够大幅提高写入性能,网络延时对写入速度不会有大的影响。通过配置每个物理节点的IDC和机架号,可以保证对于一个虚拟节点组,虚拟节点由来自不同IDC、不同机架的物理节点组成,从而实现异地容灾。因此TDengine原生支持异地容灾,无需再使用其他工具。 另外一方面,TDengine支持动态修改副本数,一旦副本数增加,新加入的虚拟节点将立即进入数据同步流程,同步结束后,新加入的虚拟节点即可提供服务。而在同步过程中,master以及其他已经同步的虚拟节点都可以对外提供服务。利用这一特性,TDengine可以实现无服务中断的IDC机房迁移。只需要将新IDC的物理节点加入现有集群,等数据同步完成后,再将老的IDC的物理节点从集群中剔除即可。 @@ -280,6 +305,7 @@ Master Vnode遵循下面的写入流程: **提示:该功能暂未提供** ### 主从选择 + Vnode会保持一个数据版本号(Version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增一。 一个vnode启动时,角色(master、slave) 是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立TCP连接,并互相交换status,其中包括version和自己的角色。通过status的交换,系统进入选主流程,规则如下: @@ -289,17 +315,20 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存 3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master 4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master -更多的关于数据复制的流程,请见TDengine 2.0数据复制模块设计。 +更多的关于数据复制的流程,请见[TDengine 2.0数据复制模块设计](https://www.taosdata.com/cn/documentation/architecture/replica/)。 ### 同步复制 + 对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此TDengine提供同步复制的机制供用户选择。在创建数据库时,除指定副本数replica之外,用户还需要指定新的参数quorum。如果quorum大于一,它表示每次Master转发给副本时,需要等待quorum-1个回复确认,才能通知应用,数据在slave已经写入成功。如果在一定的时间内,得不到quorum-1个回复确认,master vnode将返回错误给应用。 采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。 注:vnode之间的同步复制仅仅企业版支持 -## 缓存与持久化 +## 缓存与持久化 + ### 缓存 + TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。 TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。 @@ -307,6 +336,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接 每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。 ### 持久化存储 + TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制的增长。 为充分利用时序数据特点,TDengine将一个vnode保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数days决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。 @@ -322,6 +352,7 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久 数据写入磁盘时,根据系统配置参数comp决定是否压缩数据。TDengine提供了三种压缩选项:无压缩、一阶段压缩和两阶段压缩,分别对应comp值为0、1和2的情况。一阶段压缩根据数据的类型进行了相应的压缩,压缩算法包括delta-delta编码、simple 8B方法、zig-zag编码、LZ4等算法。二阶段压缩在一阶段压缩的基础上又用通用压缩算法进行了压缩,压缩率更高。 ### 多级存储 + 在默认配置下,TDengine会将所有数据保存在/var/lib/taos目录下,而且每个vnode的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine可通过配置系统参数dataDir让多个挂载的硬盘被系统同时使用。除此之外,TDengine也提供了数据分级存储的功能,即根据数据文件的新老程度存储在不同的存储介质上。比如最新的数据存储在SSD上,超过一周的数据存储在本地硬盘上,超过4周的数据存储在网络存储设备上,这样来降低存储成本,而又保证高效的访问数据。数据在不同存储介质上的移动是由系统自动完成的,对应用是完全透明的。数据的分级存储也是通过系统参数dataDir来配置。 dataDir的配置格式如下: @@ -345,10 +376,12 @@ dataDir /mnt/disk6/taos 2 注:多级存储功能仅企业版支持 -## 数据查询 +## 数据查询 + TDengine提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine的查询处理需要客户端、vnode, mnode节点协同完成。 ### 单表查询 + 
SQL语句的解析和校验工作在客户端完成。解析SQL语句并生成抽象语法树(Abstract Syntax Tree, AST),然后对其进行校验和检查。以及向管理节点(mnode)请求查询中指定表的元数据信息(table metadata)。 根据元数据信息中的End Point信息,将查询请求序列化后发送到该表所在的数据节点(dnode)。dnode接收到查询请求后,识别出该查询请求指向的虚拟节点(vnode),将消息转发到vnode的查询执行队列。vnode的查询执行线程建立基础的查询执行环境,并立即返回该查询请求,同时开始执行该查询。 @@ -374,18 +407,21 @@ select count(*) from d1001 interval(1h) fill(prev); 针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。 ### 多表聚合查询 + TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。 应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: -
    +![多表聚合查询原理图](page://images/architecture/multi_tables.png)
    图 5 多表聚合查询原理图
    -1:应用将一个查询条件发往系统; -2: taosc将超级表的名字发往 Meta Node(管理节点); -3:管理节点将超级表所拥有的 vnode 列表发回 taosc; -4:taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点; -5:每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc; -6:taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。 + +1. 应用将一个查询条件发往系统; +2. taosc将超级表的名字发往 Meta Node(管理节点); +3. 管理节点将超级表所拥有的 vnode 列表发回 taosc; +4. taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点; +5. 每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc; +6. taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。 由于TDengine在vnode内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个vnode/dnode,聚合计算操作在多个vnode里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。 ### 预计算 + 为有效提升查询处理的性能,针对物联网数据的不可更改的特点,在数据块头部记录该数据块中存储数据的统计信息:包括最大值、最小值、和。我们称之为预计算单元。如果查询处理涉及整个数据块的全部数据,直接使用预计算结果,完全不需要读取数据块的内容。由于预计算数据量远小于磁盘上存储的数据块数据的大小,对于磁盘IO为瓶颈的查询处理,使用预计算结果可以极大地减小读取IO压力,加速查询处理的流程。预计算机制与Postgre SQL的索引BRIN(block range index)有异曲同工之妙。 diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/cn/04.model/docs.md similarity index 81% rename from documentation20/webdocs/markdowndocs/Model-ch.md rename to documentation20/cn/04.model/docs.md index ea1be899a85fe6bb31ab03674ab496d7b301432f..6f85381588c6cd131f2204bba13e64e7058f93f2 100644 --- a/documentation20/webdocs/markdowndocs/Model-ch.md +++ b/documentation20/cn/04.model/docs.md @@ -4,16 +4,16 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 -关于数据建模请参考视频教程。 +关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。 -## 创建库 +## 创建库 不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: ```mysql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; ``` -上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见TAOS SQL +上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 创建库之后,需要使用SQL命令USE将当前库切换过来,例如: @@ -28,23 +28,31 @@ USE power; - 任何一张表或超级表是属于一个库的,在创建表之前,必须先创建库。 - 处于两个不同库的表是不能进行JOIN操作的。 -## 创建超级表 +## 创建超级表 + 一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表: + ```mysql -CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); ``` -与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 TAOS SQL 一节。 + +**注意:**这一指令中的 STABLE 关键字,在 2.0.15 之前的版本中需写作 TABLE 。 + +与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](https://www.taosdata.com/cn/documentation/taos-sql#super-table) 章节。 每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。 
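A minimal SQL sketch of that multi-super-table case, following the CREATE STABLE syntax shown above (the wind-turbine STables and their column names are hypothetical):

```mysql
CREATE STABLE turbine_power (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), turbineId int);
CREATE STABLE turbine_env (ts timestamp, temperature float, humidity float) TAGS (location binary(64), turbineId int);
```

Each STable groups only the metrics sampled with one shared timestamp, so the two sensor groups of the same turbine live in separate super tables.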
一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。 -## 创建表 +## 创建表 + TDengine对每个数据采集点需要独立建表。与标准的关系型数据一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以表一中的智能电表为例,可以使用如下的SQL命令建表: -```cmd + +```mysql CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); ``` -其中d1001是表名,meters是超级表的表名,后面紧跟标签Location的具体标签值”Beijing.Chaoyang",标签groupId的具体标签值2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 TAOS SQL。 + +其中d1001是表名,meters是超级表的表名,后面紧跟标签Location的具体标签值”Beijing.Chaoyang",标签groupId的具体标签值2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](https://www.taosdata.com/cn/documentation/taos-sql#table) 章节。 **注意:**目前 TDengine 没有从技术层面限制使用一个 database (dbA)的超级表作为模板建立另一个 database (dbB)的子表,后续会禁止这种用法,不建议使用这种方法建表。 @@ -52,12 +60,16 @@ TDengine建议将数据采集点的全局唯一ID作为表名(比如设备序列 **自动建表**:在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。比如: -```cmd +```mysql INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); ``` -上述SQL语句将记录(now, 10.2, 219, 0.32) 插入进表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。 + +上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。 + +关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。 ## 多列模型 vs 单列模型 + TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。 TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。 diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/cn/05.insert/docs.md similarity index 72% rename from documentation20/webdocs/markdowndocs/insert-ch.md rename to documentation20/cn/05.insert/docs.md index 7d380ac952dce5f57ff259159c33dd9e9b53edf3..ce2d65e7d2259c6dac9efc67a61f7c009dd96984 100644 --- a/documentation20/webdocs/markdowndocs/insert-ch.md +++ b/documentation20/cn/05.insert/docs.md @@ -2,7 +2,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 -## SQL写入 +## SQL写入 应用通过C/C++, JDBC, GO, 或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql @@ -18,7 +18,7 @@ TDengine也支持一次向多个表写入数据,比如下面这条命令就向 INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); ``` -详细的SQL INSERT语法规则请见TAOS SQL +详细的SQL INSERT语法规则请见 [TAOS SQL 的数据写入](https://www.taosdata.com/cn/documentation/taos-sql#insert) 章节。 **Tips:** @@ -27,11 +27,13 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认(没有使用 UPDATE 1 创建数据库)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。 -## Prometheus直接写入 -Prometheus作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具Bailongma,只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文用Docker容器快速搭建一个Devops监控Demo即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 +## Prometheus直接写入 + +[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing 
Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 ### 从源代码编译blm_prometheus -用户需要从github下载Bailongma的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: + +用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 - 安装好Golang, 1.10版本以上 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在bailongma所在的linux服务器(可以与TDengine在同一台服务器,或者不同服务器) @@ -45,10 +47,12 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。 ### 安装Prometheus -通过Prometheus的官网下载安装。下载地址 + +通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/) ### 配置Prometheus -参考Prometheus的配置文档,在Prometheus的配置文件中的部分,增加以下配置 + +参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置 - url: bailongma API服务提供的URL, 参考下面的blm_prometheus启动示例章节 @@ -112,11 +116,13 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf直接写入 -是一流行的IT运维数据采集开源工具,TDengine提供一个小工具Bailongma,只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文用Docker容器快速搭建一个Devops监控Demo即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 +## Telegraf直接写入 + +[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 ### 从源代码编译blm_telegraf -用户需要从github下载Bailongma的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: + +用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 - 安装好Golang, 1.10版本以上 @@ -132,9 +138,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 ### 安装Telegraf -目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads + +目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads ### 配置Telegraf + 修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 在output plugins部分,增加[[outputs.http]]配置项: @@ -148,7 +156,7 @@ go build - hostname: 区分不同采集设备的机器名称,需确保其唯一性 - metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。 -关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的文档。 +关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 ### 启动blm_telegraf程序 blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 @@ -174,6 +182,7 @@ blm_telegraf对telegraf提供服务的端口号。 ``` ### 启动示例 + 通过以下命令启动一个blm_telegraf的API服务 ```bash ./blm_telegraf -host 127.0.0.1 -port 8089 @@ -186,6 +195,7 @@ url = "http://10.1.2.3:8089/telegraf" ``` ### 查询telegraf写入数据 + telegraf产生的数据格式如下: ```json { @@ -220,10 +230,10 @@ select * from cpu; MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。 -## EMQ Broker 直接写入 +## EMQ Broker 直接写入 -EMQ是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考EMQ 官方文档。 +[EMQ](https://github.com/emqx/emqx)是一开源的MQTT 
Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考[EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。 -## HiveMQ Broker 直接写入 +## HiveMQ Broker 直接写入 -HiveMQ 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 HiveMQ extension - TDengine 说明文档。 +[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/documentation20/webdocs/markdowndocs/Queries-ch.md b/documentation20/cn/06.queries/docs.md similarity index 90% rename from documentation20/webdocs/markdowndocs/Queries-ch.md rename to documentation20/cn/06.queries/docs.md index 839809ccba1914a9d5cfa9005be9f32e94f19924..a161778a72728ca05a75538c8b04ca0277e01bb2 100644 --- a/documentation20/webdocs/markdowndocs/Queries-ch.md +++ b/documentation20/cn/06.queries/docs.md @@ -1,10 +1,7 @@ - - - # 高效查询数据 -## 主要查询功能 +## 主要查询功能 TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能: @@ -26,9 +23,10 @@ Query OK, 2 row(s) in set (0.001100s) ``` 为满足物联网场景的需求,TDengine支持几个特殊的函数,比如twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。TDengine还支持连续查询。 -具体的查询语法请看TAOS SQL 。 +具体的查询语法请看 [TAOS SQL 的数据查询](https://www.taosdata.com/cn/documentation/taos-sql#select) 章节。 + +## 多表聚合查询 -## 多表聚合查询 物联网场景中,往往同一个类型的数据采集点有多个。TDengine采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时TDengine使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine提供了一高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。 **示例1**:在TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照location分组 @@ -51,9 +49,9 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - Query OK, 1 row(s) in set (0.002136s) ``` -TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在TAOS SQL 一章,查询类操作都会注明是否支持超级表。 +TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](https://www.taosdata.com/cn/documentation/taos-sql#select) 一章,查询类操作都会注明是否支持超级表。 -## 降采样查询、插值 +## 降采样查询、插值 物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每10秒钟求和 ```mysql @@ -91,5 +89,5 @@ Query OK, 5 row(s) in set (0.001521s) 物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用TDengine的降采样操作就轻松解决。如果一个时间间隔里,没有采集的数据,TDengine还提供插值计算的功能。 -语法规则细节请见TAOS SQL 。 +语法规则细节请见 [TAOS SQL 的时间维度聚合](https://www.taosdata.com/cn/documentation/taos-sql#aggregation) 章节。 diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/cn/07.advanced-features/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/advanced features-ch.md rename to documentation20/cn/07.advanced-features/docs.md index 0ca8428ecee2c5ef162810737f77cb9cf4b9412b..bdf93fdc3d74184bd7a6fd6f4eefaf3db6853c22 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/cn/07.advanced-features/docs.md @@ -1,6 +1,6 @@ # 高级功能 -## 连续查询(Continuous Query) +## 连续查询(Continuous Query) 连续查询是TDengine定期自动执行的查询,采用滑动窗口的方式进行计算,是一种简化的时间驱动的流式计算。 
针对库中的表或超级表,TDengine可提供定期自动执行的连续查询, @@ -17,10 +17,8 @@ TDengine提供的连续查询与普通流计算中的时间窗口计算具有以 - 不同于流计算的实时反馈计算结果,连续查询只在时间窗口关闭以后才开始计算。 例如时间周期是1天,那么当天的结果只会在23:59:59以后才会生成。 - - 如果有历史记录写入到已经计算完成的时间区间,连续查询并不会重新进行计算, 也不会重新将结果推送给用户。对于写回TDengine的模式,也不会更新已经存在的计算结果。 - - 使用连续查询推送结果的模式,服务端并不缓存客户端计算状态,也不提供Exactly-Once的语意保证。 如果用户的应用端崩溃,再次拉起的连续查询将只会从再次拉起的时间开始重新计算最近的一个完整的时间窗口。 如果使用写回模式,TDengine可确保数据写回的有效性和连续性。 @@ -95,7 +93,7 @@ create table avg_vol as select avg(voltage) from meters where ts > now and ts <= 后续版本会提供更细粒度和便捷的连续查询管理命令。 -## 数据订阅(Publisher/Subscriber) +## 数据订阅(Publisher/Subscriber) 基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致, 均可视为系统中插入一条带时间戳的新记录。 @@ -118,7 +116,7 @@ taos_consume taos_unsubscribe ``` -这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/), +这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/), 下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”), 完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c) 找到。 @@ -296,7 +294,7 @@ $ taos ### Java 使用数据订阅功能 -订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation20/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 +订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 下面以一个示例程序介绍其具体使用方法。它所完成的功能与前面介绍的 C 语言示例基本相同,也是订阅数据库中所有电流超过 10A 的记录。 @@ -406,7 +404,7 @@ ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang ``` -## 缓存(Cache) +## 缓存(Cache) TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 @@ -425,7 +423,7 @@ select last_row(voltage) from meters where location='Beijing.Chaoyang'; 该SQL语句将获取所有位于北京朝阳区的电表最后记录的电压值。 -## 报警监测(Alert) +## 报警监测(Alert) 在 TDengine 的应用场景中,报警监测是一个常见需求,从概念上说,它要求程序从最近一段时间的数据中筛选出符合一定条件的数据,并基于这些数据根据定义好的公式计算出一个结果,当这个结果符合某个条件且持续一定时间后,以某种形式通知用户。 diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/cn/08.connector/01.java/docs.md similarity index 93% rename from documentation20/webdocs/markdowndocs/connector-java-ch.md rename to documentation20/cn/08.connector/01.java/docs.md index 62c1f11baba7b113749e99537585dc910cfdef15..c39f6ffb4c98e97c21369b5767eff38f0fa9fe3c 100644 --- a/documentation20/webdocs/markdowndocs/connector-java-ch.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -4,7 +4,7 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实 `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 -![tdengine-connector](../assets/tdengine-jdbc-connector.png) +![tdengine-connector](page://images/tdengine-jdbc-connector.png) 上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式: @@ -119,7 +119,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 -JDBC-JNI 的使用请参见视频教程。 +JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 TDengine 的 JDBC URL 规范格式为: `jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` @@ -170,6 +170,7 @@ properties 中的配置参数如下: 
如下所示: 1. 在 Java 应用中不指定 hostname 和 port + ```java public Connection getConn() throws Exception{ Class.forName("com.taosdata.jdbc.TSDBDriver"); @@ -182,7 +183,9 @@ public Connection getConn() throws Exception{ return conn; } ``` + 2. 在配置文件中指定 firstEp 和 secondEp + ``` # first fully qualified domain name (FQDN) for TDengine system firstEp cluster_node1:6030 @@ -191,7 +194,7 @@ firstEp cluster_node1:6030 secondEp cluster_node2:6030 # default system charset -# charset UTF-8 +# charset UTF-8 # system locale # locale en_US.UTF-8 @@ -263,6 +266,25 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 处理异常 +在报错后,通过SQLException可以获取到错误的信息和错误码: +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` +JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。 +具体的错误码请参考: +* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h + ### 订阅 #### 创建 @@ -322,6 +344,7 @@ conn.close(); **HikariCP** * 引入相应 HikariCP maven 依赖: + ```xml com.zaxxer @@ -331,6 +354,7 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws SQLException { HikariConfig config = new HikariConfig(); @@ -374,6 +398,7 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws Exception { @@ -479,7 +504,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 [10]: https://maven.aliyun.com/mvn/search [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo -[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[13]: https://www.taosdata.com/cn/documentation/administrator/#client [14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client -[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B +[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/cn/08.connector/docs.md similarity index 85% rename from documentation20/webdocs/markdowndocs/connector-ch.md rename to documentation20/cn/08.connector/docs.md index 6736eea7c75ec12f017aa272385d4301c7875317..fb50e20e513aaf9b3791989c76c36eaca8df8f2b 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/cn/08.connector/docs.md @@ -2,7 +2,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用。 -![image-connecotr](../assets/connector.png) +![image-connecotr](page://images/connector.png) 目前TDengine的连接器可支持的平台广泛,包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。对照矩阵如下: @@ -24,8 +24,9 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 * 
升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。 +* 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。 -## 安装连接器驱动步骤 +## 安装连接器驱动步骤 服务器应该已经安装TDengine服务端安装包。连接器驱动安装步骤如下: @@ -136,7 +137,7 @@ taos> taos> ``` -## C/C++ Connector +## C/C++ Connector **C/C++连接器支持的系统有**: @@ -156,7 +157,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine * 在编译时需要链接TDengine动态库。Linux 为 *libtaos.so* ,安装后,位于 _/usr/local/taos/driver_。Windows为 taos.dll,安装后位于 *C:\TDengine*。 * 如未特别说明,当API的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。 -使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c。 +使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c 。 ### 基础API @@ -208,7 +209,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine - `TAOS_RES* taos_query(TAOS *taos, const char *sql)` - 该API用来执行SQL语句,可以是DQL、DML或DDL语句。 其中的`taos`参数是通过`taos_connect`获得的指针。返回值 NULL 表示失败。 + 该API用来执行SQL语句,可以是DQL、DML或DDL语句。 其中的`taos`参数是通过`taos_connect`获得的指针。不能通过返回值是否是 NULL 来判断执行结果是否失败,而是需要用`taos_errno`函数解析结果集中的错误代码来进行判断。 - `int taos_result_precision(TAOS_RES *res)` @@ -238,13 +239,13 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 获取查询结果集每列数据的属性(数据类型、名字、字节数),与taos_num_fileds配合使用,可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下: - ```c - typedef struct taosField { - char name[65]; // 列名 - uint8_t type; // 数据类型 - int16_t bytes; // 字节数 - } TAOS_FIELD; - ``` +```c +typedef struct taosField { + char name[65]; // 列名 + uint8_t type; // 数据类型 + int16_t bytes; // 字节数 +} TAOS_FIELD; +``` - `void taos_stop_query(TAOS_RES *res)` @@ -266,7 +267,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine ### 异步查询API -同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2\~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。 +同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。 异步API都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是SQL操作的结果集,如果为空,比如insert操作,表示没有记录返回,如果不为空,比如select操作,表示有记录返回。 @@ -306,17 +307,17 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下: - ```c - typedef struct TAOS_BIND { - int buffer_type; - void * buffer; - unsigned long buffer_length; // 未实际使用 - unsigned long *length; - int * is_null; - int is_unsigned; // 未实际使用 - int * error; // 未实际使用 - } TAOS_BIND; - ``` +```c +typedef struct TAOS_BIND { + int buffer_type; + void * buffer; + unsigned long buffer_length; // 未实际使用 + unsigned long *length; + int * is_null; + int is_unsigned; // 未实际使用 + int * error; // 未实际使用 +} TAOS_BIND; +``` - `int taos_stmt_add_batch(TAOS_STMT *stmt)` @@ -385,12 +386,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 -## Python Connector +## Python Connector -Python连接器的使用参见视频教程 +Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html) ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * 已安装python 2.7 or >= 3.4 * 已安装pip 或 pip3 @@ -431,6 +432,7 @@ python -m pip install python3\ import taos ``` * 获取连接并获取游标对象 + ```python conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") c1 = 
conn.cursor() @@ -438,6 +440,7 @@ c1 = conn.cursor() * host 是TDengine 服务端所有IP, config 为客户端配置文件所在目录 * 写入数据 + ```python import datetime @@ -459,6 +462,7 @@ affected_rows = c1.execute(' '.join(sqlcmd)) ``` * 查询数据 + ```python c1.execute('select * from tb') # 拉取查询结果 @@ -476,6 +480,7 @@ for data in c1: ``` * 创建订阅 + ```python # 创建一个主题为 'test' 消费周期为1000毫秒的订阅 # 第一个参数为 True 表示重新开始订阅,如为 False 且之前创建过主题为 'test' 的订阅,则表示继续消费此订阅的数据,而不是重新开始消费所有数据 @@ -483,6 +488,7 @@ sub = conn.subscribe(True, "test", "select * from tb;", 1000) ``` * 消费订阅的数据 + ```python data = sub.consume() for d in data: @@ -490,15 +496,18 @@ for d in data: ``` * 取消订阅 + ```python sub.close() ``` * 关闭连接 + ```python c1.close() conn.close() ``` + #### 帮助信息 用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法: @@ -518,6 +527,7 @@ conn.close() 用于生成taos.TDengineConnection的实例。 ### Python客户端使用示例代码 + 在tests/examples/python中,我们提供了一个示例Python程序read_example.py,可以参考这个程序来设计用户自己的写入、查询程序。在安装了对应的客户端后,通过import taos引入taos类。主要步骤如下 - 通过taos.connect获取TDengineConnection对象,这个对象可以一个程序只申请一个,在多线程中共享。 @@ -527,9 +537,9 @@ conn.close() - 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。 -## RESTful Connector +## RESTful Connector -为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见视频教程。 +为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 ### HTTP请求格式 @@ -581,7 +591,8 @@ curl -u username:password -d '' :/rest/sql ```json { "status": "succ", - "head": ["Time Stamp","current", …], + "head": ["ts","current", …], + "column_meta": [["ts",9,8],["current",6,4], …], "data": [ ["2018-10-03 14:38:05.000", 10.3, …], ["2018-10-03 14:38:15.000", 12.6, …] @@ -592,10 +603,23 @@ curl -u username:password -d '' :/rest/sql 说明: -- status: 告知操作结果是成功还是失败 -- head: 表的定义,如果不返回结果集,仅有一列“affected_rows” -- data: 具体返回的数据,一排一排的呈现,如果不返回结果集,仅[[affected_rows]] -- rows: 表明总共多少行数据 +- status: 告知操作结果是成功还是失败。 +- head: 表的定义,如果不返回结果集,则仅有一列“affected_rows”。(从 2.0.17 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中,有可能会从返回值中去掉 head 这一项。) +- column_meta: 从 2.0.17 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。 +- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有[[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。 +- rows: 表明总共多少行数据。 + +column_meta 中的列类型说明: +* 1:BOOL +* 2:TINYINT +* 3:SMALLINT +* 4:INT +* 5:BIGINT +* 6:FLOAT +* 7:DOUBLE +* 8:BINARY +* 9:TIMESTAMP +* 10:NCHAR ### 自定义授权码 @@ -641,7 +665,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001 ```json { "status": "succ", - "head": ["Time Stamp","current","voltage","phase"], + "head": ["ts","current","voltage","phase"], + "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]], "data": [ ["2018-10-03 14:38:05.000",10.3,219,0.31], ["2018-10-03 14:38:15.000",12.6,218,0.33] @@ -661,8 +686,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19 { "status": "succ", "head": ["affected_rows"], + "column_meta": [["affected_rows",4,4]], "data": [[1]], - "rows": 1, + "rows": 1 } ``` @@ -681,7 +707,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001 
```json { "status": "succ", - "head": ["column1","column2","column3"], + "head": ["ts","current","voltage","phase"], + "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]], "data": [ [1538548685000,10.3,219,0.31], [1538548695000,12.6,218,0.33] @@ -702,7 +729,8 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ```json { "status": "succ", - "head": ["column1","column2","column3"], + "head": ["ts","current","voltage","phase"], + "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]], "data": [ ["2018-10-03T14:38:05.000+0800",10.3,219,0.31], ["2018-10-03T14:38:15.000+0800",12.6,218,0.33] @@ -713,21 +741,21 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ### 重要配置项 -下面仅列出一些与RESTFul接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 +下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 -- httpPort: 对外提供RESTFul服务的端口号,默认绑定到6041 -- httpMaxThreads: 启动的线程数量,默认为2 +- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041 +- httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整) - restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240 - httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 - httpDebugFlag: 日志开关,131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131 -## CSharp Connector +## CSharp Connector C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 * 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 @@ -766,15 +794,15 @@ https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos https://www.taosdata.com/blog/2020/11/02/1901.html ``` -## Go Connector +## Go Connector ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 TDengine提供了GO驱动程序`taosSql`。 `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`。 -使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及视频教程。 +使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 ```Go import ( @@ -821,7 +849,7 @@ go env -w GOPROXY=https://goproxy.io,direct sql.Open内置的方法,Close closes the statement. -## Node.js Connector +## Node.js Connector Node.js连接器支持的系统有: @@ -830,47 +858,47 @@ Node.js连接器支持的系统有: | **OS类型** | Linux | Win64 | Win32 | Linux | Linux | | **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** | -Node.js连接器的使用参见视频教程 +Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html) ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 ### 安装Node.js连接器 -用户可以通过npm来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: +用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: -首先,通过npm安装node.js 连接器. +首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. 
```bash
npm install td2.0-connector
```
我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下。
-我们使用node-gyp和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
+我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
### Linux
- `python` (建议`v2.7`, `v3.x.x` 目前还不支持)
- `node` 2.0.6支持v12.x和v10.x,2.0.5及更早版本支持v10.x版本,其他版本可能存在包兼容性的问题。
- `make`
-- c语言编译器比如GCC
+- c语言编译器比如[GCC](https://gcc.gnu.org)
### Windows
#### 安装方法1
-使用微软的windows-build-tools在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
+使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
#### 安装方法2
手动安装以下工具:
-- 安装Visual Studio相关:Visual Studio 2017 Community
-- 安装 Python 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
+- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
+- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
- 进入`cmd`命令行界面, `npm config set msvs_version 2017`
-如果以上步骤不能成功执行, 可以参考微软的node.js用户手册Microsoft's Node.js Guidelines for Windows
+如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64".
@@ -887,7 +915,7 @@
Node-example-raw.js
验证方法:
-1. 新建安装验证目录,例如:\~/tdengine-test,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
+1. 新建安装验证目录,例如:`~/tdengine-test`,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
2. 在命令行中执行以下命令:
@@ -901,8 +929,7 @@ node nodejsChecker.js host=localhost
### Node.js连接器的使用
-(http://docs.taosdata.com/node)
-以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考该文档
+以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)
#### 建立连接
@@ -973,6 +1000,7 @@ promise.then(function(result) {
})
```
#### 异步函数
+
异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。
```javascript
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
@@ -986,6 +1014,7 @@ promise2.then(function(result) {
```
### 示例
-这里提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
-这里同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
+[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
+
+[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
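The RESTful section above explains that, since version 2.0.17, every `/rest/sql` response carries a `column_meta` field describing each result column as a `[name, type id, length]` triple, but it shows no client code for consuming it. Below is a minimal Python sketch of how such a response could be decoded; it assumes a locally running taosd with the RESTful service on the default port 6041, the default `root`/`taosdata` account, and the `demo.d1001` table used in the curl examples above. None of these are guaranteed by this patch itself.

```python
import base64
import json
import urllib.request

# Assumptions (not part of this patch): taosd runs locally with the RESTful
# service on its default port 6041, and the default root/taosdata account works.
TOKEN = base64.b64encode(b"root:taosdata").decode()

def rest_sql(sql: str) -> dict:
    """POST a single SQL statement to /rest/sql and return the parsed JSON reply."""
    req = urllib.request.Request(
        "http://localhost:6041/rest/sql",
        data=sql.encode("utf-8"),
        headers={"Authorization": "Basic " + TOKEN},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

result = rest_sql("select * from demo.d1001")
if result["status"] == "succ":
    # Each column_meta entry is [name, type id, length]; e.g. type id 9 is
    # TIMESTAMP and 6 is FLOAT, so clients need not infer types from head.
    names = [meta[0] for meta in result["column_meta"]]
    for row in result["data"]:
        print(dict(zip(names, row)))
```

Sending the same request to the `/rest/sqlutc` endpoint described above would return the timestamp column in UTC form instead.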
diff --git a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md b/documentation20/cn/09.connections/docs.md
similarity index 82%
rename from documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md
rename to documentation20/cn/09.connections/docs.md
index 4e55f3c5e8f12f3277898b1f825dca5e22b2e6d1..79380f3bbd9680120f63f89a0bfbe6f31f5c7a74 100644
--- a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -1,7 +1,7 @@
# 与其他工具的连接
-## Grafana
+## Grafana
TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine中数据表的内容可以在仪表盘(DashBoard)上进行可视化展现。
@@ -13,7 +13,11 @@ TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/)
TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin目录下。
-以CentOS 7.2操作系统为例,将tdengine目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
+以CentOS 7.2操作系统为例,将grafanaplugin目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
+
+```bash
+sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
+```
### 使用 Grafana
@@ -21,15 +25,15 @@
用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
-![img](../assets/add_datasource1.jpg)
+![img](page://images/connections/add_datasource1.jpg)
点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示:
-![img](../assets/add_datasource2.jpg)
+![img](page://images/connections/add_datasource2.jpg)
进入数据源配置页面,按照默认提示修改相应配置即可:
-![img](../assets/add_datasource3.jpg)
+![img](page://images/connections/add_datasource3.jpg)
* Host: TDengine 集群中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041
* User:TDengine 用户名。
@@ -37,13 +41,13 @@
点击 `Save & Test` 进行测试,成功会有如下提示:
-![img](../assets/add_datasource4.jpg)
+![img](page://images/connections/add_datasource4.jpg)
#### 创建 Dashboard
回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面:
-![img](../assets/create_dashboard1.jpg)
+![img](page://images/connections/create_dashboard1.jpg)
如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 sql 进行查询,具体说明如下:
@@ -54,7 +58,7 @@
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
-![img](../assets/create_dashboard2.jpg)
+![img](page://images/connections/create_dashboard2.jpg)
> 关于如何使用Grafana创建相应的监测界面以及更多有关使用Grafana的信息,请参考Grafana官方的[文档](https://grafana.com/docs/)。
@@ -64,14 +68,14 @@
点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件:
-![img](../assets/import_dashboard1.jpg)
+![img](page://images/connections/import_dashboard1.jpg)
导入完成之后可看到如下效果:
-![img](../assets/import_dashboard2.jpg)
+![img](page://images/connections/import_dashboard2.jpg)
-## Matlab
+## Matlab
MatLab可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。
@@ -82,12 +86,15 @@
MatLab的适配有下面几个步骤,以Windows10上适配MatLab2017a为例:
- 将TDengine安装包内的驱动程序JDBCDriver-1.0.0-dist.jar拷贝到${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- 将TDengine安装包内的taos.lib文件拷贝至${matlab_root_dir}\MATLAB\R2017a\lib\win64
- 将新添加的驱动jar包加入MatLab的classpath。在${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt文件中添加下面一行
-
-​ `$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
-
+
+```
+$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
+```
- 在${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\下添加一个文件javalibrarypath.txt,并在该文件中添加taos.dll的路径,比如您的taos.dll是在安装时拷贝到了C:\Windows\System32下,那么就应该在javalibrarypath.txt中添加如下一行:
-
-​ `C:\Windows\System32`
+
+```
+C:\Windows\System32
+```
### 在MatLab中连接TDengine获取数据
@@ -95,23 +102,25 @@
- 创建一个连接:
- `conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://127.0.0.1:0/’)`
-
+```matlab
+conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
+```
- 执行一次查询:
- `sql0 = [‘select * from tb’]`
-
- `data = select(conn, sql0);`
-
+```matlab
+sql0 = ['select * from tb']
+data = select(conn, sql0);
+```
- 插入一条记录:
- `sql1 = [‘insert into tb values (now, 1)’]`
-
- `exec(conn, sql1)`
+```matlab
+sql1 = ['insert into tb values (now, 1)']
+exec(conn, sql1)
+```
更多例子细节请参考安装包内examples\Matlab\TDengineDemo.m文件。
-## R
+## R
R语言支持通过JDBC接口来连接TDengine数据库。首先需要安装R语言的JDBC包。启动R语言环境,然后执行以下命令安装R语言的JDBC支持库:
@@ -146,4 +155,3 @@
TDengine客户端暂不支持如下函数:
- dbExistsTable(conn, "test"):是否存在表test
- dbListTables(conn):显示连接中的所有表
-
diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/cn/10.cluster/docs.md
similarity index 92%
rename from documentation20/webdocs/markdowndocs/cluster-ch.md
rename to documentation20/cn/10.cluster/docs.md
index f6019b1a5aabdaea095eafc1919c46432ec88c77..6d7d68fe1b5adc3fd7896c4070a4979398553cb2 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -1,19 +1,19 @@
# TDengine 集群安装、管理
-多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)一章安装并体验单节点功能。
+多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。
集群的每个数据节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取(如何配置FQDN,请参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html))。端口是这个数据节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个物理节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。
TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
-关于集群搭建请参考视频教程。
+关于集群搭建请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1961.html)。
-## 准备工作
+## 准备工作
**第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】
-**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
-**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/);
+**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
+**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine,请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`);
**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。
**第二步**:建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口;
@@ -59,15 +59,15 @@ arbitrator ha.taosdata.com:6042
| 8 | charset | 字符集编码 |
| 9 | balance | 是否启动负载均衡 |
| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
-| 11 | maxVgroupsPerDb | 每个DB中 能够使用的最大vnode个数 |
+| 11 |
maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 | -## 启动第一个数据节点 +## 启动第一个数据节点 -按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)里的指示,启动第一个数据节点,例如h1.taosdata.com,然后执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示: +按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)里的指示,启动第一个数据节点,例如h1.taosdata.com,然后执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示: - ``` +``` Welcome to the TDengine shell from Linux, Client Version:2.0.0.0 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. @@ -78,15 +78,15 @@ taos> show dnodes; Query OK, 1 row(s) in set (0.006385s) taos> - ``` +``` 上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEP。 -## 启动后续数据节点 +## 启动后续数据节点 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法在每个物理节点启动taosd; +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd; 2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令: @@ -115,7 +115,7 @@ taos> - firstEp这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的mnode的End Point列表,不再依赖这个参数。 - 两个没有配置firstEp参数的数据节点dnode启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 -## 数据节点管理 +## 数据节点管理 上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。 @@ -169,7 +169,7 @@ SHOW DNODES; SHOW VGROUPS; ``` -## vnode的高可用性 +## vnode的高可用性 TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。 @@ -185,7 +185,7 @@ CREATE DATABASE demo replica 3; 因为vnode的引入,无法简单的给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。 -## Mnode的高可用性 +## Mnode的高可用性 TDengine集群是由mnode (taosd的一个模块,管理节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。 @@ -202,7 +202,7 @@ SHOW MNODES; **注意:**一个TDengine高可用系统,无论是vnode还是mnode, 都必须配置多个副本。 -## 负载均衡 +## 负载均衡 有三种情况,将触发负载均衡,而且都无需人工干预。 @@ -214,7 +214,7 @@ SHOW MNODES; **【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。** -## 数据节点离线处理 +## 数据节点离线处理 如果一个数据节点离线,TDengine集群将自动检测到。有如下两种情况: @@ -223,7 +223,7 @@ SHOW MNODES; **注意:**如果一个虚拟节点组(包括mnode组)里所归属的每个数据节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点,副本数为3,如果3个数据节点都宕机,然后2个数据节点重启,是无法工作的,只有等3个数据节点都重启成功,才能对外服务。 -## Arbitrator的使用 +## Arbitrator的使用 如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了Arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含Arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到Arbitrator,那么节点B就能正常工作。 diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/cn/11.administrator/docs.md similarity index 91% rename from documentation20/webdocs/markdowndocs/administrator-ch.md rename to documentation20/cn/11.administrator/docs.md index 7c8344d9ca5cb2c9738c9b308406a6c96f871db1..86ad8e5bb91e8883b4be4a2bec7b5fb7dcb4c483 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/cn/11.administrator/docs.md @@ -1,12 +1,12 @@ # TDengine的运营与维护 -## 容量规划 +## 容量规划 使用TDengine来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU以及硬盘空间。 ### 内存需求 -每个DB可以创建固定数目的vnode,默认与CPU核数相同,可通过maxVgroupsPerDb配置;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算: +每个DB可以创建固定数目的vgroup,默认与CPU核数相同,可通过maxVgroupsPerDb配置;vgroup中的每个副本会是一个vnode;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算: ``` Memory Size = maxVgroupsPerDb * 
(blocks * cache + 10Mb) + numOfTables * (tagSizePerTable + 0.5Kb) @@ -47,9 +47,9 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable 因为TDengine具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。 -**立即计算CPU、内存、存储,请参见:资源估算方法** +**立即计算CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)** -## 容错和灾备 +## 容错和灾备 ### 容错 @@ -76,7 +76,7 @@ TDengine集群的节点数必须大于等于副本数,否则创建表时将报 当TDengine集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine企业版还可以将副本部署在不同机房,从而实现异地容灾。 -## 服务端配置 +## 服务端配置 TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。 @@ -102,7 +102,7 @@ taosd -C - maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 - stream: 是否启用连续查询(流计算功能),0表示不允许,1表示允许。 默认值:1。 -- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为字节。 +- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。 - ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。 @@ -111,15 +111,16 @@ taosd -C - days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。 - keep:数据库中数据保留的天数,单位为天,默认值:3650。 -- minRows: 文件块中记录的最小条数,单位为条,默认值:100。 -- maxRows: 文件块中记录的最大条数,单位为条,默认值:4096。 -- comp: 文件压缩标志位,0:关闭,1:一阶段压缩,2:两阶段压缩。默认值:2。 -- walLevel:WAL级别。1:写wal, 但不执行fsync; 2:写wal, 而且执行fsync。默认值:1。 +- minRows:文件块中记录的最小条数,单位为条,默认值:100。 +- maxRows:文件块中记录的最大条数,单位为条,默认值:4096。 +- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。 +- walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。 - fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。 -- cache: 内存块的大小,单位为兆字节(MB),默认值:16。 -- blocks: 每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。 +- cache:内存块的大小,单位为兆字节(MB),默认值:16。 +- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。 - replica:副本个数,取值范围:1-3。单位为个,默认值:1 - precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms +- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(从 2.0.11 版本开始支持此参数) 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: @@ -137,7 +138,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。 - statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 -- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 +- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。 - arbitrator: 系统中裁决器的end point,缺省为空。 - timezone、locale、charset 的配置见客户端配置。 @@ -157,9 +158,9 @@ ALTER DNODE alter dnode 1 debugFlag 135; ``` -## 客户端配置 +## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见Shell命令行程序。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见帮助信息 `taos --help`。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 **2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置** @@ -247,7 +248,7 @@ taos -C 或 taos --dump-config Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 -## 用户管理 +## 用户管理 系统管理员可以在CLI界面里添加、删除用户,也可以修改密码。CLI里SQL语法如下: @@ -285,7 +286,7 @@ SHOW USERS; 
**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 -## 数据导入 +## 数据导入 TDengine提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是taosdump工具导入本身导出的文件。 @@ -337,9 +338,9 @@ Query OK, 9 row(s) affected (0.004763s) **taosdump工具导入** -TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:TDengine DUMP工具使用指南 +TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) -## 数据导出 +## 数据导出 为方便数据导出,TDengine提供了两种导出方式,分别是按表导出和用taosdump导出。 @@ -355,9 +356,9 @@ select * from >> data.csv; **用taosdump导出数据** -TDengine提供了方便的数据库导出工具taosdump。用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见博客:TDengine DUMP工具使用指南 +TDengine提供了方便的数据库导出工具taosdump。用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) -## 系统连接、任务查询管理 +## 系统连接、任务查询管理 系统管理员可以从CLI查询系统的连接、正在进行的查询、流式计算,并且可以关闭连接、停止正在进行的查询和流式计算。CLI里SQL语法如下: @@ -403,7 +404,7 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 -## 文件目录结构 +## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: @@ -429,7 +430,7 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 -## TDengine参数限制与保留关键字 +## TDengine参数限制与保留关键字 - 数据库名:不能包含“.”以及特殊字符,不能超过32个字符 - 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符 diff --git a/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md b/documentation20/cn/12.taos-sql/01.error-code/docs.md similarity index 100% rename from documentation20/webdocs/markdowndocs/Taos Error Code-ch.md rename to documentation20/cn/12.taos-sql/01.error-code/docs.md diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/cn/12.taos-sql/docs.md similarity index 85% rename from documentation20/webdocs/markdowndocs/TAOS SQL-ch.md rename to documentation20/cn/12.taos-sql/docs.md index 0da28836056ee8e3b50bbf96499a6a3de1d205a4..aba8faa6771ad09a90b30b5bf3bb08664e8b6ca1 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -25,7 +25,7 @@ taos> DESCRIBE meters; ``` 数据集包含4个智能电表的数据,按照TDengine的建模规则,对应4个子表,其名称分别是 d1001, d1002, d1003, d1004。 -## 支持的数据类型 +## 支持的数据类型 使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: @@ -42,19 +42,21 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | | 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 | -| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null | -| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL | -| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] | -| 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 用于记录字符串,理论上,最长可以有16374字节,但由于每行数据最多16K字节,实际上限一般小于理论值。 binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 | -| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL | -| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL | -| 9 | BOOL | 1 | 布尔型,{true, false} | -| 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将会报错。 | - -**Tips**: TDengine对SQL语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。 - -## 数据库管理 +| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL | +| 3 | BIGINT | 8 | 长整型,范围 
[-2^63+1, 2^63-1], -2^63 用于 NULL | +| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | +| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | +| 6 | BINARY | 自定义 | 用于记录 ASCII 型字符串。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。 binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | +| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | +| 9 | BOOL | 1 | 布尔型,{true, false} | +| 10 | NCHAR | 自定义 | 用于记录非 ASCII 型字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 | + +**Tips**: +1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。 +2. 应避免使用 BINARY 类型来保存非 ASCII 型的字符串,会很容易导致数据乱码等错误。正确的做法是使用 NCHAR 类型来保存中文字符。 + +## 数据库管理 - **创建数据库** @@ -96,27 +98,32 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql ALTER DATABASE db_name COMP 2; ``` - COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。 + COMP 参数是指修改数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。0 表示不压缩,1 表示一阶段压缩,2 表示两阶段压缩。 ```mysql ALTER DATABASE db_name REPLICA 2; ``` - REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。 + REPLICA 参数是指修改数据库副本数,取值范围 [1, 3]。在集群中使用,副本数必须小于或等于 DNODE 的数目。 ```mysql ALTER DATABASE db_name KEEP 365; ``` - KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。 + KEEP 参数是指修改数据文件保存的天数,缺省值为 3650,取值范围 [days, 365000],必须大于或等于 days 参数值。 ```mysql ALTER DATABASE db_name QUORUM 2; ``` - QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 + QUORUM 参数是指数据写入成功所需要的确认数,取值范围 [1, 3]。对于异步复制,quorum 设为 1,具有 master 角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于 2。原则上,Quorum >= 1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 ```mysql ALTER DATABASE db_name BLOCKS 100; ``` - BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。 + BLOCKS 参数是每个 VNODE (TSDB) 中有多少 cache 大小的内存块,因此一个 VNODE 的用的内存大小粗略为(cache * blocks)。取值范围 [3, 1000]。 + + ```mysql + ALTER DATABASE db_name CACHELAST 0; + ``` + CACHELAST 参数控制是否在内存中缓存数据子表的 last_row。缺省值为 0,取值范围 [0, 1]。其中 0 表示不启用、1 表示启用。(从 2.0.11 版本开始支持) **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。 @@ -126,7 +133,8 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic SHOW DATABASES; ``` -## 表管理 +## 表管理 + - **创建数据表** ```mysql @@ -151,6 +159,14 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 以指定的超级表为模板,指定 tags 的值来创建数据表。 +- **以超级表为模板创建数据表,并指定具体的 tags 列** + + ```mysql + CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) 
TAGS (tag_value1, ...); + ``` + 以指定的超级表为模板,指定一部分 tags 列的值来创建数据表。(没被指定的 tags 列会设为空值。) + 说明:从 2.0.17 版本开始支持这种方式。在之前的版本中,不允许指定 tags 列,而必须显式给出所有 tags 列的取值。 + - **批量创建数据表** ```mysql @@ -212,9 +228,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 如果表是通过[超级表](../super-table/)创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构 -## 超级表STable管理 +## 超级表STable管理 -注意:在 2.0.15 以前的版本中,并不支持 STABLE 保留字,而是写作 TABLE。也即,在本节后文的指令说明中,CREATE、DROP、ALTER 三个指令在老版本中保留字需写作 TABLE 而不是 STABLE。 +注意:在 2.0.15.0 及以后的版本中,开始支持 STABLE 保留字。也即,在本节后文的指令说明中,CREATE、DROP、ALTER 三个指令在老版本中保留字需写作 TABLE 而不是 STABLE。 - **创建超级表** @@ -265,7 +281,8 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ALTER STABLE stb_name DROP COLUMN field_name; ``` -## 超级表 STable 中 TAG 管理 +## 超级表 STable 中 TAG 管理 + - **添加标签** ```mysql @@ -290,11 +307,11 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic - **修改子表标签值** ```mysql - ALTER STABLE tb_name SET TAG tag_name=new_tag_value; + ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` 说明:除了更新标签的值的操作是针对子表进行,其他所有的标签操作(添加标签、删除标签等)均只能作用于 STable,不能对单个子表操作。对 STable 添加标签以后,依托于该 STable 建立的所有表将自动增加了一个标签,所有新增标签的默认值都是 NULL。 -## 数据写入 +## 数据写入 - **插入一条记录** ```mysql @@ -304,7 +321,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic - **插入一条记录,数据对应到指定的列** ```mysql - INSERT INTO tb_name (field1_name, ...) VALUES (field1_value, ...) + INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...); ``` 向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。 @@ -334,29 +351,21 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 同时向表tb1_name和tb2_name中按列分别插入多条记录 - 注意: - 1) 如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳; - 2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 + 注意:允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 -**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 - -## 数据查询 +- **插入记录时自动建表** + ```mysql + INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...); + ``` + 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 tags 取值。 -### 查询语法: +- **插入记录时自动建表,并指定具体的 tags 列** + ```mysql + INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...); + ``` + 在自动建表时,可以只是指定部分 tags 列的取值,未被指定的 tags 列将取为空值。 -```mysql -SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [INTERVAL (interval_val [, interval_offset])] - [FILL fill_val] - [SLIDING fill_val] - [GROUP BY col_list] - [ORDER BY col_list { DESC | ASC }] - [SLIMIT limit_val [, SOFFSET offset_val]] - [LIMIT limit_val [, OFFSET offset_val]] - [>> export_file] -``` +**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。 @@ -384,10 +393,30 @@ taos> SHOW TABLES; Query OK, 1 row(s) in set (0.001091s) ``` +## 数据查询 + +### 查询语法: + +```mysql +SELECT select_expr [, select_expr ...] 
+ FROM {tb_name_list} + [WHERE where_condition] + [INTERVAL (interval_val [, interval_offset])] + [FILL fill_val] + [SLIDING fill_val] + [GROUP BY col_list] + [ORDER BY col_list { DESC | ASC }] + [SLIMIT limit_val [, SOFFSET offset_val]] + [LIMIT limit_val [, OFFSET offset_val]] + [>> export_file]; +``` + #### SELECT子句 + 一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。 ##### 通配符 + 通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。 ```mysql taos> SELECT * FROM d1001; @@ -496,6 +525,7 @@ Query OK, 3 row(s) in set (0.001191s) 但是针对```first(*)```、```last(*)```、```last_row(*)```不支持针对单列的重命名。 #### 隐式结果列 + ```Select_exprs```可以是表所属列的列名,也可以是基于列的函数表达式或计算式,数量的上限256个。当用户使用了```interval```或```group by tags```的子句以后,在最后返回结果中会强制返回时间戳列(第一列)和group by子句中的标签列。后续的版本中可以支持关闭group by子句中隐式列的输出,列输出完全由select子句控制。 #### 表(超级表)列表 @@ -510,6 +540,7 @@ SELECT * FROM d1001; ``` #### 特殊功能 + 部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database() ```mysql taos> SELECT DATABASE(); @@ -554,12 +585,14 @@ taos> SELECT SERVER_STATUS() AS status; 1 | Query OK, 1 row(s) in set (0.000081s) ``` + #### TAOS SQL中特殊关键词 > TBNAME: 在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名
    \_c0: 表示表(超级表)的第一列 #### 小技巧 + 获取一个超级表所有的子表名及相关的标签信息: ```mysql SELECT TBNAME, location FROM meters; @@ -594,19 +627,21 @@ Query OK, 1 row(s) in set (0.001091s) ### 支持的条件过滤操作 -| Operation | Note | Applicable Data Types | -| --------- | ----------------------------- | ------------------------------------- | -| > | larger than | **`timestamp`** and all numeric types | -| < | smaller than | **`timestamp`** and all numeric types | -| >= | larger than or equal to | **`timestamp`** and all numeric types | -| <= | smaller than or equal to | **`timestamp`** and all numeric types | -| = | equal to | all types | -| <> | not equal to | all types | -| % | match with any char sequences | **`binary`** **`nchar`** | -| _ | match with a single char | **`binary`** **`nchar`** | +| Operation | Note | Applicable Data Types | +| ----------- | ----------------------------- | ------------------------------------- | +| > | larger than | **`timestamp`** and all numeric types | +| < | smaller than | **`timestamp`** and all numeric types | +| >= | larger than or equal to | **`timestamp`** and all numeric types | +| <= | smaller than or equal to | **`timestamp`** and all numeric types | +| = | equal to | all types | +| <> | not equal to | all types | +| between and | within a certain range | **`timestamp`** and all numeric types | +| % | match with any char sequences | **`binary`** **`nchar`** | +| _ | match with a single char | **`binary`** **`nchar`** | 1. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 -2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用``` OR``` 关键字进行组合条件的查询过滤。例如:((value > 20 and value < 30) OR (value < 12)) 。 +2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。 +3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 ### SQL 示例 @@ -640,7 +675,7 @@ Query OK, 1 row(s) in set (0.001091s) SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; ``` -## SQL 函数 +## SQL 函数 ### 聚合函数 @@ -757,7 +792,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:表。(从 2.0.15 版本开始,本函数也支持超级表) + 适用于:表。(从 2.0.15.1 版本开始,本函数也支持超级表) 示例: ```mysql @@ -1119,7 +1154,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 3 row(s) in set (0.001046s) ``` -## 时间维度聚合 +## 时间维度聚合 + TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下: ```mysql @@ -1138,10 +1174,10 @@ SELECT function_list FROM stb_name - 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。 - WHERE语句可以指定查询的起止时间和其他过滤条件 - FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种: - 1. 不进行填充:NONE(默认填充模式)。 - 2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。 - 3. NULL填充:使用NULL填充数据。例如:fill(null)。 - 4. PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。 + * 不进行填充:NONE(默认填充模式)。 + * VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。 + * NULL填充:使用NULL填充数据。例如:fill(null)。 + * PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。 说明: 1. 
使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
@@ -1164,7 +1200,8 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
FILL(PREV);
```
-## TAOS SQL 边界限制
+## TAOS SQL 边界限制
+
- 数据库名最大长度为32
- 表名最大长度为192,每行数据最大长度16k个字符
- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳
diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/cn/13.faq/docs.md
similarity index 81%
rename from documentation20/webdocs/markdowndocs/faq-ch.md
rename to documentation20/cn/13.faq/docs.md
index cd6f0ae08caf19340b6cef9a9428abcb66c97dc6..727ccf653f50cb449c1f15e1c903706a2fab2068 100644
--- a/documentation20/webdocs/markdowndocs/faq-ch.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -6,7 +6,7 @@
1. /var/log/taos (如果没有修改过默认路径)
2. /etc/taos
-附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 GitHub提交Issue。
+附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交Issue。
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135”(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。
```
alter dnode <dnode_id> debugFlag 135;
```
@@ -25,13 +25,16 @@
5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决
## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
-请看为此问题撰写的技术博客
+
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/)
## 3. 创建数据表时提示more dnodes are needed
-请看为此问题撰写的技术博客
+
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/)
## 4. 如何让TDengine crash时生成core文件?
-请看为此问题撰写的技术博客
+
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/)
## 5. 遇到错误"Unable to establish connection", 我怎么办?
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
-4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:一篇文章说清楚TDengine的FQDN。
+4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
@@ -65,12 +68,12 @@
* Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务端端口是否可访问
-10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):TDengine 内嵌网络检测工具使用指南。
+10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。
## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办?
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
-1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:一篇文章说清楚TDengine的FQDN。
+1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
2. 如果网络配置有DNS server, 请检查是否正常工作
3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的
@@ -93,11 +96,11 @@
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。
-## 10. 最有效的写入数据的方法是什么?
+## 11. 最有效的写入数据的方法是什么?
批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
-## 11. 最有效的写入数据的方法是什么?windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
+## 12. Windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
Windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下: ```JAVA @@ -106,10 +109,9 @@ Properties properties = new Properties(); properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); Connection = DriverManager.getConnection(url, properties); ``` -## 12.TDengine GO windows驱动的如何编译? -请看为此问题撰写的技术博客 ## 13.JDBC报错: the excuted SQL is not a DML or a DDL? + 请更新至最新的JDBC驱动 ```JAVA @@ -118,6 +120,7 @@ Connection = DriverManager.getConnection(url, properties); 2.0.4 ``` + ## 14. taos connect failed, reason: invalid timestamp 常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 @@ -134,3 +137,20 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 +## 17. 如何在命令行程序 taos 中临时调整日志级别 + +为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: + +```mysql +ALTER LOCAL flag_name flag_value; +``` + +其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): +- flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag +- flag_value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志) + +```mysql +ALTER LOCAL RESETLOG; +``` + +其含义是,清空本机所有由客户端生成的日志文件。 diff --git a/documentation20/webdocs/assets/dnode.png b/documentation20/cn/images/architecture/dnode.png similarity index 100% rename from documentation20/webdocs/assets/dnode.png rename to documentation20/cn/images/architecture/dnode.png diff --git a/documentation20/webdocs/assets/message.png b/documentation20/cn/images/architecture/message.png similarity index 100% rename from documentation20/webdocs/assets/message.png rename to documentation20/cn/images/architecture/message.png diff --git a/documentation20/webdocs/assets/modules.png b/documentation20/cn/images/architecture/modules.png similarity index 100% rename from documentation20/webdocs/assets/modules.png rename to documentation20/cn/images/architecture/modules.png diff --git a/documentation20/cn/images/architecture/multi_tables.png b/documentation20/cn/images/architecture/multi_tables.png new file mode 100644 index 0000000000000000000000000000000000000000..0cefaab6a9a4cdd671c671f7c6186dea41415ff0 Binary files /dev/null and b/documentation20/cn/images/architecture/multi_tables.png differ diff --git a/documentation20/cn/images/architecture/replica-forward.png b/documentation20/cn/images/architecture/replica-forward.png new file mode 100644 index 0000000000000000000000000000000000000000..bf616e030b130603eceb5dccfd30b4a1dfa68ea5 Binary files /dev/null and b/documentation20/cn/images/architecture/replica-forward.png differ diff --git a/documentation20/webdocs/assets/replica-master.png b/documentation20/cn/images/architecture/replica-master.png similarity index 100% rename from documentation20/webdocs/assets/replica-master.png rename to documentation20/cn/images/architecture/replica-master.png diff --git a/documentation20/cn/images/architecture/replica-restore.png b/documentation20/cn/images/architecture/replica-restore.png new file mode 100644 index 0000000000000000000000000000000000000000..1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28 Binary files /dev/null and b/documentation20/cn/images/architecture/replica-restore.png differ diff --git a/documentation20/cn/images/architecture/structure.png b/documentation20/cn/images/architecture/structure.png new file mode 100644 index 
0000000000000000000000000000000000000000..4fc8f47ab0a30d95b85ba1d85105726ed981e56e Binary files /dev/null and b/documentation20/cn/images/architecture/structure.png differ diff --git a/documentation20/webdocs/assets/vnode.png b/documentation20/cn/images/architecture/vnode.png similarity index 100% rename from documentation20/webdocs/assets/vnode.png rename to documentation20/cn/images/architecture/vnode.png diff --git a/documentation20/webdocs/assets/write_master.png b/documentation20/cn/images/architecture/write_master.png similarity index 100% rename from documentation20/webdocs/assets/write_master.png rename to documentation20/cn/images/architecture/write_master.png diff --git a/documentation20/webdocs/assets/write_slave.png b/documentation20/cn/images/architecture/write_slave.png similarity index 100% rename from documentation20/webdocs/assets/write_slave.png rename to documentation20/cn/images/architecture/write_slave.png diff --git a/documentation20/cn/images/connections/add_datasource1.jpg b/documentation20/cn/images/connections/add_datasource1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f0f5110f312c57f3ec1788bbc02f04fac6ac142 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource1.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource2.jpg b/documentation20/cn/images/connections/add_datasource2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa7a83e00e96fae649910dff4edf5f5bdadd7850 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource2.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource3.jpg b/documentation20/cn/images/connections/add_datasource3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc850ad08ff1174de972906842e0d5ee64e6e5cb Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource3.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource4.jpg b/documentation20/cn/images/connections/add_datasource4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ba73e50d455111f8621f4165746078554c2d790 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource4.jpg differ diff --git a/documentation20/cn/images/connections/create_dashboard1.jpg b/documentation20/cn/images/connections/create_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c Binary files /dev/null and b/documentation20/cn/images/connections/create_dashboard1.jpg differ diff --git a/documentation20/cn/images/connections/create_dashboard2.jpg b/documentation20/cn/images/connections/create_dashboard2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe5d768ac55254251e0290bf257178f5ff28f5a5 Binary files /dev/null and b/documentation20/cn/images/connections/create_dashboard2.jpg differ diff --git a/documentation20/cn/images/connections/import_dashboard1.jpg b/documentation20/cn/images/connections/import_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d2ce7ed65eb0c2c729de50283b30491793493dc Binary files /dev/null and b/documentation20/cn/images/connections/import_dashboard1.jpg differ diff --git a/documentation20/cn/images/connections/import_dashboard2.jpg b/documentation20/cn/images/connections/import_dashboard2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94b09f0ee39552bb84f7ba1f65815ce2c9548b2d 
Binary files /dev/null and b/documentation20/cn/images/connections/import_dashboard2.jpg differ diff --git a/documentation20/webdocs/assets/connector.png b/documentation20/cn/images/connector.png similarity index 100% rename from documentation20/webdocs/assets/connector.png rename to documentation20/cn/images/connector.png diff --git a/documentation20/webdocs/assets/EcoSystem.png b/documentation20/cn/images/eco_system.png similarity index 100% rename from documentation20/webdocs/assets/EcoSystem.png rename to documentation20/cn/images/eco_system.png diff --git a/documentation20/webdocs/assets/tdengine-jdbc-connector.png b/documentation20/cn/images/tdengine-jdbc-connector.png similarity index 100% rename from documentation20/webdocs/assets/tdengine-jdbc-connector.png rename to documentation20/cn/images/tdengine-jdbc-connector.png diff --git a/documentation20/webdocs/assets/replica-forward.jpg b/documentation20/webdocs/assets/replica-forward.jpg deleted file mode 100644 index 00b8c357b9ca2f067127c7dfae37b386b268f8dd..0000000000000000000000000000000000000000 Binary files a/documentation20/webdocs/assets/replica-forward.jpg and /dev/null differ diff --git a/documentation20/webdocs/assets/replica-restore.jpg b/documentation20/webdocs/assets/replica-restore.jpg deleted file mode 100644 index 6da1bed4cc90b8a119b2fddf5f66ef3ab1b0a2fd..0000000000000000000000000000000000000000 Binary files a/documentation20/webdocs/assets/replica-restore.jpg and /dev/null differ diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md deleted file mode 100644 index 20f9bcb7307c68f1330519d8f6ed249419bf46db..0000000000000000000000000000000000000000 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ /dev/null @@ -1,145 +0,0 @@ -# TDengine文档 - -TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是[数据模型](https://www.taosdata.com/cn/documentation20/data-model-and-architecture)与[数据建模](https://www.taosdata.com/cn/documentation20/model)一节。除本文档之外,欢迎[下载产品白皮书](https://www.taosdata.com/downloads/TDengine White Paper.pdf)。如需查阅TDengine 1.6 文档,请点击[这里](https://www.taosdata.com/cn/documentation16/)访问。 - -## TDengine介绍 - -- TDengine 简介及特色 -- TDengine 适用场景 - -## [立即开始](https://www.taosdata.com/cn/getting-started) - -- [快捷安装](https://www.taosdata.com/cn/documentation20/getting-started/#快捷安装):可通过源码、安装包或docker安装,三秒钟搞定 -- [轻松启动](https://www.taosdata.com/cn/documentation20/getting-started/#轻松启动):使用systemctl 启停TDengine -- [命令行程序TAOS](https://www.taosdata.com/cn/documentation20/getting-started/#TDengine命令行程序):访问TDengine的简便方式 -- [极速体验](https://www.taosdata.com/cn/documentation20/getting-started/#TDengine-极速体验):运行示例程序,快速体验高效的数据插入、查询 - -## [数据模型和整体架构](https://www.taosdata.com/cn/documentation20/architecture) - -- [数据模型](https://www.taosdata.com/cn/documentation20/architecture/#数据模型):关系型数据库模型,但要求每个采集点单独建表 -- [集群与基本逻辑单元](https://www.taosdata.com/cn/documentation20/architecture/#集群与基本逻辑单元):吸取NoSQL优点,支持水平扩展,支持高可靠 -- [存储模型与数据分区、分片](https://www.taosdata.com/cn/documentation20/architecture/#存储模型与数据分区、分片):标签数据与时序数据完全分离,按vnode和时间两个维度对数据切分 -- [数据写入与复制流程](https://www.taosdata.com/cn/documentation20/architecture/#数据写入与复制流程):先写入WAL、之后写入缓存,再给应用确认,支持多副本 -- [缓存与持久化](https://www.taosdata.com/cn/documentation20/architecture/#缓存与持久化):最新数据缓存在内存中,但落盘时采用列式存储、超高压缩比 -- [数据查询](https://www.taosdata.com/cn/documentation20/architecture/#数据查询):支持各种函数、时间轴聚合、插值、多表聚合 - -## [数据建模](https://www.taosdata.com/cn/documentation20/model) - -- 
[创建库](https://www.taosdata.com/cn/documentation20/model/#创建库):为具有相似数据特征的数据采集点创建一个库 -- [创建超级表](https://www.taosdata.com/cn/documentation20/model/#创建超级表):为同一类型的数据采集点创建一个超级表 -- [创建表](https://www.taosdata.com/cn/documentation20/model/#创建表):使用超级表做模板,为每一个具体的数据采集点单独建表 - -## [高效写入数据](https://www.taosdata.com/cn/documentation20/insert) - -- [SQL写入](https://www.taosdata.com/cn/documentation20/insert/#SQL写入):使用SQL insert命令向一张或多张表写入单条或多条记录 -- [Telegraf写入](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入):配置Telegraf, 不用任何代码,将采集数据直接写入 -- [Prometheus写入](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入):配置Prometheus, 不用任何代码,将数据直接写入 -- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入):配置EMQ X,不用任何代码,就可将 MQTT 数据直接写入 -- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入):通过 HiveMQ Extension,不用任何代码,就可将 MQTT 数据直接写入 - -## [高效查询数据](https://www.taosdata.com/cn/documentation20/queries) - -- [主要查询功能](https://www.taosdata.com/cn/documentation20/queries/#主要查询功能):支持各种标准函数,设置过滤条件,时间段查询 -- [多表聚合查询](https://www.taosdata.com/cn/documentation20/queries/#多表聚合查询):使用超级表,设置标签过滤条件,进行高效聚合查询 -- [降采样查询值](https://www.taosdata.com/cn/documentation20/queries/#降采样查询、插值):按时间段分段聚合,支持插值 - -## [高级功能](https://www.taosdata.com/cn/documentation20/advanced-features) - -- [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation20/advanced-features/#连续查询(Continuous-Query)):基于滑动窗口,定时自动的对数据流进行查询计算 -- [数据订阅(Publisher/Subscriber)](https://www.taosdata.com/cn/documentation20/advanced-features/#数据订阅(Publisher/Subscriber)):象典型的消息队列,应用可订阅接收到的最新数据 -- [缓存(Cache)](https://www.taosdata.com/cn/documentation20/advanced-features/#缓存(Cache)):每个设备最新的数据都会缓存在内存中,可快速获取 -- [报警监测](https://www.taosdata.com/cn/documentation20/advanced-features/#报警监测(Alert)):根据配置规则,自动监测超限行为数据,并主动推送 - -## [连接器](https://www.taosdata.com/cn/documentation20/connector) - -- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector):通过libtaos客户端的库,连接TDengine服务器的主要方法 -- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector-java):通过标准的JDBC API,给Java应用提供到TDengine的连接 -- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector):给Python应用提供一个连接TDengine服务器的驱动 -- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector):提供一最简单的连接TDengine服务器的方式 -- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector):给Go应用提供一个连接TDengine服务器的驱动 -- [Node.js Connector](https://www.taosdata.com/cn/documentation20/connector/#Node.js-Connector):给node应用提供一个链接TDengine服务器的驱动 - -## [与其他工具的连接](https://www.taosdata.com/cn/documentation20/connections-with-other-tools) - -- [Grafana](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#Grafana):获取并可视化保存在TDengine的数据 -- [Matlab](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#Matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据 -- [R](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#R):通过配置R的JDBC数据源访问保存在TDengine的数据 - -## [TDengine集群的安装、管理](https://www.taosdata.com/cn/documentation20/cluster) - -- [安装](https://www.taosdata.com/cn/documentation20/cluster/#创建第一个节点):与单节点的安装一样,但要设好配置文件里的参数first -- [节点管理](https://www.taosdata.com/cn/documentation20/cluster/#节点管理):增加、删除、查看集群的节点 -- [mnode的管理](https://www.taosdata.com/cn/documentation20/cluster/#Mnode的高可用):系统自动创建、无需任何人工干预 -- [负载均衡](https://www.taosdata.com/cn/documentation20/cluster/#负载均衡):一旦节点个数或负载有变化,自动进行 -- 
[节点离线处理](https://www.taosdata.com/cn/documentation20/cluster/#节点离线处理):节点离线超过一定时长,将从集群中剔除 -- [Arbitrator](https://www.taosdata.com/cn/documentation20/cluster/#Arbitrator的使用):对于偶数个副本的情形,使用它可以防止split brain - -## [TDengine的运营和维护](https://www.taosdata.com/cn/documentation20/administrator) - -- [容量规划](https://www.taosdata.com/cn/documentation20/administrator/#容量规划):根据场景,估算硬件资源 -- [容错和灾备](https://www.taosdata.com/cn/documentation20/administrator/#容错和灾备):设置正确的WAL和数据副本数 -- [系统配置](https://www.taosdata.com/cn/documentation20/administrator/#服务端配置):端口,缓存大小,文件块大小和其他系统配置 -- [用户管理](https://www.taosdata.com/cn/documentation20/administrator/#用户管理):添加、删除TDengine用户,修改用户密码 -- [数据导入](https://www.taosdata.com/cn/documentation20/administrator/#数据导入):可按脚本文件导入,也可按数据文件导入 -- [数据导出](https://www.taosdata.com/cn/documentation20/administrator/#数据导出):从shell按表导出,也可用taosdump工具做各种导出 -- [系统监控](https://www.taosdata.com/cn/documentation20/administrator/#系统监控):检查系统现有的连接、查询、流式计算,日志和事件等 -- [文件目录结构](https://www.taosdata.com/cn/documentation20/administrator/#文件目录结构):TDengine数据文件、配置文件等所在目录 -- [参数限制和保留关键字](https://www.taosdata.com/cn/documentation20/administrator/#参数限制和保留关键字):TDengine的参数限制和保留关键字列表 - -## [TAOS SQL](https://www.taosdata.com/cn/documentation20/taos-sql) - -- [支持的数据类型](https://www.taosdata.com/cn/documentation20/taos-sql/#支持的数据类型):支持时间戳、整型、浮点型、布尔型、字符型等多种数据类型 -- [数据库管理](https://www.taosdata.com/cn/documentation20/taos-sql/#数据库管理):添加、删除、查看数据库 -- [表管理](https://www.taosdata.com/cn/documentation20/taos-sql/#表管理):添加、删除、查看、修改表 -- [超级表管理](https://www.taosdata.com/cn/documentation20/taos-sql/#超级表STable管理):添加、删除、查看、修改超级表 -- [标签管理](https://www.taosdata.com/cn/documentation20/taos-sql/#超级表-STable-中-TAG-管理):增加、删除、修改标签 -- [数据写入](https://www.taosdata.com/cn/documentation20/taos-sql/#数据写入):支持单表单条、多条、多表多条写入,支持历史数据写入 -- [数据查询](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询):支持时间段、值过滤、排序、查询结果手动分页等 -- [SQL函数](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 -- [时间维度聚合](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合):将表中数据按照时间段进行切割后聚合,降维处理 -- [边界线制](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制):TAOS SQL的边界限制 -- [错误码](https://www.taosdata.com/cn/documentation20/Taos-Error-Code):TDengine 2.0 错误码以及对应的十进制码 - -## TDengine的技术设计 - -- 系统模块:taosd的功能和模块划分 -- 数据复制:支持实时同步、异步复制,保证系统的High Availibility -- [技术博客](https://www.taosdata.com/cn/blog/?categories=3):更多的技术分析和架构设计文章 - -## 常用工具 - -- [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html) -- [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html) - -## TDengine与其他数据库的对比测试 - -- [用InfluxDB开源的性能测试工具对比InfluxDB和TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) -- [TDengine与OpenTSDB对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) -- [TDengine与Cassandra对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) -- [TDengine与InfluxDB对比测试](https://www.taosdata.com/blog/2019/07/19/419.html) -- [TDengine与InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) - -##物联网大数据 - -- [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html) -- [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html) -- [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html) -- [物联网、车联网、工业互联网大数据平台,为什么推荐使用TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html) - -## [培训和FAQ](https://www.taosdata.com/cn/faq) - - - diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm 
index 3d57ece2adf4e8efd3338f52787d5d9424016e78..d24502a1cb8e69ddaf3989a89e51cc07dfb55f00 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -26,7 +26,6 @@ else ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 850c636940a34e5a2b58c227e1fd105fb57fa285..36870b2ebe49d45390d7b5ce18f6984b9e8e2ac2 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -51,7 +51,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index e13cad41204c3f28ed678a664bf7b9b7f5f8715d..230741d036a3012db1ea6351a2f30ca4b704d350 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -13,9 +13,8 @@ WORKDIR /root/${dirName}/ RUN /bin/bash install.sh -e no ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" -ENV LANG=en_US.UTF-8 -ENV LANGUAGE=en_US:en -ENV LC_ALL=en_US.UTF-8 +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 CMD ["taosd"] VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index ca2c3c66c9875a697b372f73448aa53deb887f68..b52580cfa6bdc13fcc7b716a85029fd1c679a6b6 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -35,10 +35,11 @@ done echo "verNumber=${verNumber}" -docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber} +#docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber} +docker manifest create -a tdengine/tdengine tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password -docker manifest push tdengine/tdengine:${verNumber} +docker manifest push tdengine/tdengine # how set latest version ??? 
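# note: pushing `tdengine/tdengine` with no tag only updates the :latest manifest;
# re-running the commented-out ${verNumber} form alongside it would be one way to
# answer the question above and keep a versioned manifest published as well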
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index d20a6c91cdd78a8c54b89b14d7b77807ebef4877..92c917cb3d6d4cd5f41441c9ca75a742aa3641b6 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -9,6 +9,7 @@ Summary: tdengine from taosdata Group: Application/Database License: AGPL URL: www.taosdata.com +AutoReqProv: no #BuildRoot: %_topdir/BUILDROOT BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root @@ -61,7 +62,6 @@ cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepat cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin -cp %{_compiledir}/build/bin/taosdemox %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include @@ -139,7 +139,6 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 9cec9963af6f8abdcaedeeb4e7d40f9244508be2..dca3dd2ff623672eb85b3de72bcc34e0ea5e3d8a 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -147,8 +147,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -168,7 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + + if [[ -e ${script_dir}/email ]]; then + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -176,7 +179,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : @@ -188,7 +190,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : @@ -680,7 +681,7 @@ function install_service() { install_service_on_sysvinit else # must manual 
stop taosd - kill_taosd + kill_process taosd fi } @@ -749,9 +750,22 @@ function update_TDengine() { elif ((${service_mod}==1)); then ${csudo} service taosd stop || : else - kill_taosd + kill_process taosd + fi + sleep 1 + fi + + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx fi sleep 1 + fi fi install_main_path diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index d52428dc83805b6f03a36bb39edc774e79d04398..0a0a6633e376d084532abb5f490917abd1a173f2 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -86,7 +86,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -98,7 +97,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh index 04fd23d5abb2874d0b3a68b937be529bd2c91a59..8d7463366ff46bcae2822ee3e76dbc9b588f2a89 100755 --- a/packaging/tools/install_client_power.sh +++ b/packaging/tools/install_client_power.sh @@ -86,7 +86,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/power || : if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : fi ${csudo} rm -f ${bin_link_dir}/rmpower || : @@ -98,7 +97,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || : [ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || : fi [ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || : diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 89b5ce5b4f1f9edc91b7abbf459303cd8f632edf..ba6ace400935c10caadd9426c0701c16b4f86baa 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -146,8 +146,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_powerd() { - pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | 
grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -174,7 +174,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerd || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -185,7 +184,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : [ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || : [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || : [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : @@ -652,7 +650,7 @@ function install_service() { install_service_on_sysvinit else # must manual stop powerd - kill_powerd + kill_process powerd fi } @@ -721,9 +719,21 @@ function update_PowerDB() { elif ((${service_mod}==1)); then ${csudo} service powerd stop || : else - kill_powerd + kill_process powerd fi sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi fi install_main_path diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 52a4e059065ba666e9624800a24f267f74e454f9..30e9fa51a7d9c4a98d2c8f300287ebd242fecd74 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox\ + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \ ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" @@ -55,7 +55,11 @@ else fi header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_client.sh" diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index 15f8994e945aac89fc30b1fe7eaaf3c72ea8c105..181536b7f19d252164201291d9c37cade6cf3490 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -54,7 +54,11 @@ else fi header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" 
+else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_client_power.sh" @@ -77,7 +81,6 @@ if [ "$osType" != "Darwin" ]; then cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 267338ed06d528be84d13f4a871865f76b940344..36b1fe5bd88950f69a56e84d98fba9c4dae0cf05 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,13 +36,18 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${build_dir}/bin/tarbitrator\ + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + install_files="${script_dir}/install.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 7227a08b7ab5e6fdfbdb4801fac3d174fe481f1e..554e7884b1c3db69acd3ba0e3234e468b1d31c79 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -42,7 +42,11 @@ fi lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_power.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" @@ -78,7 +82,6 @@ else cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index c6ef73932d8dbaedbe82fdc69997ef092c9953e7..8665b3fec3a392b3dcae8c6a197625ba85ed953b 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -96,7 +96,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -107,7 +106,6 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s 
${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${bin_dir}/taosdemox ] && ${csudo} ln -s ${bin_dir}/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 8d96ef851c9812dcf7764dd1150350721bd2ec6e..2f2660d44635c86df3b51d2b86e37b3399869a88 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -72,7 +72,6 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index e84cdd2620f7e83ea12af7500c2385892d7072fa..7579162dc60e290754e71ed6a71c10cfaee5537b 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -38,7 +38,6 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh index 1842e86a5b2c55b6c2aac02e6150bc9eaa5836a6..580c46e2077d7f21e06d4d4a8f69dcd5b6bbf51d 100755 --- a/packaging/tools/remove_client_power.sh +++ b/packaging/tools/remove_client_power.sh @@ -38,7 +38,6 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/set_core || : diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh index 59073105de9828bc1255d269bdc773d613500e5a..816869cf444d8001e0c0aae30840d2c40a9e6af4 100755 --- a/packaging/tools/remove_power.sh +++ b/packaging/tools/remove_power.sh @@ -72,7 +72,6 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerd || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 102fea6b9e3f8892d8e8af26c6ee54bdaa948fb8..7b6bfee42bd1a533798365edecacdff528420d73 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.14.0' +version: '2.0.16.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.14.0 + - usr/lib/libtaos.so.2.0.16.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d67aba4b66be6ec2f13e994248c8214178b55663..b0f2cc0a48f906b40d7be5185ae5f081c2ed4418 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) # Base compile diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index bcb37690876462c227b43343d9bd3228d4405963..967635e52ce20761dbd674a380563deeeb9af189 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 660ad564a5cb611da4411b5f23eb494f6255c7af..fb43751b9e8fd715d538abb1198e1bdfd0a2e9ae 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 9a1c87c1a915dbaab2ab529c4b5f7f9cab745d60..60a8ff894ff98fa1b9021c73dbe57c246d88c86e 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -271,7 +271,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); bool hasMoreVnodesToTry(SSqlObj *pSql); bool hasMoreClauseToTry(SSqlObj* pSql); -void tscFreeQueryInfo(SSqlCmd* pCmd); +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta); void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index bdb0173e9c86870446a0d31eed09d5f81d82dbba..0bfbf3f946a4a1428549a493ab990a2be2173266 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -334,7 +334,7 @@ typedef struct SSubqueryState { typedef struct SSqlObj { void *signature; - pthread_t owner; // owner of sql object, by which it is executed + int64_t owner; // owner of sql object, by which it is executed STscObj *pTscObj; int64_t rpcRid; __async_cb_func_t fp; @@ -442,6 +442,8 @@ void tscCloseTscObj(void *pObj); TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, TAOS **taos); TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res); +TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param); + void waitForQueryRsp(void *param, TAOS_RES *tres, int code); void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen); @@ -449,6 +451,9 @@ void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *para void tscImportDataFromFile(SSqlObj *pSql); void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); bool tscIsUpdateQuery(SSqlObj* pSql); +char* tscGetSqlStr(SSqlObj* pSql); +bool tscIsQueryWithLimit(SSqlObj* pSql); + bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes); char *tscGetErrorMsgPayload(SSqlCmd *pCmd); diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 
dbf312e3f5f9b0ee45d3a1d5466cb1b4be870f37..5cba897b3028a2d6301e9363ca6180f6b41022f8 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -74,12 +74,16 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para // TODO return the correct error code to client in tscQueueAsyncError void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) { + taos_query_ra(taos, sqlstr, fp, param); +} + +TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { tscError("bug!!! pObj:%p", pObj); terrno = TSDB_CODE_TSC_DISCONNECTED; tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED); - return; + return NULL; } int32_t sqlLen = (int32_t)strlen(sqlstr); @@ -87,7 +91,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa tscError("sql string exceeds max length:%d", tsMaxSQLStringLen); terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT; tscQueueAsyncError(fp, param, terrno); - return; + return NULL; } nPrintTsc("%s", sqlstr); @@ -96,12 +100,15 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa if (pSql == NULL) { tscError("failed to malloc sqlObj"); tscQueueAsyncError(fp, param, TSDB_CODE_TSC_OUT_OF_MEMORY); - return; + return NULL; } doAsyncQuery(pObj, pSql, fp, param, sqlstr, sqlLen); + + return pSql; } + static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { if (tres == NULL) { return; @@ -273,14 +280,15 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { taosScheduleTask(tscQhandle, &schedMsg); } -void tscAsyncResultOnError(SSqlObj *pSql) { +static void tscAsyncResultCallback(SSchedMsg *pMsg) { + SSqlObj* pSql = pMsg->ahandle; if (pSql == NULL || pSql->signature != pSql) { tscDebug("%p SqlObj is freed, not add into queue async res", pSql); return; } assert(pSql->res.code != TSDB_CODE_SUCCESS); - tscError("%p invoke user specified function due to error occured, code:%s", pSql, tstrerror(pSql->res.code)); + tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code)); SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ @@ -291,6 +299,16 @@ void tscAsyncResultOnError(SSqlObj *pSql) { (*pSql->fp)(pSql->param, pSql, pRes->code); } +void tscAsyncResultOnError(SSqlObj* pSql) { + SSchedMsg schedMsg = {0}; + schedMsg.fp = tscAsyncResultCallback; + schedMsg.ahandle = pSql; + schedMsg.thandle = (void *)1; + schedMsg.msg = 0; + taosScheduleTask(tscQhandle, &schedMsg); +} + + int tscSendMsgToServer(SSqlObj *pSql); void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index bb015bce3d1a8ea9d92f3915d1edccd7f31539a3..820572859e88540656b8ce42dc3a6e77467c1423 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -892,7 +892,12 @@ int tscProcessLocalCmd(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; if (pCmd->command == TSDB_SQL_CFG_LOCAL) { - pRes->code = (uint8_t)taosCfgDynamicOptions(pCmd->payload); + if (taosCfgDynamicOptions(pCmd->payload)) { + pRes->code = TSDB_CODE_SUCCESS; + } else { + pRes->code = TSDB_CODE_COM_INVALID_CFG_MSG; + } + pRes->numOfRows = 0; } else if (pCmd->command == TSDB_SQL_DESCRIBE_TABLE) { pRes->code = (uint8_t)tscProcessDescribeTable(pSql); } else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { diff --git a/src/client/src/tscProfile.c 
b/src/client/src/tscProfile.c index f813ff85d99e6642827a49defe6b96f29720fc57..9203dcfbbab8d4b512b490bf13ab91fe1b475c22 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -233,6 +233,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { // We extract the lock to tscBuildHeartBeatMsg function. + int64_t now = taosGetTimestampMs(); SSqlObj *pSql = pObj->sqlList; while (pSql) { /* @@ -247,7 +248,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql)); pQdesc->stime = htobe64(pSql->stime); pQdesc->queryId = htonl(pSql->queryId); - pQdesc->useconds = htobe64(pSql->res.useconds); + //pQdesc->useconds = htobe64(pSql->res.useconds); + pQdesc->useconds = htobe64(now - pSql->stime); pQdesc->qHandle = htobe64(pSql->res.qhandle); pHeartbeat->numOfQueries++; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 5fed6d6df3c38b1887a830b7d6b9425095a39261..37fae6b7f56a12c5f5771211d69ae0ccc6d889cc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -128,6 +128,7 @@ static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +static bool validateDebugFlag(int32_t flag); int16_t getNewResColId(SQueryInfo* pQueryInfo) { return pQueryInfo->resColumnId--; @@ -172,6 +173,16 @@ static uint8_t convertOptr(SStrToken *pToken) { } } +static bool validateDebugFlag(int32_t v) { + const static int validFlag[] = {131, 135, 143}; + + for (int i = 0; i < tListLen(validFlag); i++) { + if (v == validFlag[i]) { + return true; + } + } + return false; +} /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. 
@@ -564,16 +575,16 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a); - SStrToken* t = taosArrayGet(pMiscInfo->a, 0); - SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1); + assert(numOfToken >= 1 && numOfToken <= 2); + SStrToken* t = taosArrayGet(pMiscInfo->a, 0); strncpy(pCmd->payload, t->z, t->n); if (numOfToken == 2) { + SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1); pCmd->payload[t->n] = ' '; // add sep strncpy(&pCmd->payload[t->n + 1], t1->z, t1->n); } - - break; + return TSDB_CODE_SUCCESS; } case TSDB_SQL_CREATE_TABLE: { @@ -1132,7 +1143,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { return false; } - if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_NCHAR)) { + if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } @@ -1196,7 +1207,7 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { return false; } - if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_NCHAR) { + if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_UBIGINT) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } @@ -1872,6 +1883,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg6 = "function applied to tags not allowed"; const char* msg7 = "normal table can not apply this function"; const char* msg8 = "multi-columns selection does not support alias column name"; + const char* msg10 = "diff can not be applied to unsigned numeric type"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -1994,6 +2006,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } + if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -2005,6 +2018,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (!IS_NUMERIC_TYPE(colType)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } else if (IS_UNSIGNED_NUMERIC_TYPE(colType) && functionId == TSDB_FUNC_DIFF) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); } int16_t resultType = 0; @@ -5270,13 +5285,15 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) { SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}}; + SStrToken* pOptionToken = taosArrayGet(pOptions->a, 0); if (numOfToken == 1) { // reset log does not need value for (int32_t i = 0; i < 1; ++i) { SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i]; - if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { + if ((pOption->len == pOptionToken->n) && + (strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0)) { return TSDB_CODE_SUCCESS; } } @@ -5284,15 +5301,14 @@ SStrToken* pValToken = taosArrayGet(pOptions->a, 1); int32_t val = strtol(pValToken->z, NULL, 10); - if (val < 131 || val > 199) { - // options value is out of valid range + if (!validateDebugFlag(val)) { return TSDB_CODE_TSC_INVALID_SQL; } for (int32_t i = 1; i < 
tListLen(LOCAL_DYNAMIC_CFG_OPTIONS); ++i) { SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i]; - if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { - // options is valid + if ((pOption->len == pOptionToken->n) + && (strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0)) { return TSDB_CODE_SUCCESS; } } @@ -6039,8 +6055,8 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ index = 2; } else { for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { - if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && - functionsInfo[i].len == pExpr->operand.n) { + if (strncasecmp(functionsInfo[i].name, pExpr->token.z, functionsInfo[i].len) == 0 && + functionsInfo[i].len == pExpr->token.n) { index = i; break; } @@ -6277,16 +6293,14 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { // get table meta from mnode code = tNameExtractFullName(&pStableMetaInfo->name, pCreateTableInfo->tagdata.name); - SArray* pList = pCreateTableInfo->pTagVals; + SArray* pValList = pCreateTableInfo->pTagVals; code = tscGetTableMeta(pSql, pStableMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - size_t size = taosArrayGetSize(pList); - if (tscGetNumOfTags(pStableMetaInfo->pTableMeta) != size) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); - } + size_t valSize = taosArrayGetSize(pValList); + // too long tag values will return invalid sql, not be truncated automatically SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta); @@ -6297,36 +6311,115 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } + + SArray* pNameList = NULL; + size_t nameSize = 0; + int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta); int32_t ret = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < size; ++i) { - SSchema* pSchema = &pTagSchema[i]; - tVariantListItem* pItem = taosArrayGet(pList, i); - char tagVal[TSDB_MAX_TAGS_LEN]; - if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - if (pItem->pVar.nLen > pSchema->bytes) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } + if (pCreateTableInfo->pTagNames) { + pNameList = pCreateTableInfo->pTagNames; + nameSize = taosArrayGetSize(pNameList); + + if (valSize != nameSize) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + if (schemaSize < valSize) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + bool findColumnIndex = false; + + for (int32_t i = 0; i < nameSize; ++i) { + SStrToken* sToken = taosArrayGet(pNameList, i); + if (TK_STRING == sToken->type) { + tscDequoteAndTrimToken(sToken); + } + + tVariantListItem* pItem = taosArrayGet(pValList, i); + + findColumnIndex = false; + + // todo speedup by using hash list + for (int32_t t = 0; t < schemaSize; ++t) { + if (strncmp(sToken->z, pTagSchema[t].name, sToken->n) == 0 && strlen(pTagSchema[t].name) == sToken->n) { + SSchema* pSchema = &pTagSchema[t]; - // check again after the convert since it may be converted from binary to nchar. 
- if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - int16_t len = varDataTLen(tagVal); - if (len > pSchema->bytes) { + char tagVal[TSDB_MAX_TAGS_LEN]; + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + if (pItem->pVar.nLen > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + + // check again after the convert since it may be converted from binary to nchar. + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + int16_t len = varDataTLen(tagVal); + if (len > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + + findColumnIndex = true; + break; + } + } + + if (!findColumnIndex) { tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z); } } - - if (ret != TSDB_CODE_SUCCESS) { + } else { + if (schemaSize != valSize) { tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + for (int32_t i = 0; i < valSize; ++i) { + SSchema* pSchema = &pTagSchema[i]; + tVariantListItem* pItem = taosArrayGet(pValList, i); + + char tagVal[TSDB_MAX_TAGS_LEN]; + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + if (pItem->pVar.nLen > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + + // check again after the convert since it may be converted from binary to nchar. 
+ if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + int16_t len = varDataTLen(tagVal); + if (len > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + } } SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 444bdc025d0b06b9be96e53f761373b5454d9a35..d243e797c2c8c8471478f0b1c87b69bab9f0392b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -330,7 +330,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pSql->cmd.submitSchema = 1; } - if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && + if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || @@ -418,6 +418,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { bool shouldFree = tscShouldBeFreed(pSql); if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + if (rpcMsg->code != TSDB_CODE_SUCCESS) { + pRes->code = rpcMsg->code; + } rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code; (*pSql->fp)(pSql->param, pSql, rpcMsg->code); } @@ -451,7 +454,7 @@ int doProcessSql(SSqlObj *pSql) { if (pRes->code != TSDB_CODE_SUCCESS) { tscAsyncResultOnError(pSql); - return pRes->code; + return TSDB_CODE_SUCCESS; } int32_t code = tscSendMsgToServer(pSql); @@ -460,7 +463,7 @@ int doProcessSql(SSqlObj *pSql) { if (code != TSDB_CODE_SUCCESS) { pRes->code = code; tscAsyncResultOnError(pSql); - return code; + return TSDB_CODE_SUCCESS; } return TSDB_CODE_SUCCESS; @@ -607,7 +610,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { } return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize + - tableSerialize + sqlLen + 4096; + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { @@ -812,6 +815,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + // the queried table has been removed and a new table with the same name has already been created + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; @@ -875,6 +885,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // this should be switched to projection query if (pExpr != NULL) { + // the queried table has been removed and a new table with the same name has already been created + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, 
pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; @@ -1809,7 +1826,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { pMetaMsg->uid = htobe64(pMetaMsg->uid); pMetaMsg->contLen = htons(pMetaMsg->contLen); pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns); - + if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) && (pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) { tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId, diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8a240accecc904675bd305c1966891bbfc62916a..13539a9b197875210d76c86bd93ec986190c0c37 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -52,7 +52,9 @@ static bool validPassword(const char* passwd) { static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, TAOS **taos) { - taos_init(); + if (taos_init()) { + return NULL; + } if (!validUserName(user)) { terrno = TSDB_CODE_TSC_INVALID_USER_LENGTH; @@ -696,7 +698,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) { } tscAsyncResultOnError(pSubObj); - taosReleaseRef(tscObjRef, pSubObj->self); + // taosReleaseRef(tscObjRef, pSubObj->self); } if (pSql->subState.numOfSub <= 0) { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b46829aedadd43ae42fb2d15c43fbf1ce0ced824..c00b1300384ff9ce2dc4c42b7c603e58a245d6ba 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -95,11 +95,21 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { pthread_mutex_lock(&subState->mutex); + bool done = allSubqueryDone(pParentSql); + + if (done) { + tscDebug("%p subquery:%p,%d all subs already done", pParentSql, pSql, idx); + + pthread_mutex_unlock(&subState->mutex); + + return false; + } + tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx); subState->states[idx] = 1; - bool done = allSubqueryDone(pParentSql); + done = allSubqueryDone(pParentSql); pthread_mutex_unlock(&subState->mutex); @@ -674,10 +684,14 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr prev = tt; } - pTableMetaInfo->pVgroupTables = result; pTableMetaInfo->vgroupIndex = 0; + + if (taosArrayGetSize(result) <= 0) { + pTableMetaInfo->pVgroupTables = NULL; + taosArrayDestroy(result); + } else { + pTableMetaInfo->pVgroupTables = result; - if (taosArrayGetSize(result) > 0) { SVgroupTableInfo* g = taosArrayGet(result, taosArrayGetSize(result) - 1); tscDebug("%p vgId:%d, tables:%"PRIzu, pSql, g->vgInfo.vgId, taosArrayGetSize(g->itemList)); } @@ -1841,7 +1855,7 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S TSKEY key = INT64_MIN; for(int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { continue; } @@ -1974,7 +1988,8 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { // set the parameters for the second round query process SSqlCmd *pPCmd = &pParent->cmd; SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0); - + int32_t resRows = pSup->numOfRows; + if (pSup->numOfRows > 0) { SBufferWriter bw = tbufInitWriter(NULL, false); interResToBinary(&bw, pSup->pResult, 
pSup->tagLen); @@ -1992,6 +2007,12 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { taos_free_result(pSql); + if (resRows == 0) { + pParent->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParent->fp)(pParent->param, pParent, 0); + return; + } + pQueryInfo1->round = 1; tscDoQuery(pParent); } @@ -2093,7 +2114,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG); p->resColId = pExpr->resColId; } else if (pExpr->functionId == TSDB_FUNC_PRJ) { - int32_t num = taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); + int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); for(int32_t k = 0; k < num; ++k) { SColIndex* pIndex = taosArrayGet(pNewQueryInfo->groupbyExpr.columnInfo, k); if (pExpr->colInfo.colId == pIndex->colId) { @@ -2302,7 +2323,9 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ -static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) { +static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code, int32_t *sent) { + *sent = 0; + SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport)); if (trsupport == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2334,21 +2357,28 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d", - trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex); + oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex); pParentSql->res.code = terrno; - trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + tfree(trsupport); return pParentSql->res.code; } int32_t ret = tscProcessSql(pNew); + *sent = 1; + // if failed to process sql, let following code handle the pSql if (ret == TSDB_CODE_SUCCESS) { + tscFreeRetrieveSup(pSql); taos_free_result(pSql); return ret; - } else { + } else { + pParentSql->pSubs[trsupport->subqueryIndex] = pSql; + tscFreeRetrieveSup(pNew); + taos_free_result(pNew); return ret; } } @@ -2385,7 +2415,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO subqueryIndex, tstrerror(pParentSql->res.code)); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) { - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { // reach the maximum retry count, abort @@ -2507,7 +2540,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SRetrieveSupport *trsupport = (SRetrieveSupport *)param; if (pSql->param == NULL || param == NULL) { tscDebug("%p already freed in dnodecallback", pSql); - assert(pSql->res.code == TSDB_CODE_TSC_QUERY_CANCELLED); return; } @@ -2539,7 +2571,10 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed 
code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { @@ -2666,7 +2701,11 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) { + + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, code, &sent); + if (sent) { return; } } else { @@ -2771,7 +2810,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) numOfFailed += 1; // clean up tableMeta in cache - tscFreeQueryInfo(&pSql->cmd); + tscFreeQueryInfo(&pSql->cmd, false); SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0); STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0); tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 52ede2318fbe62eafbfe763a03aa4c24c7e72983..bd79f81846cbfd730fd9b3134b89ba0b328472a1 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,10 +47,11 @@ void *tscRpcCache; // cache to keep rpc obj int32_t tscNumOfThreads = 1; // num of rpc threads static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +static volatile int tscInitRes = 0; void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosGetDisk(); - taosTmrReset(tscCheckDiskUsage, 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); + taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } void tscFreeRpcObj(void *param) { assert(param); @@ -64,7 +65,7 @@ void tscReleaseRpc(void *param) { return; } pthread_mutex_lock(&rpcObjMutex); - taosCacheRelease(tscRpcCache, (void *)¶m, true); + taosCacheRelease(tscRpcCache, (void *)¶m, false); pthread_mutex_unlock(&rpcObjMutex); } @@ -82,7 +83,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry memset(&rpcInit, 0, sizeof(rpcInit)); rpcInit.localPort = 0; rpcInit.label = "TSC"; - rpcInit.numOfThreads = tscNumOfThreads * 2; + rpcInit.numOfThreads = tscNumOfThreads; rpcInit.cfp = tscProcessMsgFromServer; rpcInit.sessions = tsMaxConnections; rpcInit.connType = TAOS_CONN_CLIENT; @@ -101,7 +102,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry tscError("failed to init connection to TDengine"); return -1; } - pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*10); + pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5); if (pRpcObj == NULL) { rpcClose(rpcObj.pDnodeConn); pthread_mutex_unlock(&rpcObjMutex); @@ -137,7 +138,11 @@ void taos_init_imp(void) { } taosReadGlobalCfg(); - taosCheckGlobalCfg(); + if (taosCheckGlobalCfg()) { + tscInitRes = -1; + return; + } + taosInitNotes(); rpcInit(); @@ -154,16 +159,16 @@ void taos_init_imp(void) { if (tscNumOfThreads < 2) { tscNumOfThreads = 2; } - tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { tscError("failed to init scheduler"); + tscInitRes = -1; return; } 
tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC"); if(0 == tscEmbedded){ - taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr); + taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } if (tscTableMetaInfo == NULL) { @@ -186,7 +191,7 @@ void taos_init_imp(void) { tscDebug("client is initialized successfully"); } -void taos_init() { pthread_once(&tscinit, taos_init_imp); } +int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;} // this function may be called by user or system, or by both simultaneously. void taos_cleanup(void) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c5b84a6ad67728f132aa680205a353dc0c33296..8fd4e6e6f1f143f8569b77c4273e2f1ffe51710a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -30,7 +30,7 @@ #include "ttokendef.h" static void freeQueryInfoImpl(SQueryInfo* pQueryInfo); -static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo); +static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta); static void tscStrToLower(char *str, int32_t n) { if (str == NULL || n <= 0) { return;} @@ -383,7 +383,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } -void tscFreeQueryInfo(SSqlCmd* pCmd) { +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) { if (pCmd == NULL || pCmd->numOfClause == 0) { return; } @@ -392,7 +392,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i); freeQueryInfoImpl(pQueryInfo); - clearAllTableMetaInfo(pQueryInfo); + clearAllTableMetaInfo(pQueryInfo, removeMeta); tfree(pQueryInfo); } @@ -420,7 +420,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) { pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta); pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); - tscFreeQueryInfo(pCmd); + tscFreeQueryInfo(pCmd, removeMeta); } void tscFreeSqlResult(SSqlObj* pSql) { @@ -1868,10 +1868,17 @@ SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) { return pa; } -void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) { +void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) { for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); + if (removeMeta) { + char name[TSDB_TABLE_FNAME_LEN] = {0}; + tNameExtractFullName(&pTableMetaInfo->name, name); + + taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); + } + tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); tscClearTableMetaInfo(pTableMetaInfo); free(pTableMetaInfo); @@ -2078,6 +2085,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->parseFinished = 1; pnCmd->pTableNameList = NULL; pnCmd->pTableBlockHashList = NULL; + pnCmd->tagData.data = NULL; + pnCmd->tagData.dataLen = 0; if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2329,6 +2338,35 @@ bool tscIsUpdateQuery(SSqlObj* pSql) { return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command); } +char* tscGetSqlStr(SSqlObj* pSql) { + if (pSql == NULL || pSql->signature != pSql) { + return NULL; + } + + return pSql->sqlstr; +} + +bool tscIsQueryWithLimit(SSqlObj* pSql) { + if (pSql == NULL || pSql->signature != pSql) { + return false; + } + + SSqlCmd* pCmd = &pSql->cmd; + for (int32_t i = 0; i < 
pCmd->numOfClause; ++i) { + SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i); + if (pqi == NULL) { + continue; + } + + if (pqi->limit.limit > 0) { + return true; + } + } + + return false; +} + + int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql) { const char* msgFormat1 = "syntax error near \'%s\'"; const char* msgFormat2 = "syntax error near \'%s\' (%s)"; @@ -2568,11 +2606,7 @@ bool tscSetSqlOwner(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; // set the sql object owner -#ifdef __APPLE__ - pthread_t threadId = (pthread_t)taosGetSelfPthreadId(); -#else // __APPLE__ - uint64_t threadId = taosGetSelfPthreadId(); -#endif // __APPLE__ + int64_t threadId = taosGetSelfPthreadId(); if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) { pRes->code = TSDB_CODE_QRY_IN_EXEC; return false; @@ -2582,7 +2616,6 @@ bool tscSetSqlOwner(SSqlObj* pSql) { } void tscClearSqlOwner(SSqlObj* pSql) { - assert(taosCheckPthreadValid(pSql->owner)); atomic_store_64(&pSql->owner, 0); } @@ -2709,7 +2742,11 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild) { uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) { assert(pTableMeta != NULL); - int32_t totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; + int32_t totalCols = 0; + if (pTableMeta->tableInfo.numOfColumns >= 0 && pTableMeta->tableInfo.numOfTags >= 0) { + totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; + } + return sizeof(STableMeta) + totalCols * sizeof(SSchema); } diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt index 4ea0b80bf7c1c870532f1bc3cac313c43f1a57f0..f07af85e255eaa5e285d9a4ce0853251e0fdaa21 100644 --- a/src/client/tests/CMakeLists.txt +++ b/src/client/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index df0ac7986599da3e53d97f64c0c5113a357f9177..0da7bda994db83882e36e9d52a7983635ad85330 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index ed5ebaa80f590c993a03f7529561d4b9244a6b42..959654d1585b13c31e96ab7972d0aa2782a51901 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -134,6 +134,22 @@ typedef uint64_t TKEY; #define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key))) #define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1)) +#define MIN_TS_KEY ((TSKEY)0x8000000000000001) +#define MAX_TS_KEY ((TSKEY)0x3fffffffffffffff) + +#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? 
MAX_TS_KEY : key)) + static FORCE_INLINE TKEY keyToTkey(TSKEY key) { + TSKEY lkey = key; + if (key > MAX_TS_KEY) { + lkey = MAX_TS_KEY; + } else if (key < MIN_TS_KEY) { + lkey = MIN_TS_KEY; + } + + return tdGetTKEY(lkey); +} + static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) { TSKEY key1 = tdGetKey(*(TKEY *)tkey1); TSKEY key2 = tdGetKey(*(TKEY *)tkey2); diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 3d88cf83127672f8f5780da695e24cee4850c035..4ed4e0473bdf3c11c2838842b24c15d886a02f2e 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -47,7 +47,7 @@ char tsEmail[TSDB_FQDN_LEN] = {0}; // common int32_t tsRpcTimer = 1000; int32_t tsRpcMaxTime = 600; // seconds; -int32_t tsMaxShellConns = 5000; +int32_t tsMaxShellConns = 50000; int32_t tsMaxConnections = 5000; int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; @@ -273,7 +273,7 @@ bool taosCfgDynamicOptions(char *msg) { int32_t vint = 0; paGetToken(msg, &option, &olen); - if (olen == 0) return TSDB_CODE_COM_INVALID_CFG_MSG; + if (olen == 0) return false; paGetToken(option + olen + 1, &value, &vlen); if (vlen == 0) @@ -316,11 +316,9 @@ bool taosCfgDynamicOptions(char *msg) { } return true; } - if (strncasecmp(cfg->option, "debugFlag", olen) == 0) { - taosSetAllDebugFlag(); + taosSetAllDebugFlag(); } - return true; } @@ -375,6 +373,23 @@ static void taosCheckDataDirCfg() { } } +static int32_t taosCheckTmpDir(void) { + if (strlen(tsTempDir) <= 0){ + uError("tempDir is not set"); + return -1; + } + + DIR *dir = opendir(tsTempDir); + if (dir == NULL) { + uError("can not open tempDir:%s, error:%s", tsTempDir, strerror(errno)); + return -1; + } + + closedir(dir); + + return 0; +} + static void doInitGlobalConfig(void) { osInit(); srand(taosSafeRand()); @@ -415,10 +430,10 @@ static void doInitGlobalConfig(void) { // port cfg.option = "serverPort"; cfg.ptr = &tsServerPort; - cfg.valType = TAOS_CFG_VTYPE_INT16; + cfg.valType = TAOS_CFG_VTYPE_UINT16; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 1; - cfg.maxValue = 65535; + cfg.maxValue = 65056; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -1136,7 +1151,7 @@ static void doInitGlobalConfig(void) { cfg.ptr = &tsHttpMaxThreads; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; - cfg.minValue = 1; + cfg.minValue = 2; cfg.maxValue = 1000000; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; @@ -1490,6 +1505,11 @@ int32_t taosCheckGlobalCfg() { } taosCheckDataDirCfg(); + + if (taosCheckTmpDir()) { + return -1; + } + taosGetSystemInfo(); tsSetLocale(); @@ -1503,6 +1523,13 @@ int32_t taosCheckGlobalCfg() { tsNumOfCores = 1; } + if (tsHttpMaxThreads == 2) { + int32_t halfNumOfCores = tsNumOfCores >> 1; + if (halfNumOfCores > 2) { + tsHttpMaxThreads = halfNumOfCores; + } + } + if (tsMaxTablePerVnode < tsMinTablePerVnode) { uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode); @@ -1535,6 +1562,8 @@ int32_t taosCheckGlobalCfg() { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } + uInfo(" check global cfg completed"); + uInfo("=================================="); taosPrintGlobalCfg(); return 0; diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 59b09c8695d59c1fd0584f73a7e4be1eb1ab1c0b..3c50ac566b5fe49a619a50c9eac446285a2b431c 100644 --- 
a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.21-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 34b0a3c6d37530ad01600a455d426c1606613c23..1c24b621efe52e152ac8ea70ef3c2afdbc72d5b0 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.21 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 0626bcf1fb1161cc38ad05002b984b2200b68d68..7a421eff2296cc8b61ec6e07174747ef8e224ff0 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.21 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -34,23 +34,9 @@ UTF-8 1.8 3.6.0 - 1.1.2 - 3.5 - - commons-logging - commons-logging - ${commons-logging.version} - - - * - * - - - - junit junit @@ -64,25 +50,12 @@ httpclient 4.5.8 - - org.apache.commons - commons-lang3 - 3.9 - com.alibaba fastjson 1.2.58 - - - org.apache.commons - commons-dbcp2 - 2.7.0 - - - @@ -130,8 +103,11 @@ **/AppMemoryLeakTest.java + **/AuthenticationTest.java **/TaosInfoMonitorTest.java **/FailOverTest.java + **/InvalidResultSetPointerTest.java + **/RestfulConnectionTest.java true diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java new file mode 100644 index 0000000000000000000000000000000000000000..a2496b0099fe27c6380b1e2bac954ac361c939ba --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -0,0 +1,530 @@ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.*; + +public abstract class AbstractConnection extends WrapperImpl implements Connection { + + protected volatile boolean isClosed; + protected volatile String catalog; + protected volatile Properties clientInfoProps = new Properties(); + + @Override + public abstract Statement createStatement() throws SQLException; + + @Override + public abstract PreparedStatement prepareStatement(String sql) throws SQLException; + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + // do nothing + return sql; + } + + @Override + public void 
setAutoCommit(boolean autoCommit) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean getAutoCommit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return true; + } + + @Override + public void commit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public void rollback() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public abstract void close() throws SQLException; + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public abstract DatabaseMetaData getMetaData() throws SQLException; + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean isReadOnly() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + return true; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + /* + try (Statement stmt = createStatement()) { + boolean execute = stmt.execute("use " + catalog); + if (execute) + this.catalog = catalog; + } catch (SQLException e) { + // do nothing + } + */ + + this.catalog = catalog; + } + + @Override + public String getCatalog() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return this.catalog; + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (level) { + case Connection.TRANSACTION_NONE: + break; + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_REPEATABLE_READ: + case Connection.TRANSACTION_SERIALIZABLE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + //do nothing + } + + @Override + public int getTransactionIsolation() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return Connection.TRANSACTION_NONE; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return null; + } + + @Override + public void clearWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetType) { + case ResultSet.TYPE_FORWARD_ONLY: + break; + case ResultSet.TYPE_SCROLL_INSENSITIVE: + case ResultSet.TYPE_SCROLL_SENSITIVE: + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + switch (resultSetConcurrency) { + case ResultSet.CONCUR_READ_ONLY: + break; + case ResultSet.CONCUR_UPDATABLE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + return createStatement(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetType) { + case ResultSet.TYPE_FORWARD_ONLY: + break; + case ResultSet.TYPE_SCROLL_INSENSITIVE: + case ResultSet.TYPE_SCROLL_SENSITIVE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + switch (resultSetConcurrency) { + case ResultSet.CONCUR_READ_ONLY: + break; + case ResultSet.CONCUR_UPDATABLE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + return prepareStatement(sql); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Map<String, Class<?>> getTypeMap() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setTypeMap(Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (holdability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + //do nothing + } + + @Override + public int getHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + if (isClosed()) + throw
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetHoldability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + return createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetHoldability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + return prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (autoGeneratedKeys) { + case Statement.RETURN_GENERATED_KEYS: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + case Statement.NO_GENERATED_KEYS: + break; + } + return prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob createClob() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob createBlob() throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob createNClob() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + //true if the connection is valid, false otherwise + if (isClosed()) + return false; + if (timeout < 0) //SQLException - if the value supplied for timeout is less than 0 + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + + ExecutorService executor = Executors.newCachedThreadPool(); + Future<Boolean> future = executor.submit(() -> { + int status; + try (Statement stmt = createStatement()) { + ResultSet resultSet = stmt.executeQuery("select server_status()"); + resultSet.next(); + status = resultSet.getInt("server_status()"); + resultSet.close(); + } + return status == 1; + }); + + boolean status = false; + try { + if (timeout == 0) + status = future.get(); + else + status = future.get(timeout, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } catch (TimeoutException e) { + future.cancel(true); + status = false; + } finally { + executor.shutdownNow(); + } + return status; + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + if (isClosed) + throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); + + if (clientInfoProps == null) + clientInfoProps = new Properties(); + clientInfoProps.setProperty(name, value); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + if (isClosed) + throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); + + + for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) { + String name = (String) enumer.nextElement(); + clientInfoProps.put(name, properties.getProperty(name)); + } + } + + @Override + public String getClientInfo(String name) throws SQLException { + if (isClosed) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return clientInfoProps.getProperty(name); + } + + @Override + public Properties getClientInfo() throws SQLException { + if (isClosed) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return clientInfoProps; + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + +
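+    // A hedged usage sketch for the isValid() implementation above (the URL and
+    // database name are illustrative assumptions, not part of this class):
+    //
+    //   Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/test");
+    //   if (!conn.isValid(3)) {   // probe the server with a 3-second timeout
+    //       conn.close();         // treat the connection as dead
+    //   }
+    //
+    // isValid() issues "select server_status()" on a worker thread so the
+    // timeout can be enforced with Future.get(timeout, TimeUnit.SECONDS).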
@Override + public void setSchema(String schema) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + //do nothing + } + + @Override + public String getSchema() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return null; + } + + @Override + public void abort(Executor executor) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + if (milliseconds < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getNetworkTimeout() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java index 08414d05e9f8b03582ac1257e6c460c05522f57e..5dcaa77ebd4a15087785a6a9b642b85f160f5287 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java @@ -4,7 +4,7 @@ import java.sql.*; import java.util.ArrayList; import java.util.List; -public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrapper { +public abstract class AbstractDatabaseMetaData extends WrapperImpl implements DatabaseMetaData { private final static String PRODUCT_NAME = "TDengine"; private final static String PRODUCT_VESION = "2.0.x.x"; @@ -981,9 +981,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap return getEmptyResultSet(); } - public Connection getConnection() throws SQLException { - return null; - } + public abstract Connection getConnection() throws SQLException; public boolean supportsSavepoints() throws SQLException { return false; @@ -1067,6 +1065,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap } public ResultSet getClientInfoProperties() throws SQLException { + //TODO: see https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#setClientInfo-java.lang.String-java.lang.String- return getEmptyResultSet(); } @@ -1093,20 +1092,6 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap return new EmptyResultSet(); } - @Override - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } - protected ResultSet getCatalogs(Connection conn) throws SQLException { try (Statement stmt = conn.createStatement()) { DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java similarity
index 99% rename from src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java rename to src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java index f864788bfffc8bdfefb0b91ec645a10ae8eec843..21bf8e7a932b0515d77c8f124eae2d0a4596b3b6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java @@ -8,7 +8,7 @@ import java.util.List; import java.util.Properties; import java.util.StringTokenizer; -public abstract class AbstractTaosDriver implements Driver { +public abstract class AbstractDriver implements Driver { private static final String TAOS_CFG_FILENAME = "taos.cfg"; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java new file mode 100644 index 0000000000000000000000000000000000000000..14bd2929f17e344381510897528bc479c50a4d36 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -0,0 +1,1210 @@ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; +import java.util.Map; + +public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { + private int fetchSize; + + @Override + public abstract boolean next() throws SQLException; + + @Override + public abstract void close() throws SQLException; + + @Override + public boolean wasNull() throws SQLException { + return false; + } + + @Override + public abstract String getString(int columnIndex) throws SQLException; + + @Override + public abstract boolean getBoolean(int columnIndex) throws SQLException; + + @Override + public byte getByte(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract short getShort(int columnIndex) throws SQLException; + + @Override + public abstract int getInt(int columnIndex) throws SQLException; + + @Override + public abstract long getLong(int columnIndex) throws SQLException; + + @Override + public abstract float getFloat(int columnIndex) throws SQLException; + + @Override + public abstract double getDouble(int columnIndex) throws SQLException; + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } 
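+    // The label-based getters further below are thin wrappers: each resolves
+    // its column label to an index via findColumn() and delegates to the
+    // index-based getter, so concrete subclasses only implement the index
+    // variants. A hedged sketch of the equivalence (the column name "ts" is
+    // illustrative only):
+    //
+    //   Timestamp t1 = rs.getTimestamp("ts");                // label form
+    //   Timestamp t2 = rs.getTimestamp(rs.findColumn("ts")); // index form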
+ + @Override + public abstract Timestamp getTimestamp(int columnIndex) throws SQLException; + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(findColumn(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(findColumn(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(findColumn(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(findColumn(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return getBigDecimal(findColumn(columnLabel), scale); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(findColumn(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(findColumn(columnLabel)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return getAsciiStream(findColumn(columnLabel)); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return getUnicodeStream(findColumn(columnLabel)); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return getBinaryStream(findColumn(columnLabel)); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + return null; + } + + @Override + public void clearWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + } + + @Override + public String getCursorName() throws SQLException { + if (isClosed()) + throw
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract ResultSetMetaData getMetaData() throws SQLException; + + @Override + public Object getObject(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(findColumn(columnLabel)); + } + + @Override + public abstract int findColumn(String columnLabel) throws SQLException; + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return getCharacterStream(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public abstract boolean isBeforeFirst() throws SQLException; + + @Override + public abstract boolean isAfterLast() throws SQLException; + + @Override + public abstract boolean isFirst() throws SQLException; + + @Override + public abstract boolean isLast() throws SQLException; + + @Override + public abstract void beforeFirst() throws SQLException; + + @Override + public abstract void afterLast() throws SQLException; + + @Override + public abstract boolean first() throws SQLException; + + @Override + public abstract boolean last() throws SQLException; + + @Override + public abstract int getRow() throws SQLException; + + @Override + public abstract boolean absolute(int row) throws SQLException; + + @Override + public abstract boolean relative(int rows) throws SQLException; + + @Override + public abstract boolean previous() throws SQLException; + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + //nothing to do + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return this.fetchSize; + } + + @Override + public int getType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + 
public int getConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowInserted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowDeleted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(String 
columnLabel, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void insertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void deleteRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void refreshRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void cancelRowUpdates() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToInsertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToCurrentRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract Statement getStatement() throws SQLException; + + @Override + public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override +
public Clob getClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + if
(isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract String getNString(int columnIndex) throws SQLException; + + @Override + public String getNString(String columnLabel) throws SQLException { + return getNString(findColumn(columnLabel)); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java new file mode 100644 index 0000000000000000000000000000000000000000..8b6c074d1b859f179aac04a83cafb24cfae7c190 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -0,0 +1,326 @@ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; + +public abstract class AbstractStatement extends WrapperImpl implements Statement { + + protected List<String> batchedArgs; + private int fetchSize; + + @Override + public abstract ResultSet executeQuery(String sql) throws SQLException; + + @Override + public abstract int executeUpdate(String sql) throws SQLException; + + @Override + public abstract void close() throws SQLException; + + @Override + public int getMaxFieldSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return TSDBConstants.maxFieldSize; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // nothing to do + } + + @Override + public int getMaxRows() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + 
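// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the patch. Every concrete
// method in AbstractStatement (and in the ResultSet base class above) repeats
// the same prologue: fail fast with ERROR_STATEMENT_CLOSED, then validate,
// no-op, or reject with ERROR_UNSUPPORTED_METHOD. The repeated guard could be
// factored into a private helper; `checkOpen` is a hypothetical name:
//
//     private void checkOpen() throws SQLException {
//         if (isClosed())
//             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
//     }
//
// Each method body would then begin with checkOpen(); behavior is unchanged.
// ---------------------------------------------------------------------------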
+ @Override + public void setMaxRows(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // do nothing + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // do nothing + } + + @Override + public int getQueryTimeout() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (seconds < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + @Override + public void cancel() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + // nothing to do + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + @Override + public void setCursorName(String name) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract boolean execute(String sql) throws SQLException; + + @Override + public abstract ResultSet getResultSet() throws SQLException; + + @Override + public abstract int getUpdateCount() throws SQLException; + + @Override + public boolean getMoreResults() throws SQLException { + return getMoreResults(CLOSE_CURRENT_RESULT); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + switch (direction) { + case ResultSet.FETCH_FORWARD: + case ResultSet.FETCH_REVERSE: + case ResultSet.FETCH_UNKNOWN: + break; + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.fetchSize; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int 
getResultSetType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public void addBatch(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + if (batchedArgs == null) { + batchedArgs = new ArrayList<>(); + } + batchedArgs.add(sql); + } + + @Override + public void clearBatch() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs != null) + batchedArgs.clear(); + } + + @Override + public int[] executeBatch() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs == null || batchedArgs.isEmpty()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY); + + int[] res = new int[batchedArgs.size()]; + for (int i = 0; i < batchedArgs.size(); i++) { + boolean isSelect = execute(batchedArgs.get(i)); + if (isSelect) { + res[i] = SUCCESS_NO_INFO; + } else { + res[i] = getUpdateCount(); + } + } + return res; + } + + @Override + public abstract Connection getConnection() throws SQLException; + + @Override + public boolean getMoreResults(int current) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + switch (current) { + case Statement.CLOSE_CURRENT_RESULT: + return false; + case Statement.KEEP_CURRENT_RESULT: + case Statement.CLOSE_ALL_RESULTS: + break; + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws 
SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getResultSetHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void setPoolable(boolean poolable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // do nothing + } + + @Override + public boolean isPoolable() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // do nothing + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return false; + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java index f82c064e751c195eb6327580c285a815346c917b..f6a0fcca316cb07d28c71a4e1d51d9405de083ba 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -160,12 +160,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Date getDate(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public Time getTime(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -176,17 +176,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -256,22 +256,22 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); 
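// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch, not part of the patch. The rewrite in
// DatabaseMetaDataResultSet is mechanical: every ad-hoc
// new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG) becomes a
// typed TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD).
// Per the TSDBError factory later in this diff, such a call now surfaces as a
// java.sql.SQLFeatureNotSupportedException carrying vendor code 0x2302, so a
// caller can branch on the code instead of parsing message text (the column
// label "ts" below is an arbitrary example):
//
//     try {
//         resultSet.getUnicodeStream("ts");   // any unsupported operation
//     } catch (SQLFeatureNotSupportedException e) {
//         assert e.getErrorCode() == TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD; // 0x2302
//     }
// ---------------------------------------------------------------------------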
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -281,7 +281,7 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -308,17 +308,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { return colMetaData.getColIndex() + 1; } } - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } @Override public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -353,22 +353,22 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -383,17 +383,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -443,227 +443,227 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - 
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -673,12 +673,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -1043,12 +1043,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 547fe6a9e9900981551b138c204d0f24d6ef152b..8d947b9411eb91eded49b3c7b1f12586682346ff 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -14,41 +14,25 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.NClob; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLClientInfoException; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Struct; -import java.util.*; -import java.util.concurrent.Executor; +import java.sql.*; +import java.util.Properties; -public class TSDBConnection implements Connection { +public class TSDBConnection extends AbstractConnection { - private TSDBJNIConnector connector = null; + private TSDBJNIConnector connector; + private TSDBDatabaseMetaData databaseMetaData; + private boolean batchFetch; - private String catalog = null; - - private TSDBDatabaseMetaData dbMetaData; - - private Properties clientInfoProps = new Properties(); - - private int timeoutMilliseconds = 0; + public Boolean getBatchFetch() { + return this.batchFetch; + } - private boolean batchFetch = false; + public void setBatchFetch(Boolean batchFetch) { + this.batchFetch = batchFetch; + } public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { - this.dbMetaData = meta; + this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), @@ -64,8 +48,8 @@ public class TSDBConnection implements Connection { private void connect(String host, int port, String dbName, String user, String password) throws SQLException { this.connector = new TSDBJNIConnector(); this.connector.connect(host, port, dbName, user, 
password); - this.setCatalog(dbName); - this.dbMetaData.setConnection(this); + this.catalog = dbName; + this.databaseMetaData.setConnection(this); } public TSDBJNIConnector getConnection() { @@ -102,52 +86,11 @@ public class TSDBConnection implements Connection { return new TSDBPreparedStatement(this, this.connector, sql); } - public CallableStatement prepareCall(String sql) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public String nativeSQL(String sql) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setAutoCommit(boolean autoCommit) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - - } - - public boolean getAutoCommit() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - - return true; - } - - public void commit() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - public void rollback() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - public void close() throws SQLException { if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } + this.isClosed = true; this.connector.closeConnection(); } @@ -155,105 +98,11 @@ public class TSDBConnection implements Connection { return this.connector != null && this.connector.isClosed(); } - /** - * A connection's database is able to provide information describing its tables, - * its supported SQL grammar, its stored procedures, the capabilities of this - * connection, etc. This information is made available through a - * DatabaseMetaData object. - * - * @return a DatabaseMetaData object for this connection - * @throws SQLException if a database access error occurs - */ public DatabaseMetaData getMetaData() throws SQLException { if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - return this.dbMetaData; - } - - /** - * This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to - * support HikariCP connection. - * - * @param readOnly - * @throws SQLException - */ - public void setReadOnly(boolean readOnly) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - public boolean isReadOnly() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return true; - } - - public void setCatalog(String catalog) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - this.catalog = catalog; - } - - public String getCatalog() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return this.catalog; - } - - /** - * The transaction isolation level option is not supported by TDengine. 
- * This method is intentionally left empty to support HikariCP connection. - * - * @param level - * @throws SQLException - */ - public void setTransactionIsolation(int level) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - switch (level) { - case Connection.TRANSACTION_NONE: - case Connection.TRANSACTION_READ_COMMITTED: - case Connection.TRANSACTION_READ_UNCOMMITTED: - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - break; - default: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - } - - /** - * The transaction isolation level option is not supported by TDengine. - * - * @return - * @throws SQLException - */ - public int getTransactionIsolation() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return Connection.TRANSACTION_NONE; - } - - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - //todo: implement getWarnings according to the warning messages returned from TDengine - return null; - } - - public void clearWarnings() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - //todo: implement clearWarnings according to the warning messages returned from TDengine + return this.databaseMetaData; } public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { @@ -263,253 +112,4 @@ public class TSDBConnection implements Connection { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - // This method is implemented in the current way to support Spark - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - - if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - - return this.prepareStatement(sql); - } - - public Boolean getBatchFetch() { - return this.batchFetch; - } - - public void setBatchFetch(Boolean batchFetch) { - this.batchFetch = batchFetch; - } - - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Map<String, Class<?>> getTypeMap() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setTypeMap(Map<String, Class<?>> map) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setHoldability(int holdability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - /** - * the transaction is not supported by TDengine, so the opened ResultSet Objects will remain open - * - *
@return - * @throws SQLException - */ - public int getHoldability() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - public Savepoint setSavepoint() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Savepoint setSavepoint(String name) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void rollback(Savepoint savepoint) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Clob createClob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Blob createBlob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public NClob createNClob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public SQLXML createSQLXML() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public boolean isValid(int timeout) throws SQLException { - return !this.isClosed(); - } - - public void setClientInfo(String name, String value) throws SQLClientInfoException { - clientInfoProps.setProperty(name, value); - } - - public void setClientInfo(Properties properties) throws SQLClientInfoException { - for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) { - String name = (String) enumer.nextElement(); - clientInfoProps.put(name, properties.getProperty(name)); - } - } - - public String getClientInfo(String name) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return clientInfoProps.getProperty(name); - } - - public Properties getClientInfo() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return clientInfoProps; - } - - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setSchema(String schema) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public String getSchema() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void abort(Executor executor) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - this.timeoutMilliseconds = milliseconds; - } - - public int getNetworkTimeout() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return this.timeoutMilliseconds; - } - - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index 0cf33692b05ab5e19e198463dc420b8a07c637a5..6179b47da1041a3461f1be43625f8c7d4e284e64 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -14,16 +14,13 @@ *****************************************************************************/ package com.taosdata.jdbc; +import java.sql.SQLException; +import java.sql.Types; import java.util.HashMap; import java.util.Map; public abstract class TSDBConstants { - public static final String STATEMENT_CLOSED = "statement is closed"; - public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; - public static final String INVALID_VARIABLES = "invalid variables"; - public static final String RESULT_SET_IS_CLOSED = "resultSet is closed"; - public static final String DEFAULT_PORT = "6200"; public static Map<Integer, String> DATATYPE_MAP = null; @@ -36,6 +33,7 @@ public abstract class TSDBConstants { public static final int JNI_NUM_OF_FIELDS_0 = -4; public static final int JNI_SQL_NULL = -5; public static final int JNI_FETCH_END = -6; + public static final int JNI_OUT_OF_MEMORY = -7; public static final int TSDB_DATA_TYPE_NULL = 0; public static final int TSDB_DATA_TYPE_BOOL = 1; @@ -76,8 +74,65 @@ public abstract class TSDBConstants { return WrapErrMsg("unknown error!"); } + public static int taosType2JdbcType(int taosType) throws SQLException { + switch (taosType) { + case TSDBConstants.TSDB_DATA_TYPE_NULL: + return Types.NULL; + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + return Types.BOOLEAN; + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + return Types.TINYINT; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + return Types.SMALLINT; + case TSDBConstants.TSDB_DATA_TYPE_INT: + return Types.INTEGER; + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + return Types.BIGINT; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return Types.FLOAT; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return Types.DOUBLE; + case TSDBConstants.TSDB_DATA_TYPE_BINARY: + return Types.BINARY; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + return Types.TIMESTAMP; + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + return Types.NCHAR; + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE); + } + + public static int jdbcType2TaosType(int jdbcType) throws SQLException { + switch (jdbcType) { + case Types.NULL: + return TSDBConstants.TSDB_DATA_TYPE_NULL; + case Types.BOOLEAN: + return TSDBConstants.TSDB_DATA_TYPE_BOOL; + case Types.TINYINT: + return TSDBConstants.TSDB_DATA_TYPE_TINYINT; + case Types.SMALLINT: + return TSDBConstants.TSDB_DATA_TYPE_SMALLINT; + case Types.INTEGER: + return TSDBConstants.TSDB_DATA_TYPE_INT; + case Types.BIGINT: + return TSDBConstants.TSDB_DATA_TYPE_BIGINT; + case Types.FLOAT: + return TSDBConstants.TSDB_DATA_TYPE_FLOAT; + case Types.DOUBLE: + return TSDBConstants.TSDB_DATA_TYPE_DOUBLE; + case Types.BINARY: + return TSDBConstants.TSDB_DATA_TYPE_BINARY; + case Types.TIMESTAMP: + return TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP; + case Types.NCHAR: + return TSDBConstants.TSDB_DATA_TYPE_NCHAR; + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE); + } + static { DATATYPE_MAP = new HashMap<>(); + DATATYPE_MAP.put(0, "NULL"); DATATYPE_MAP.put(1, "BOOL"); DATATYPE_MAP.put(2, "TINYINT"); DATATYPE_MAP.put(3, "SMALLINT"); @@ -89,4 +144,8 @@ DATATYPE_MAP.put(9, 
"TIMESTAMP"); DATATYPE_MAP.put(10, "NCHAR"); } + + public static String jdbcType2TaosTypeName(int type) throws SQLException { + return DATATYPE_MAP.get(jdbcType2TaosType(type)); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index d1f1e77b1c1e325e04c018a23d5589b7501f4919..8b7ede148e89cce0d8db22e62627bd1e1c49f9bb 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -57,39 +57,38 @@ public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData { */ public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { if (conn == null || conn.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTables(catalog, schemaPattern, tableNamePattern, types, conn); } - public ResultSet getCatalogs() throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getCatalogs(conn); } public ResultSet getTableTypes() throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getTableTypes(); } public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, conn); } public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getPrimaryKeys(catalog, schema, table, conn); } public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getSuperTables(catalog, schemaPattern, tableNamePattern, conn); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index c171ca2a36f78b6899cafd6348b3ebc3407d1b2a..2b87b72fef0f2b621536c5a11aba69975aa86434 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -37,7 +37,7 @@ import java.util.logging.Logger; * register it with the DriverManager. 
This means that a user can load and * register a driver by doing Class.forName("foo.bah.Driver") */ -public class TSDBDriver extends AbstractTaosDriver { +public class TSDBDriver extends AbstractDriver { @Deprecated private static final String URL_PREFIX1 = "jdbc:TSDB://"; @@ -90,7 +90,7 @@ public class TSDBDriver extends AbstractTaosDriver { * fetch data from native function in a batch model */ public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; - + private TSDBDatabaseMetaData dbMetaData = null; static { @@ -179,18 +179,18 @@ public class TSDBDriver extends AbstractTaosDriver { while (queryParams.hasMoreElements()) { String oneToken = queryParams.nextToken(); String[] pair = oneToken.split("="); - + if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) { urlProps.setProperty(pair[0].trim(), pair[1].trim()); } } } - + // parse Product Name String dbProductName = url.substring(0, beginningOfSlashes); dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1); dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); - + // parse database name url = url.substring(beginningOfSlashes + 2); int indexOfSlash = url.indexOf("/"); @@ -200,7 +200,7 @@ public class TSDBDriver extends AbstractTaosDriver { } url = url.substring(0, indexOfSlash); } - + // parse port int indexOfColon = url.indexOf(":"); if (indexOfColon != -1) { @@ -209,11 +209,11 @@ public class TSDBDriver extends AbstractTaosDriver { } url = url.substring(0, indexOfColon); } - + if (url != null && url.length() > 0 && url.trim().length() > 0) { urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); } - + this.dbMetaData = new TSDBDatabaseMetaData(urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER)); return urlProps; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index ede0b4e4e847e758d6f4e335d4077d39096e7afc..c7717e331b39247542d1a022cafd1cab2ac65627 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -1,6 +1,8 @@ package com.taosdata.jdbc; +import java.sql.SQLClientInfoException; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.util.HashMap; import java.util.Map; @@ -13,19 +15,54 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variables"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED, "statement is closed"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED, "resultSet is closed"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY, "Batch is empty!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY, "Can not issue data manipulation statements with executeQuery()"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE, "Can not issue SELECT via executeUpdate()"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: (?)"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE, "Database not specified or available"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: (?)"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: (?)"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "parameter index 
out of range"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED, "connection already closed"); + /**************************************************/ + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); /**************************************************/ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding"); + + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection is NULL"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "JNI result set is NULL"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultSet"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed"); } - public static String wrapErrMsg(String msg) { - return "TDengine Error: " + msg; + public static SQLException createSQLException(int errorCode) { + String message; + if (TSDBErrorNumbers.contains(errorCode)) + message = TSDBErrorMap.get(errorCode); + else + message = TSDBErrorMap.get(TSDBErrorNumbers.ERROR_UNKNOWN); + return createSQLException(errorCode, message); } - public static SQLException createSQLException(int errorNumber) { - // JDBC exception code is less than 0x2350 - if (errorNumber <= 0x2350) - return new SQLException(TSDBErrorMap.get(errorNumber)); - // JNI exception code is - return new SQLException(wrapErrMsg(TSDBErrorMap.get(errorNumber))); + public static SQLException createSQLException(int errorCode, String message) { + // throw SQLFeatureNotSupportedException + if (errorCode == TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD) + return new SQLFeatureNotSupportedException(message, "", errorCode); + // throw SQLClientInfoException + if (errorCode == TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED) + return new SQLClientInfoException(message, null); + + if (errorCode > 0x2300 && errorCode < 0x2350) + // JDBC exception's error number is less than 0x2350 + return new SQLException("ERROR (" + Integer.toHexString(errorCode) + "): " + message, "", errorCode); + if (errorCode > 0x2350 && errorCode < 0x2400) + // JNI exception's error number is large than 0x2350 + return new SQLException("JNI ERROR (" + Integer.toHexString(errorCode) + "): " + message, "", errorCode); + return new SQLException("TDengine ERROR (" + Integer.toHexString(errorCode) + "): " + message, "", errorCode); } + } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index 74dbb8ab9af0a19a2d969e9fefc0c863f444a77b..78e7aec79c252a2e6b2eeafcba37ab1c66f8674d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -1,5 +1,7 @@ package com.taosdata.jdbc; +import java.util.HashSet; + public class TSDBErrorNumbers { public static final int ERROR_CONNECTION_CLOSED = 0x2301; // connection already closed @@ -7,9 +9,67 @@ public class TSDBErrorNumbers { public static final int ERROR_INVALID_VARIABLE = 0x2303; //invalid variables public static final int ERROR_STATEMENT_CLOSED = 0x2304; //statement already 
closed public static final int ERROR_RESULTSET_CLOSED = 0x2305; //resultSet is closed + public static final int ERROR_BATCH_IS_EMPTY = 0x2306; //Batch is empty! + public static final int ERROR_INVALID_WITH_EXECUTEQUERY = 0x2307; //Can not issue data manipulation statements with executeQuery() + public static final int ERROR_INVALID_WITH_EXECUTEUPDATE = 0x2308; //Can not issue SELECT via executeUpdate() + public static final int ERROR_INVALID_FOR_EXECUTE_QUERY = 0x2309; //not a valid sql for executeQuery: (SQL) + public static final int ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE = 0x230a; //Database not specified or available + public static final int ERROR_INVALID_FOR_EXECUTE_UPDATE = 0x230b; //not a valid sql for executeUpdate: (SQL) + public static final int ERROR_INVALID_FOR_EXECUTE = 0x230c; //not a valid sql for execute: (SQL) + public static final int ERROR_PARAMETER_INDEX_OUT_RANGE = 0x230d; // parameter index out of range + public static final int ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED = 0x230e; // connection already closed + public static final int ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE = 0x230f; //unknown sql type in tdengine + + public static final int ERROR_UNKNOWN = 0x2350; //unknown error + + public static final int ERROR_SUBSCRIBE_FAILED = 0x2351; // failed to create subscription + public static final int ERROR_UNSUPPORTED_ENCODING = 0x2352; // Unsupported encoding + + public static final int ERROR_JNI_TDENGINE_ERROR = 0x2353; // internal error of database + public static final int ERROR_JNI_CONNECTION_NULL = 0x2354; // JNI connection is NULL + public static final int ERROR_JNI_RESULT_SET_NULL = 0x2355; // invalid JNI result set + public static final int ERROR_JNI_NUM_OF_FIELDS_0 = 0x2356; // invalid num of fields + public static final int ERROR_JNI_SQL_NULL = 0x2357; // empty sql string + public static final int ERROR_JNI_FETCH_END = 0x2358; // fetch to the end of resultSet + public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359; // JNI alloc memory failed + + private static final HashSet<Integer> errorNumbers; + + static { + errorNumbers = new HashSet<>(); + errorNumbers.add(ERROR_CONNECTION_CLOSED); + errorNumbers.add(ERROR_UNSUPPORTED_METHOD); + errorNumbers.add(ERROR_INVALID_VARIABLE); + errorNumbers.add(ERROR_STATEMENT_CLOSED); + errorNumbers.add(ERROR_RESULTSET_CLOSED); + errorNumbers.add(ERROR_INVALID_WITH_EXECUTEQUERY); + errorNumbers.add(ERROR_INVALID_WITH_EXECUTEUPDATE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE_QUERY); + errorNumbers.add(ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE_UPDATE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE); + errorNumbers.add(ERROR_PARAMETER_INDEX_OUT_RANGE); + errorNumbers.add(ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); + errorNumbers.add(ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE); - public static final int ERROR_SUBSCRIBE_FAILED = 0x2350; //failed to create subscription + /*****************************************************/ + errorNumbers.add(ERROR_SUBSCRIBE_FAILED); + errorNumbers.add(ERROR_UNSUPPORTED_ENCODING); + + errorNumbers.add(ERROR_JNI_TDENGINE_ERROR); + errorNumbers.add(ERROR_JNI_CONNECTION_NULL); + errorNumbers.add(ERROR_JNI_RESULT_SET_NULL); + errorNumbers.add(ERROR_JNI_NUM_OF_FIELDS_0); + errorNumbers.add(ERROR_JNI_SQL_NULL); + errorNumbers.add(ERROR_JNI_FETCH_END); + errorNumbers.add(ERROR_JNI_OUT_OF_MEMORY); + + } private TSDBErrorNumbers() { } + + public static boolean contains(int errorNumber) { + return errorNumbers.contains(errorNumber); + } }
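Read together, these two files centralize the connector's error handling: TSDBErrorNumbers enumerates the vendor codes (JDBC-level codes in 0x2301..0x230f, connector-internal codes from 0x2350, JNI codes in 0x2353..0x2359), and TSDBError.createSQLException maps each code to a message, a prefix, and, for two special codes, a dedicated SQLException subclass. A minimal sketch of what a caller observes under the new mapping; the TSDBError and TSDBErrorNumbers names come from the diff above, while the demo class itself is illustrative only:

    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;

    public class TSDBErrorMappingDemo {
        public static void main(String[] args) {
            // ERROR_UNSUPPORTED_METHOD is special-cased into SQLFeatureNotSupportedException.
            SQLException unsupported = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
            System.out.println(unsupported instanceof SQLFeatureNotSupportedException); // true

            // A JNI-range code keeps its numeric value in getErrorCode() and gets the "JNI ERROR" prefix.
            SQLException jni = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
            System.out.println(Integer.toHexString(jni.getErrorCode())); // 2354
            System.out.println(jni.getMessage()); // JNI ERROR (2354): JNI connection is NULL

            // A code outside both documented ranges falls through to the generic "TDengine ERROR" prefix.
            SQLException other = TSDBError.createSQLException(0x9999, "some driver-defined message");
            System.out.println(other.getMessage()); // TDengine ERROR (9999): some driver-defined message
        }
    }

diff --git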
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 349a02fb37c28b263bb73bcc05f644bc53f71079..f3c4e025798e8e3fd8db0e5c1a1b25aa6fea3dc7 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -38,7 +38,7 @@ public class TSDBJNIConnector { /** * Result set pointer for the current connection */ - private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; /** * result set status in current connection @@ -102,7 +102,7 @@ public class TSDBJNIConnector { this.taos = this.connectImp(host, port, dbName, user, password); if (this.taos == TSDBConstants.JNI_NULL_POINTER) { - throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg(0L)), "", this.getErrCode(0l)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); } // invoke connectImp only here taosInfo.conn_open_increment(); @@ -119,9 +119,9 @@ public class TSDBJNIConnector { public long executeQuery(String sql) throws SQLException { // close previous result set if the user forgets to invoke the // free method to close previous result set. - if (!this.isResultsetClosed) { - freeResultSet(taosResultSetPointer); - } +// if (!this.isResultsetClosed) { +// freeResultSet(taosResultSetPointer); +// } Long pSql = 0l; try { @@ -130,21 +130,32 @@ public class TSDBJNIConnector { } catch (Exception e) { e.printStackTrace(); this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding")); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); + } + if (pSql == TSDBConstants.JNI_CONNECTION_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + } + if (pSql == TSDBConstants.JNI_SQL_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); + } + if (pSql == TSDBConstants.JNI_OUT_OF_MEMORY) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); } int code = this.getErrCode(pSql); - if (code != 0) { + if (code != TSDBConstants.JNI_SUCCESS) { affectedRows = -1; String msg = this.getErrMsg(pSql); - this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code); + throw TSDBError.createSQLException(code, msg); } // Try retrieving result set for the executed SQL using the current connection pointer. 
- taosResultSetPointer = this.getResultSetImp(this.taos, pSql); - isResultsetClosed = (taosResultSetPointer == TSDBConstants.JNI_NULL_POINTER); + pSql = this.getResultSetImp(this.taos, pSql); + isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER); return pSql; } @@ -173,10 +184,9 @@ public class TSDBJNIConnector { * Get resultset pointer * Each connection should have a single open result set at a time */ - public long getResultSet() { - return taosResultSetPointer; - } - +// public long getResultSet() { +// return taosResultSetPointer; +// } private native long getResultSetImp(long connection, long pSql); public boolean isUpdateQuery(long pSql) { @@ -188,16 +198,16 @@ public class TSDBJNIConnector { /** * Free resultset operation from C to release resultset pointer by JNI */ - public int freeResultSet(long result) { + public int freeResultSet(long pSql) { int res = TSDBConstants.JNI_SUCCESS; - if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - throw new RuntimeException("Invalid result set pointer"); - } +// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// throw new RuntimeException("Invalid result set pointer"); +// } - if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - res = this.freeResultSetImp(this.taos, result); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - } +// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { + res = this.freeResultSetImp(this.taos, pSql); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// } isResultsetClosed = true; return res; @@ -207,16 +217,15 @@ public class TSDBJNIConnector { * Close the open result set which is associated to the current connection. If the result set is already * closed, return 0 for success. 
*/ - public int freeResultSet() { - int resCode = TSDBConstants.JNI_SUCCESS; - if (!isResultsetClosed) { - resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - isResultsetClosed = true; - } - return resCode; - } - +// public int freeResultSet() { +// int resCode = TSDBConstants.JNI_SUCCESS; +// if (!isResultsetClosed) { +// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// isResultsetClosed = true; +// } +// return resCode; +// } private native int freeResultSetImp(long connection, long result); /** @@ -264,7 +273,7 @@ public class TSDBJNIConnector { public void closeConnection() throws SQLException { int code = this.closeConnectionImp(this.taos); if (code < 0) { - throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode(0l)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); } else if (code == 0) { this.taos = TSDBConstants.JNI_NULL_POINTER; } else { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index c6b41ce004d739a94c00e87f141b9180efa18e57..d294c8974543db409187a60cf77ea03c98f5ff17 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -32,7 +32,7 @@ import java.util.regex.Pattern; public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { protected String rawSql; protected String sql; - protected ArrayList parameters = new ArrayList(); + protected ArrayList parameters = new ArrayList<>(); //start with insert or import and is case-insensitive private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)"); @@ -41,6 +41,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private boolean isSaved; private SavedPreparedStatement savedPreparedStatement; + private ParameterMetaData parameterMetaData; TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { super(connection, connecter); @@ -53,7 +54,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.isSaved = isSavedSql(this.rawSql); if (this.isSaved) { - try { this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this); } catch (SQLException e) { @@ -90,10 +90,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.parameters = parameters; } - public String getRawSql() { - return rawSql; - } - /* * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * the TDengine client. 
Thus, some simple parsers/filters are intentionally added in this JDBC implementation in @@ -192,99 +188,191 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } + private boolean isSupportedSQLType(int sqlType) { + switch (sqlType) { + case Types.TIMESTAMP: + case Types.INTEGER: + case Types.BIGINT: + case Types.FLOAT: + case Types.DOUBLE: + case Types.SMALLINT: + case Types.TINYINT: + case Types.BOOLEAN: + case Types.BINARY: + case Types.NCHAR: + return true; + case Types.ARRAY: + case Types.BIT: + case Types.BLOB: + case Types.CHAR: + case Types.CLOB: + case Types.DATALINK: + case Types.DATE: + case Types.DECIMAL: + case Types.DISTINCT: + case Types.JAVA_OBJECT: + case Types.LONGNVARCHAR: + case Types.LONGVARBINARY: + case Types.LONGVARCHAR: + case Types.NCLOB: + case Types.NULL: + case Types.NUMERIC: + case Types.NVARCHAR: + case Types.OTHER: + case Types.REAL: + case Types.REF: + case Types.REF_CURSOR: + case Types.ROWID: + case Types.SQLXML: + case Types.STRUCT: + case Types.TIME: + case Types.TIME_WITH_TIMEZONE: + case Types.TIMESTAMP_WITH_TIMEZONE: + case Types.VARBINARY: + case Types.VARCHAR: + default: + return false; + } + } + @Override public void setNull(int parameterIndex, int sqlType) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!isSupportedSQLType(sqlType) || parameterIndex < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); +// if (parameterIndex >= parameters.size()) +// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_BOUNDARY); + setObject(parameterIndex, "NULL"); } @Override public void setBoolean(int parameterIndex, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + setObject(parameterIndex, x); } @Override public void setByte(int parameterIndex, byte x) throws SQLException { - setObject(parameterIndex, x); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setShort(int parameterIndex, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setInt(int parameterIndex, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setLong(int parameterIndex, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setFloat(int parameterIndex, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setDouble(int parameterIndex, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - setObject(parameterIndex, x); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } 
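Every setter rewritten in this hunk follows the same two-step guard: first reject calls on a closed statement with ERROR_STATEMENT_CLOSED, then either buffer the value through setObject(...) (boolean, short, int, long, float, double, String, Timestamp) or reject the parameter type (byte, byte[], BigDecimal, Date, Time, and the stream setters) with ERROR_UNSUPPORTED_METHOD, which createSQLException surfaces as SQLFeatureNotSupportedException. A caller-side sketch of the three outcomes; the URL, credentials, and table schema are placeholders, and the snippet assumes a reachable TDengine server:

    import java.math.BigDecimal;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;

    public class GuardPatternDemo {
        public static void main(String[] args) throws SQLException {
            // Placeholder connection details; any reachable TDengine instance would do.
            Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/test", "root", "taosdata");
            PreparedStatement ps = conn.prepareStatement("select * from weather where temperature > ?");

            ps.setFloat(1, 20.5f); // supported type: buffered via setObject(...)
            try {
                ps.setBigDecimal(1, new BigDecimal("20.5"));
            } catch (SQLFeatureNotSupportedException e) {
                // unsupported parameter type -> ERROR_UNSUPPORTED_METHOD
            }

            ps.close();
            try {
                ps.setInt(1, 1); // closed statement -> ERROR_STATEMENT_CLOSED
            } catch (SQLException e) {
                System.out.println(Integer.toHexString(e.getErrorCode())); // 2304
            }
            conn.close();
        }
    }
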
@Override public void setString(int parameterIndex, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setBytes(int parameterIndex, byte[] x) throws SQLException { - setObject(parameterIndex, x); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setDate(int parameterIndex, Date x) throws SQLException { - setObject(parameterIndex, x); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setTime(int parameterIndex, Time x) throws SQLException { - setObject(parameterIndex, x); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void clearParameters() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); parameters.clear(); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override @@ -311,9 +399,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat if (isSaved) { this.savedPreparedStatement.addBatch(); } else { - if (this.batchedArgs == null) { - batchedArgs = new ArrayList(); + batchedArgs = new ArrayList<>(); } super.addBatch(getNativeSql()); } @@ -321,156 +408,226 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + 
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setRef(int parameterIndex, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setBlob(int parameterIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setClob(int parameterIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setArray(int parameterIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public ResultSetMetaData getMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); +// return this.getResultSet().getMetaData(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // TODO: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setURL(int parameterIndex, URL x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public ParameterMetaData getParameterMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //TODO: parameterMetaData not supported +// return null; + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setRowId(int parameterIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNString(int parameterIndex, String value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNClob(int parameterIndex, NClob value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); 
+ //TODO: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index d06922c680ccec9c4ec6f53cdbe60cb530deae97..80ff49253016c632b6bf85f4756fe2d702a9bffc 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -14,36 +14,17 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.io.InputStream; -import java.io.Reader; import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; +import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; -import java.util.Iterator; import java.util.List; -import java.util.Map; -public class TSDBResultSet implements ResultSet { - private TSDBJNIConnector jniConnector = null; +public class TSDBResultSet extends AbstractResultSet implements ResultSet { + private TSDBJNIConnector jniConnector; + private final TSDBStatement statement; private long resultSetPointer = 0L; - private List columnMetaDataList = new ArrayList(); + private List columnMetaDataList = new ArrayList<>(); private TSDBResultSetRowData rowData; private TSDBResultSetBlockData blockData; @@ -52,24 +33,6 @@ public class TSDBResultSet implements ResultSet { private boolean lastWasNull = false; private final int COLUMN_INDEX_START_VALUE = 1; - private int rowIndex = 0; - - public TSDBJNIConnector getJniConnector() { - return jniConnector; - } - - public void setJniConnector(TSDBJNIConnector jniConnector) { - this.jniConnector = jniConnector; - } - - public long getResultSetPointer() { - return resultSetPointer; - } - - public void setResultSetPointer(long resultSetPointer) { - this.resultSetPointer = resultSetPointer; - } - public void setBatchFetch(boolean batchFetch) { this.batchFetch = batchFetch; } @@ -78,10 +41,6 @@ public class TSDBResultSet implements ResultSet { return this.batchFetch; } - public List getColumnMetaDataList() { - return columnMetaDataList; - } - public void setColumnMetaDataList(List columnMetaDataList) { this.columnMetaDataList = columnMetaDataList; } @@ -90,56 +49,25 @@ public class TSDBResultSet implements ResultSet { return rowData; } - public void setRowData(TSDBResultSetRowData rowData) { - this.rowData = rowData; - } - - public boolean isLastWasNull() { - return lastWasNull; - } - - public void setLastWasNull(boolean lastWasNull) { - this.lastWasNull = lastWasNull; - } - - public TSDBResultSet() { - - } - - public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + public TSDBResultSet(TSDBStatement statement, TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + this.statement = statement; this.jniConnector = connector; this.resultSetPointer = resultSetPointer; + int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList); if (code == TSDBConstants.JNI_CONNECTION_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); 
- } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + } + if (code == TSDBConstants.JNI_RESULT_SET_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); - } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + } + if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); } - this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size()); this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size()); } - public <T> T unwrap(Class<T> iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - public boolean isWrapperFor(Class<?> iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } - public boolean next() throws SQLException { if (this.getBatchFetch()) { if (this.blockData.forward()) { @@ -273,7 +201,7 @@ public class TSDBResultSet implements ResultSet { } public long getLong(int columnIndex) throws SQLException { - long res = 0l; + long res = 0L; int colIndex = getTrueColumnIndex(columnIndex); if (!this.getBatchFetch()) { @@ -317,14 +245,6 @@ public class TSDBResultSet implements ResultSet { } } - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getBigDecimal(int, int) - * - * @deprecated Use {@code getBigDecimal(int columnIndex)} or {@code - * getBigDecimal(String columnLabel)} - */ @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { return new BigDecimal(getLong(columnIndex)); @@ -334,16 +254,6 @@ public class TSDBResultSet implements ResultSet { return getString(columnIndex).getBytes(); } - public Date getDate(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public Timestamp getTimestamp(int columnIndex) throws SQLException { Timestamp res = null; int colIndex = getTrueColumnIndex(columnIndex); @@ -359,112 +269,11 @@ public class TSDBResultSet implements ResultSet { } } - public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getUnicodeStream(int) - * - * * @deprecated use getCharacterStream in place of - * getUnicodeStream - */ - @Deprecated - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getString(String columnLabel) throws SQLException { - return this.getString(this.findColumn(columnLabel)); - } - - public boolean getBoolean(String columnLabel) throws SQLException { - return this.getBoolean(this.findColumn(columnLabel)); - } - - public byte getByte(String columnLabel) throws SQLException { - return
this.getByte(this.findColumn(columnLabel)); - } - - public short getShort(String columnLabel) throws SQLException { - return this.getShort(this.findColumn(columnLabel)); - } - - public int getInt(String columnLabel) throws SQLException { - return this.getInt(this.findColumn(columnLabel)); - } - - public long getLong(String columnLabel) throws SQLException { - return this.getLong(this.findColumn(columnLabel)); - } - - public float getFloat(String columnLabel) throws SQLException { - return this.getFloat(this.findColumn(columnLabel)); - } - - public double getDouble(String columnLabel) throws SQLException { - return this.getDouble(this.findColumn(columnLabel)); - } - - /* - * used by spark - */ - @Deprecated - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel), scale); - } - - public byte[] getBytes(String columnLabel) throws SQLException { - return this.getBytes(this.findColumn(columnLabel)); - } - - public Date getDate(String columnLabel) throws SQLException { - return this.getDate(this.findColumn(columnLabel)); - } - - public Time getTime(String columnLabel) throws SQLException { - return this.getTime(this.findColumn(columnLabel)); - } - - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return this.getTimestamp(this.findColumn(columnLabel)); - } - - public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Deprecated - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public ResultSetMetaData getMetaData() throws SQLException { return new TSDBResultSetMetaData(this.columnMetaDataList); } + @Override public Object getObject(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -476,32 +285,21 @@ public class TSDBResultSet implements ResultSet { } } + @Override public Object getObject(String columnLabel) throws SQLException { return this.getObject(this.findColumn(columnLabel)); } public int findColumn(String columnLabel) throws SQLException { - Iterator colMetaDataIt = this.columnMetaDataList.iterator(); - while (colMetaDataIt.hasNext()) { - ColumnMetaData colMetaData = colMetaDataIt.next(); + for (ColumnMetaData colMetaData : this.columnMetaDataList) { if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) { return colMetaData.getColIndex() + 1; } } - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } - public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ + @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -513,403 +311,111 @@ public class TSDBResultSet implements ResultSet { } } - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel)); - } - + @Override public boolean isBeforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isAfterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchSize(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchSize() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - public int getConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowUpdated() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowInserted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowDeleted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateInt(String columnLabel, int x) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isAfterLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public void beforeFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public void afterLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean first() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean last() throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public int getRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean absolute(int row) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean relative(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean previous() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Statement getStatement() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(String columnLabel) throws SQLException { - throw
new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public int getHoldability() throws SQLException { 
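The long run of deleted `getRef`/`getBlob`/`getClob`/`getArray`/`updateRowId` stubs above is not re-added anywhere in this file, which, together with `TSDBStatement extends AbstractStatement` and `RestfulConnection extends AbstractConnection` later in this patch, suggests the boilerplate moves into a shared abstract base. A hypothetical sketch of that layering; `AbstractResultSetSketch` is assumed by analogy, and the real base class name may differ:

```java
import java.sql.Blob;
import java.sql.Ref;
import java.sql.SQLException;

// Hypothetical sketch: a shared base owns every optional JDBC method once,
// so concrete driver classes only override what they actually support.
abstract class AbstractResultSetSketch {
    protected SQLException unsupported() {
        return new SQLException("this method is not supported by the TDengine driver");
    }

    public Ref getRef(int columnIndex) throws SQLException {
        throw unsupported();
    }

    public Blob getBlob(int columnIndex) throws SQLException {
        throw unsupported();
    }
    // ...one default stub per optional ResultSet method
}
```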
- throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return this.statement; } public boolean isClosed() throws SQLException { + //TODO: check if need release resources boolean isClosed = true; if (jniConnector != null) { isClosed = jniConnector.isResultsetClosed(); @@ -917,183 +423,11 @@ public class TSDBResultSet implements ResultSet { return isClosed; } - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public String getNString(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); return (String) rowData.get(colIndex); } - public String getNString(String columnLabel) throws SQLException { - return (String) this.getString(columnLabel); - } - - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new 
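The `isClosed()` hunk above now delegates to the native cursor (`jniConnector.isResultsetClosed()`), with a TODO about resource release. In practice callers should not poll the flag but lean on try-with-resources, which closes the set and frees the JNI cursor deterministically. A usage sketch; URL, credentials and table names are illustrative only:

```java
import java.sql.*;

// Usage sketch: ResultSet is AutoCloseable, so try-with-resources frees the
// underlying native cursor without polling isClosed(). Connection details
// here are illustrative, not prescribed by this patch.
public class ResultSetLifecycleDemo {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://localhost:6030/demo_db"; // illustrative native-driver URL
        try (Connection conn = DriverManager.getConnection(url, "root", "taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT ts, val FROM demo_table LIMIT 10")) {
            while (rs.next()) {
                System.out.println(rs.getTimestamp(1) + " -> " + rs.getDouble(2));
            }
        } // rs, stmt and conn are all closed here, in reverse order
    }
}
```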
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader) 
throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - private int getTrueColumnIndex(int columnIndex) throws SQLException { if (columnIndex < this.COLUMN_INDEX_START_VALUE) { throw new SQLException("Column Index out of range, " + columnIndex + " < " + this.COLUMN_INDEX_START_VALUE); @@ -1103,7 +437,6 @@ public class TSDBResultSet implements ResultSet { if (columnIndex > numOfCols) { throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols); } - return columnIndex - 1; } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java index d6d69bd8b02db7de9350c023e696f4cf2e2f5e65..e0b1c246cbea246f885ab4c56c7ece0be212ff66 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -20,7 +20,7 @@ import java.sql.Timestamp; import java.sql.Types; import java.util.List; -public class TSDBResultSetMetaData implements ResultSetMetaData { +public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaData { List colMetaDataList = null; @@ -28,14 +28,6 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { this.colMetaDataList = metaDataList; } - public T unwrap(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public int getColumnCount() throws SQLException { return colMetaDataList.size(); } @@ -94,7 +86,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getSchemaName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public int getPrecision(int column) throws SQLException { @@ -125,18 +117,18 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getTableName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public String getCatalogName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public int getColumnType(int column) throws SQLException { ColumnMetaData meta = this.colMetaDataList.get(column - 1); switch (meta.getColType()) { case TSDBConstants.TSDB_DATA_TYPE_BOOL: - return java.sql.Types.BIT; + return Types.BOOLEAN; case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return java.sql.Types.TINYINT; case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: @@ -150,13 +142,13 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { case 
TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return java.sql.Types.DOUBLE; case TSDBConstants.TSDB_DATA_TYPE_BINARY: - return java.sql.Types.CHAR; + return Types.BINARY; case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: - return java.sql.Types.BIGINT; + return java.sql.Types.TIMESTAMP; case TSDBConstants.TSDB_DATA_TYPE_NCHAR: - return java.sql.Types.CHAR; + return Types.NCHAR; } - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } public String getColumnTypeName(int column) throws SQLException { @@ -173,7 +165,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public boolean isDefinitelyWritable(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public String getColumnClassName(int column) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java index 059962a7a120073c17258652f9fb3609cc5072be..48854e773f89a45784de3cd709ec5bbe6185e09b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java @@ -1153,11 +1153,11 @@ public class TSDBResultSetWrapper implements ResultSet { } public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index 82a6b4a3fff634b8bedfe338aeede940860e866a..996ea4a185d97b2294ac06cacccd08862de2de94 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -14,34 +14,20 @@ *****************************************************************************/ package com.taosdata.jdbc; -import com.taosdata.jdbc.utils.TaosInfo; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; -import java.sql.*; -import java.util.ArrayList; -import java.util.List; +public class TSDBStatement extends AbstractStatement { -public class TSDBStatement implements Statement { private TSDBJNIConnector connector; - - /** - * To store batched commands - */ - protected List batchedArgs; - - /** - * Timeout for a query - */ - protected int queryTimeout = 0; - - private Long pSql = 0l; - /** * Status of current statement */ - private boolean isClosed = true; - private int affectedRows = 0; - + private boolean isClosed; + private int affectedRows = -1; private TSDBConnection connection; + private TSDBResultSet resultSet; public void setConnection(TSDBConnection connection) { this.connection = connection; @@ -50,304 +36,96 @@ public class TSDBStatement implements Statement { TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) { this.connection = connection; this.connector = connector; - this.isClosed = false; - } - - @Override - 
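The `getColumnType` hunk above fixes the JDBC type mapping: BOOL now reports `Types.BOOLEAN` (was `BIT`), BINARY reports `Types.BINARY` (was `CHAR`), TIMESTAMP reports `Types.TIMESTAMP` (was `BIGINT`), and NCHAR reports `Types.NCHAR` (was `CHAR`). A self-contained sketch of the corrected mapping; the numeric type codes are illustrative stand-ins for the `TSDBConstants` values:

```java
import java.sql.SQLException;
import java.sql.Types;

// Sketch of the corrected TSDB -> java.sql.Types mapping. The TSDB_* codes
// below are stand-ins for TSDBConstants; the point is the four rows that
// changed in this hunk.
final class TypeMappingDemo {
    static final int TSDB_BOOL = 1, TSDB_BINARY = 8, TSDB_TIMESTAMP = 9, TSDB_NCHAR = 10; // illustrative codes

    static int toJdbcType(int tsdbType) throws SQLException {
        switch (tsdbType) {
            case TSDB_BOOL:      return Types.BOOLEAN;   // was Types.BIT
            case TSDB_BINARY:    return Types.BINARY;    // was Types.CHAR
            case TSDB_TIMESTAMP: return Types.TIMESTAMP; // was Types.BIGINT
            case TSDB_NCHAR:     return Types.NCHAR;     // was Types.CHAR
            default: throw new SQLException("invalid variable: unknown TSDB type " + tsdbType);
        }
    }
}
```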
public T unwrap(Class iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); } public ResultSet executeQuery(String sql) throws SQLException { - if (isClosed()) { + // check if closed + if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } + //TODO: 如果在executeQuery方法中执行insert语句,那么先执行了SQL,再通过pSql来检查是否为一个insert语句,但这个insert SQL已经执行成功了 - // TODO make sure it is not a update query - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + // execute query + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (this.connector.isUpdateQuery(pSql)) { this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - - // create/insert/update/delete/alter - if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - this.connector.freeResultSet(pSql); - return null; - } - - if (!this.connector.isUpdateQuery(pSql)) { - TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer); - res.setBatchFetch(this.connection.getBatchFetch()); - return res; - } else { - this.connector.freeResultSet(pSql); - return null; + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY); } + TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql); + res.setBatchFetch(this.connection.getBatchFetch()); + return res; } public int executeUpdate(String sql) throws SQLException { - if (isClosed()) { + if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - // TODO check if current query is update query - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (!this.connector.isUpdateQuery(pSql)) { this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE); } - - this.affectedRows = this.connector.getAffectedRows(pSql); + int affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - - return this.affectedRows; - } - - public String getErrorMsg(long pSql) { - return this.connector.getErrMsg(pSql); + return affectedRows; } public void close() throws SQLException { if (!isClosed) { - if (!this.connector.isResultsetClosed()) { - this.connector.freeResultSet(); - } + if (this.resultSet != null) + this.resultSet.close(); isClosed = true; } } - public int getMaxFieldSize() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - return 0; - } - - public void setMaxFieldSize(int max) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getMaxRows() throws SQLException { - if (isClosed()) { - throw 
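In the rewritten `executeQuery` above, the SQL is executed first and only then classified via `isUpdateQuery(pSql)`; the Chinese TODO in the hunk notes exactly this wrinkle: an INSERT passed to `executeQuery` has already executed successfully by the time the check rejects it. The caller-side contract is therefore strict: reads go through `executeQuery`, writes through `executeUpdate`. A usage sketch; connection details and SQL are illustrative:

```java
import java.sql.*;

// Usage sketch of the contract enforced above: executeQuery() now raises a
// typed error when handed DML/DDL, so callers must route writes through
// executeUpdate(). URL, credentials and table are illustrative.
public class ExecuteContractDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/demo_db", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            int rows = stmt.executeUpdate("INSERT INTO demo_table VALUES (NOW, 1.0)"); // writes go here
            try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM demo_table")) { // reads go here
                rs.next();
                System.out.println(rows + " inserted, " + rs.getLong(1) + " total");
            }
        }
    }
}
```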
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - // always set maxRows to zero, meaning unlimitted rows in a resultSet - return 0; - } - - public void setMaxRows(int max) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - // always set maxRows to zero, meaning unlimited rows in a resultSet - } - - public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getQueryTimeout() throws SQLException { - return queryTimeout; - } - - public void setQueryTimeout(int seconds) throws SQLException { - this.queryTimeout = seconds; - } - - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setCursorName(String name) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public boolean execute(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - boolean res = true; - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - // no result set is retrieved + // check if closed + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // execute query + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (this.connector.isUpdateQuery(pSql)) { + this.affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - res = false; + return false; } - return res; + this.resultSet = new TSDBResultSet(this, this.connector, pSql); + this.resultSet.setBatchFetch(this.connection.getBatchFetch()); + return true; } public ResultSet getResultSet() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - long resultSetPointer = connector.getResultSet(); - TSDBResultSet resSet = null; - if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - resSet = new TSDBResultSet(connector, resultSetPointer); - } - return resSet; + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); +// long resultSetPointer = connector.getResultSet(); +// TSDBResultSet resSet = null; +// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// resSet = new TSDBResultSet(connector, resultSetPointer); +// } + return this.resultSet; } public int getUpdateCount() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); return this.affectedRows; } - public boolean getMoreResults() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void 
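The rewritten `execute(String)` above returns `true` and stashes a `TSDBResultSet` for queries, or records the affected-row count and returns `false` for updates, with `getResultSet()`/`getUpdateCount()` serving whichever was produced. A sketch of the standard caller-side protocol this enables:

```java
import java.sql.*;

// Sketch of the execute()/getResultSet()/getUpdateCount() protocol wired up
// above: execute() returns true when a ResultSet is available and false when
// only an update count is.
final class GenericExecuteDemo {
    static void run(Statement stmt, String sql) throws SQLException {
        if (stmt.execute(sql)) {
            try (ResultSet rs = stmt.getResultSet()) {
                while (rs.next())
                    System.out.println(rs.getObject(1));
            }
        } else {
            System.out.println(stmt.getUpdateCount() + " row(s) affected");
        }
    }
}
```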
setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; -// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ - public void setFetchSize(int rows) throws SQLException { - } - - /* - * used by spark - */ - public int getFetchSize() throws SQLException { - return 4096; - } - - public int getResultSetConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetType() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void addBatch(String sql) throws SQLException { - if (batchedArgs == null) { - batchedArgs = new ArrayList<>(); - } - batchedArgs.add(sql); - } - - public void clearBatch() throws SQLException { - batchedArgs.clear(); - } - - public int[] executeBatch() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - if (batchedArgs == null) { - throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); - } else { - int[] res = new int[batchedArgs.size()]; - for (int i = 0; i < batchedArgs.size(); i++) { - res[i] = executeUpdate(batchedArgs.get(i)); - } - return res; - } - } - public Connection getConnection() throws SQLException { - if (this.connector != null) - return this.connection; - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean getMoreResults(int current) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (this.connector == null) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + return this.connection; } public boolean isClosed() throws SQLException { return isClosed; } - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isPoolable() throws SQLException { - throw new 
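The `addBatch`/`executeBatch` plumbing deleted above simply looped `executeUpdate` over the queued statements; since it is not re-added in this file, it presumably now lives in `AbstractStatement`. The caller-side shape is unchanged. A usage sketch under that assumption:

```java
import java.sql.SQLException;
import java.sql.Statement;

// Usage sketch: standard JDBC batching, which for this driver amounts to one
// executeUpdate per queued statement (as in the loop deleted above).
final class BatchDemo {
    static int[] runBatch(Statement stmt, String... sqls) throws SQLException {
        for (String sql : sqls)
            stmt.addBatch(sql);
        return stmt.executeBatch(); // one update count per queued statement
    }
}
```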
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public void closeOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isCloseOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index deffd9aa2ae88802f71af5cbec66c5896cf4e19a..dd0d0d5b7b2744e0e6d28a3d73ec2b29fab37743 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ -14,15 +14,11 @@ *****************************************************************************/ package com.taosdata.jdbc; -import javax.management.OperationsException; import java.sql.SQLException; -import java.util.Map; -import java.util.TimerTask; -import java.util.concurrent.*; public class TSDBSubscribe { - private TSDBJNIConnector connecter = null; - private long id = 0; + private final TSDBJNIConnector connecter; + private final long id; TSDBSubscribe(TSDBJNIConnector connecter, long id) throws SQLException { if (null != connecter) { @@ -36,9 +32,8 @@ public class TSDBSubscribe { /** * consume * - * @throws OperationsException, SQLException */ - public TSDBResultSet consume() throws OperationsException, SQLException { + public TSDBResultSet consume() throws SQLException { if (this.connecter.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } @@ -50,7 +45,7 @@ public class TSDBSubscribe { } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { return null; } else { - return new TSDBResultSet(this.connecter, resultSetPointer); + return new TSDBResultSet(null, this.connecter, resultSetPointer); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..5b7539d434e0c5595d75da0a50baca9ab59c953b --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java @@ -0,0 +1,21 @@ +package com.taosdata.jdbc; + +import java.sql.SQLException; +import java.sql.Wrapper; + +public class WrapperImpl implements Wrapper { + + @Override + public T unwrap(Class iface) throws SQLException { + try { + return iface.cast(this); + } catch (ClassCastException cce) { + throw new SQLException("Unable to unwrap to " + iface.toString()); + } + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 5260b780bd510edad6ef8ea9a481fa334cca50f6..0484701f68f317dab2a1415b321d093d488c5b6a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -1,34 +1,23 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.TSDBConstants; -import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.*; import java.sql.*; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Map; import java.util.Properties; -import java.util.concurrent.Executor; -public class RestfulConnection 
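The new `WrapperImpl` above gives every driver class a uniform `unwrap`/`isWrapperFor` via `iface.cast(this)` and `iface.isInstance(this)`. A caller-side usage sketch; the unwrap target assumes the driver classes from this patch are on the classpath, and the URL is illustrative:

```java
import java.sql.*;

// Usage sketch for the new WrapperImpl base: unwrap() hands callers the
// vendor class behind a standard JDBC interface.
public class UnwrapDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/demo_db", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            if (stmt.isWrapperFor(com.taosdata.jdbc.TSDBStatement.class)) {
                com.taosdata.jdbc.TSDBStatement vendorStmt = stmt.unwrap(com.taosdata.jdbc.TSDBStatement.class);
                System.out.println("unwrapped: " + vendorStmt.getClass().getName());
            }
        }
    }
}
```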
implements Connection { +public class RestfulConnection extends AbstractConnection { - private static final String CONNECTION_IS_CLOSED = "connection is closed."; - private static final String AUTO_COMMIT_IS_TRUE = "auto commit is true"; private final String host; private final int port; - private final Properties props; - private volatile String database; private final String url; + private volatile String database; /******************************************************/ private boolean isClosed; - private DatabaseMetaData metadata; - private Map> typeMap; - private Properties clientInfoProps = new Properties(); + private final DatabaseMetaData metadata; public RestfulConnection(String host, String port, Properties props, String database, String url) { this.host = host; this.port = Integer.parseInt(port); - this.props = props; this.database = database; this.url = url; this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this); @@ -37,7 +26,8 @@ public class RestfulConnection implements Connection { @Override public Statement createStatement() throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + ; return new RestfulStatement(this, database); } @@ -45,59 +35,8 @@ public class RestfulConnection implements Connection { @Override public PreparedStatement prepareStatement(String sql) throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - //TODO: prepareStatement - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String nativeSQL(String sql) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - //nothing did - return sql; - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (!autoCommit) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean getAutoCommit() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return true; - } - - @Override - public void commit() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(AUTO_COMMIT_IS_TRUE); - //nothing to do - } - - @Override - public void rollback() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(AUTO_COMMIT_IS_TRUE); - //nothing to do + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + return new RestfulPreparedStatement(this, database, sql); } @Override @@ -116,356 +55,12 @@ public class RestfulConnection implements Connection { @Override public DatabaseMetaData getMetaData() throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + ; return this.metadata; } - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - if (isClosed()) - throw new 
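With this hunk the RESTful connection gains a working `prepareStatement` (returning the new `RestfulPreparedStatement` defined later in this patch) instead of throwing; the stray `;` added after each rewritten `throw` is a harmless empty statement. An end-to-end usage sketch; host, REST port and schema are illustrative, with the `jdbc:TAOS-RS://` prefix matching `URL_PREFIX` in `RestfulDriver` below:

```java
import java.sql.*;

// Usage sketch enabled by this hunk: the RESTful connection now returns a
// working PreparedStatement. Host, port and schema are illustrative.
public class RestfulPrepareDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/demo_db", "root", "taosdata");
             PreparedStatement ps = conn.prepareStatement("SELECT val FROM demo_table WHERE val > ?")) {
            ps.setDouble(1, 0.5);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next())
                    System.out.println(rs.getDouble(1));
            }
        }
    }
}
```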
SQLException(CONNECTION_IS_CLOSED); - // nothing to do - } - - @Override - public boolean isReadOnly() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return true; - } - - @Override - public void setCatalog(String catalog) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - synchronized (RestfulConnection.class) { - this.database = catalog; - } - } - - @Override - public String getCatalog() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return this.database; - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - switch (level) { - case Connection.TRANSACTION_NONE: - break; - case Connection.TRANSACTION_READ_UNCOMMITTED: - case Connection.TRANSACTION_READ_COMMITTED: - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - default: - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - } - } - - @Override - public int getTransactionIsolation() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - //Connection.TRANSACTION_NONE specifies that transactions are not supported. - return Connection.TRANSACTION_NONE; - } - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - return null; - } - - @Override - public void clearWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - //nothing to do - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return createStatement(); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); - - return this.prepareStatement(sql); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Map> getTypeMap() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - synchronized (RestfulConnection.class) { - if (this.typeMap == null) { - this.typeMap = new HashMap<>(); - } - return this.typeMap; - } - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - if (isClosed()) - throw new 
SQLException(CONNECTION_IS_CLOSED); - - synchronized (RestfulConnection.class) { - this.typeMap = map; - } - } - - @Override - public void setHoldability(int holdability) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - @Override - public Savepoint setSavepoint() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return createStatement(resultSetType, resultSetConcurrency); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return prepareStatement(sql, resultSetType, resultSetConcurrency); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob createClob() throws 
SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob createBlob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob createNClob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - if (timeout < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // TODO: - /* The driver shall submit a query on the connection or use some other mechanism that positively verifies - the connection is still valid when this method is called.*/ - return !isClosed(); - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - if (isClosed) - throw new SQLClientInfoException(); - clientInfoProps.setProperty(name, value); - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - if (isClosed) - throw new SQLClientInfoException(); - - for (Enumeration enumer = properties.keys(); enumer.hasMoreElements(); ) { - String name = (String) enumer.nextElement(); - clientInfoProps.put(name, properties.getProperty(name)); - } - } - - @Override - public String getClientInfo(String name) throws SQLException { - if (isClosed) - throw new SQLClientInfoException(); - - return clientInfoProps.getProperty(name); - } - - @Override - public Properties getClientInfo() throws SQLException { - if (isClosed) - throw new SQLClientInfoException(); - - return clientInfoProps; - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void setSchema(String schema) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - synchronized (RestfulConnection.class) { - this.database = schema; - } - } - - @Override - public String getSchema() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return this.database; - } - - @Override - public void abort(Executor executor) throws SQLException { - if (executor == null) { - throw new SQLException("Executor can not be null"); - } - - executor.execute(() -> { - try { - close(); - } catch (SQLException e) { - e.printStackTrace(); - } - }); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getNetworkTimeout() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return 0; - } - - @Override - public T unwrap(Class iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean 
isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - public String getHost() { return host; } @@ -474,10 +69,6 @@ public class RestfulConnection implements Connection { return port; } - public Properties getProps() { - return props; - } - public String getDatabase() { return database; } @@ -485,4 +76,4 @@ public class RestfulConnection implements Connection { public String getUrl() { return url; } -} +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java index 0ef64e4a9ee9e58f9e1010889b030a6c0be13304..d108f46a796bbae350f942a17f90efa29cc3021e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java @@ -1,10 +1,12 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.*; +import com.taosdata.jdbc.AbstractDatabaseMetaData; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.sql.*; -import java.util.ArrayList; -import java.util.List; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { @@ -33,11 +35,10 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { return RestfulDriver.class.getName(); } - @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { if (connection == null || connection.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTables(catalog, schemaPattern, tableNamePattern, types, connection); } @@ -45,14 +46,14 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { @Override public ResultSet getCatalogs() throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getCatalogs(connection); } @Override public ResultSet getTableTypes() throws SQLException { if (connection == null || connection.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTableTypes(); } @@ -60,21 +61,26 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { @Override public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, connection); } @Override public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); 
return super.getPrimaryKeys(catalog, schema, table, connection); } + @Override + public Connection getConnection() throws SQLException { + return this.connection; + } + @Override public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getSuperTables(catalog, schemaPattern, tableNamePattern, connection); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index fca8847114529280bf70d4bf8e56508a572ef7e4..d45ca0c3a0bc4ea596b9e9577db33e8aa1bee696 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -2,16 +2,18 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; -import com.taosdata.jdbc.AbstractTaosDriver; +import com.taosdata.jdbc.AbstractDriver; import com.taosdata.jdbc.TSDBConstants; import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; +import com.taosdata.jdbc.utils.HttpClientPoolUtil; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; import java.sql.*; import java.util.Properties; import java.util.logging.Logger; -public class RestfulDriver extends AbstractTaosDriver { +public class RestfulDriver extends AbstractDriver { private static final String URL_PREFIX = "jdbc:TAOS-RS://"; @@ -41,6 +43,15 @@ public class RestfulDriver extends AbstractTaosDriver { + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; + try { + String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8"); + String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8"); + loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + String result = HttpClientPoolUtil.execute(loginUrl); JSONObject jsonResult = JSON.parseObject(result); String status = jsonResult.getString("status"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java new file mode 100644 index 0000000000000000000000000000000000000000..3a0ff56dd71979f21b24f68b41ea9d7bb994586e --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -0,0 +1,465 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; + +public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement { + + private ParameterMetaData parameterMetaData; + private final String rawSql; + private Object[] parameters; + private boolean isPrepared; + + public 
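The `RestfulDriver` hunk above URL-encodes user and password before building the `/rest/login/` URL, so credentials containing reserved characters survive the round trip; note that the unencoded `loginUrl` assignment just before it is left in place and immediately overwritten. A self-contained sketch of the encoding step; host and port are illustrative:

```java
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

// Sketch of the credential-encoding step above. StandardCharsets.UTF_8 would
// avoid the checked exception, but encoding by the name "UTF-8" matches the hunk.
final class LoginUrlDemo {
    static String loginUrl(String host, int port, String user, String password) throws UnsupportedEncodingException {
        String u = URLEncoder.encode(user, "UTF-8");
        String p = URLEncoder.encode(password, "UTF-8");
        return "http://" + host + ":" + port + "/rest/login/" + u + "/" + p;
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
        System.out.println(loginUrl("localhost", 6041, "root", "taos/data")); // '/' becomes %2F
    }
}
```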
RestfulPreparedStatement(RestfulConnection conn, String database, String sql) { + super(conn, database); + this.rawSql = sql; + if (sql.contains("?")) { + int parameterCnt = 0; + for (int i = 0; i < sql.length(); i++) { + if ('?' == sql.charAt(i)) { + parameterCnt++; + } + } + parameters = new Object[parameterCnt]; + this.isPrepared = true; + } + //TODO: build parameterMetaData + } + + @Override + public ResultSet executeQuery() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!isPrepared) + return executeQuery(this.rawSql); + + final String sql = getNativeSql(this.rawSql); + return executeQuery(sql); + } + + @Override + public int executeUpdate() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!isPrepared) + return executeUpdate(this.rawSql); + + final String sql = getNativeSql(this.rawSql); + return executeUpdate(sql); + } + + private String getNativeSql(String rawSql) throws SQLException { + String sql = rawSql; + for (int i = 0; i < parameters.length; ++i) { + Object para = parameters[i]; + if (para != null) { + String paraStr = para.toString(); + if (para instanceof Timestamp || para instanceof String) { + paraStr = "'" + paraStr + "'"; + } + sql = sql.replaceFirst("[?]", paraStr); + } else { + sql = sql.replaceFirst("[?]", "NULL"); + } + } + clearParameters(); + return sql; + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + setObject(parameterIndex, "NULL"); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + setObject(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
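`getNativeSql` above interpolates parameters client-side: each `?` is replaced in order, with `Timestamp` and `String` values single-quoted and unset slots rendered as `NULL`. Because `replaceFirst` treats the SQL as plain text, a literal `?` inside a string constant would be consumed as a parameter slot, and values are not escaped, so this is text substitution rather than server-side binding. A condensed sketch; the `quoteReplacement` call is a hardening this sketch adds, since the raw `replaceFirst` above would misinterpret `$` or `\` in values:

```java
import java.sql.Timestamp;
import java.util.regex.Matcher;

// Condensed sketch of the client-side interpolation above: positional, with
// quoting for strings/timestamps and NULL for unset slots. Plain text
// substitution, not server-side parameter binding.
final class NativeSqlDemo {
    static String interpolate(String rawSql, Object... params) {
        String sql = rawSql;
        for (Object p : params) {
            String text = (p == null) ? "NULL"
                    : (p instanceof Timestamp || p instanceof String) ? "'" + p + "'"
                    : p.toString();
            sql = sql.replaceFirst("[?]", Matcher.quoteReplacement(text));
        }
        return sql;
    }

    public static void main(String[] args) {
        // prints the SQL with the timestamp quoted; exact text depends on the JVM time zone
        System.out.println(interpolate("INSERT INTO t VALUES (?, ?)", new Timestamp(0), 3.14));
    }
}
```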
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + setObject(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void clearParameters() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + this.parameters = new Object[parameters.length]; + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + if (parameterIndex < 1 && parameterIndex >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + parameters[parameterIndex - 1] = x; + + } + + @Override + public boolean execute() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!isPrepared) + return execute(this.rawSql); + final String sql = getNativeSql(rawSql); + return execute(sql); + } + + @Override + public void addBatch() throws SQLException { + if (isClosed()) + throw 
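One nit in `setObject` above: the guard `parameterIndex < 1 && parameterIndex >= parameters.length` can never be true (an index cannot be both below 1 and at or above the array length), so out-of-range indexes fall through to `parameters[parameterIndex - 1]` and surface as `ArrayIndexOutOfBoundsException` instead of the intended `ERROR_PARAMETER_INDEX_OUT_RANGE`. The check presumably wants an OR, plus `>` rather than `>=` so the last 1-based slot stays valid. A corrected sketch:

```java
import java.sql.SQLException;

// Corrected bounds check for a 1-based JDBC parameter index over an Object[].
// The guard above uses `< 1 && >= length`, which is unsatisfiable and so
// rejects nothing; OR plus `> length` matches the valid range 1..length.
final class ParameterIndexCheck {
    static void check(int parameterIndex, int parameterCount) throws SQLException {
        if (parameterIndex < 1 || parameterIndex > parameterCount)
            throw new SQLException("parameter index out of range: " + parameterIndex);
    }
}
```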
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + final String sql = getNativeSql(this.rawSql); + addBatch(sql); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); +// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + return this.parameterMetaData; + } + + @Override + public void setRowId(int parameterIndex, RowId x) 
throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + if (isClosed()) + throw 
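Nearly every remaining override follows the same two-step guard: fail if the statement is closed, then report the operation as unsupported. From the caller's side that means optional JDBC features are best probed defensively; a small sketch (the statement and payload are hypothetical):

```java
import java.sql.PreparedStatement;
import java.sql.SQLException;

// Sketch: try an optional setter and fall back when the RESTful driver
// reports it as unsupported. The payload and fallback encoding are
// illustrative assumptions, not part of this patch.
class OptionalFeatureProbe {
    static void bind(PreparedStatement ps, byte[] payload) throws SQLException {
        try {
            ps.setBytes(1, payload);              // throws ERROR_UNSUPPORTED_METHOD here
        } catch (SQLException unsupported) {
            ps.setString(1, new String(payload)); // supported fallback
        }
    }
}
```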
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 38d3f2b6aa21427647d108038fdd616927221520..9d394b8b038917d637ab43826d57e4d79b1746c7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -2,29 +2,26 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractResultSet; import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; import java.util.List; -import java.util.Map; -public class RestfulResultSet implements ResultSet { +public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; private final String database; private final Statement statement; // data - private ArrayList> resultSet = new ArrayList<>(); + private ArrayList> resultSet; // meta - private ArrayList columnNames = new ArrayList<>(); - private ArrayList columns = new ArrayList<>(); + private ArrayList columnNames; + private ArrayList columns; private RestfulResultSetMetaData metaData; /** @@ -32,11 +29,36 @@ public class RestfulResultSet implements ResultSet { * * @param resultJson: 包含data信息的结果集,有sql返回的结果集 ***/ - public RestfulResultSet(String database, Statement statement, 
JSONObject resultJson) { + public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException { this.database = database; this.statement = statement; + // column metadata + JSONArray columnMeta = resultJson.getJSONArray("column_meta"); + columnNames = new ArrayList<>(); + columns = new ArrayList<>(); + for (int colIndex = 0; colIndex < columnMeta.size(); colIndex++) { + JSONArray col = columnMeta.getJSONArray(colIndex); + String col_name = col.getString(0); + int col_type = TSDBConstants.taosType2JdbcType(col.getInteger(1)); + int col_length = col.getInteger(2); + columnNames.add(col_name); + columns.add(new Field(col_name, col_type, col_length, "")); + } + this.metaData = new RestfulResultSetMetaData(this.database, columns, this); + // row data JSONArray data = resultJson.getJSONArray("data"); + resultSet = new ArrayList<>(); + for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) { + ArrayList row = new ArrayList(); + JSONArray jsonRow = data.getJSONArray(rowIndex); + for (int colIndex = 0; colIndex < jsonRow.size(); colIndex++) { + row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).type)); + } + resultSet.add(row); + } + + /* int columnIndex = 0; for (; columnIndex < data.size(); columnIndex++) { ArrayList oneRow = new ArrayList<>(); @@ -55,50 +77,77 @@ public class RestfulResultSet implements ResultSet { columns.add(new Field(name, "", 0, "")); } this.metaData = new RestfulResultSetMetaData(this.database, columns, this); - } - - /** - * 由多个resultSet的JSON构造结果集 - * - * @param resultJson: 包含data信息的结果集,有sql返回的结果集 - * @param fieldJson: 包含多个(最多2个)meta信息的结果集,有describe xxx - **/ - public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List fieldJson) { - this(database, statement, resultJson); - ArrayList newColumns = new ArrayList<>(); - - for (Field column : columns) { - Field field = findField(column.name, fieldJson); - if (field != null) { - newColumns.add(field); - } else { - newColumns.add(column); - } + */ + } + + private Object parseColumnData(JSONArray row, int colIndex, int sqlType) { + switch (sqlType) { + case Types.NULL: + return null; + case Types.BOOLEAN: + return row.getBoolean(colIndex); + case Types.TINYINT: + case Types.SMALLINT: + return row.getShort(colIndex); + case Types.INTEGER: + return row.getInteger(colIndex); + case Types.BIGINT: + return row.getBigInteger(colIndex); + case Types.FLOAT: + return row.getFloat(colIndex); + case Types.DOUBLE: + return row.getDouble(colIndex); + case Types.TIMESTAMP: + return new Timestamp(row.getDate(colIndex).getTime()); + case Types.BINARY: + case Types.NCHAR: + default: + return row.getString(colIndex); } - this.columns = newColumns; - this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this); } - public Field findField(String columnName, List fieldJsonList) { - for (JSONObject fieldJSON : fieldJsonList) { - JSONArray fieldDataJson = fieldJSON.getJSONArray("data"); - for (int i = 0; i < fieldDataJson.size(); i++) { - JSONArray field = fieldDataJson.getJSONArray(i); - if (columnName.equalsIgnoreCase(field.getString(0))) { - return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3)); - } - } - } - return null; - } +// /** +// * 由多个resultSet的JSON构造结果集 +// * +// * @param resultJson: 包含data信息的结果集,有sql返回的结果集 +// * @param fieldJson: 包含多个(最多2个)meta信息的结果集,有describe xxx +// **/ +// public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List fieldJson) throws 
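The new single constructor above consumes the raw /rest/sql response directly: each column_meta entry is a [name, TDengine type code, length] triplet mapped to a java.sql.Types constant via TSDBConstants.taosType2JdbcType, and each data row is eagerly converted by parseColumnData. A self-contained sketch of that shape (the payload, and the type codes 9 and 6, are illustrative assumptions):

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

// Sketch of a /rest/sql response and the two arrays the constructor reads.
// The payload below is hand-written for illustration only.
public class RestPayloadSketch {
    public static void main(String[] args) {
        String body = "{\"status\":\"succ\","
                + "\"column_meta\":[[\"ts\",9,8],[\"temperature\",6,4]],"
                + "\"data\":[[\"2021-01-01 00:00:00.000\",21.5]],\"rows\":1}";
        JSONObject resultJson = JSON.parseObject(body);

        JSONArray columnMeta = resultJson.getJSONArray("column_meta");
        for (int i = 0; i < columnMeta.size(); i++) {
            JSONArray col = columnMeta.getJSONArray(i);
            System.out.println(col.getString(0) + ": taos type " + col.getInteger(1)
                    + ", length " + col.getInteger(2));
        }
        JSONArray data = resultJson.getJSONArray("data");
        System.out.println("first row: " + data.getJSONArray(0));
    }
}
```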
SQLException { +// this(database, statement, resultJson); +// ArrayList newColumns = new ArrayList<>(); +// +// for (Field column : columns) { +// Field field = findField(column.name, fieldJson); +// if (field != null) { +// newColumns.add(field); +// } else { +// newColumns.add(column); +// } +// } +// this.columns = newColumns; +// this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this); +// } + +// public Field findField(String columnName, List fieldJsonList) { +// for (JSONObject fieldJSON : fieldJsonList) { +// JSONArray fieldDataJson = fieldJSON.getJSONArray("data"); +// for (int i = 0; i < fieldDataJson.size(); i++) { +// JSONArray field = fieldDataJson.getJSONArray(i); +// if (columnName.equalsIgnoreCase(field.getString(0))) { +// return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3)); +// } +// } +// } +// return null; +// } public class Field { String name; - String type; + int type; int length; String note; - public Field(String name, String type, int length, String note) { + public Field(String name, int type, int length, String note) { this.name = name; this.type = type; this.length = length; @@ -109,7 +158,7 @@ public class RestfulResultSet implements ResultSet { @Override public boolean next() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); pos++; if (pos <= resultSet.size() - 1) { return true; @@ -127,14 +176,14 @@ public class RestfulResultSet implements ResultSet { @Override public boolean wasNull() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); return resultSet.isEmpty(); } @Override public String getString(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); if (columnIndex > resultSet.get(pos).size()) { throw new SQLException(TSDBConstants.WrapErrMsg("Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size())); @@ -144,29 +193,20 @@ public class RestfulResultSet implements ResultSet { return resultSet.get(pos).get(columnIndex).toString(); } - @Override public boolean getBoolean(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); int result = getInt(columnIndex); return result == 0 ? 
false : true; } - @Override - public byte getByte(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public short getShort(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); return Short.parseShort(resultSet.get(pos).get(columnIndex).toString()); } @@ -174,7 +214,7 @@ public class RestfulResultSet implements ResultSet { @Override public int getInt(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); return Integer.parseInt(resultSet.get(pos).get(columnIndex).toString()); } @@ -182,7 +222,7 @@ public class RestfulResultSet implements ResultSet { @Override public long getLong(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); return Long.parseLong(resultSet.get(pos).get(columnIndex).toString()); } @@ -190,7 +230,7 @@ public class RestfulResultSet implements ResultSet { @Override public float getFloat(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); return Float.parseFloat(resultSet.get(pos).get(columnIndex).toString()); } @@ -198,7 +238,7 @@ public class RestfulResultSet implements ResultSet { @Override public double getDouble(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); return Double.parseDouble(resultSet.get(pos).get(columnIndex).toString()); @@ -217,44 +257,10 @@ public class RestfulResultSet implements ResultSet { return columnIndex - 1; } - /*******************************************************************************************************************/ - - @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - 
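Each typed getter above keeps the same shape: re-derive the value from the stored cell's toString(). That is why getBoolean() can ride on getInt(), and why getTimestamp() can use Timestamp.valueOf() on cells that parseColumnData already materialized as Timestamp objects. A caller-side sketch (the column layout is hypothetical):

```java
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

// Sketch of reading typed values; every getter re-parses cell.toString().
class TypedGetterSketch {
    static void dumpRow(ResultSet rs) throws SQLException {
        Timestamp ts = rs.getTimestamp(1); // Timestamp.valueOf(cell.toString())
        boolean ok = rs.getBoolean(2);     // getInt(2) != 0
        double v = rs.getDouble(3);        // Double.parseDouble(cell.toString())
        System.out.println(ts + " " + ok + " " + v);
    }
}
```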
@Override - public Time getTime(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); columnIndex = getTrueColumnIndex(columnIndex); String strDate = resultSet.get(pos).get(columnIndex).toString(); @@ -262,152 +268,15 @@ public class RestfulResultSet implements ResultSet { return Timestamp.valueOf(strDate); } - @Override - public InputStream getAsciiStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getBinaryStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /*************************************************************************************************************/ - - @Override - public String getString(String columnLabel) throws SQLException { - return getString(findColumn(columnLabel)); - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - return getBoolean(findColumn(columnLabel)); - } - - @Override - public byte getByte(String columnLabel) throws SQLException { - return getByte(findColumn(columnLabel)); - } - - @Override - public short getShort(String columnLabel) throws SQLException { - return getShort(findColumn(columnLabel)); - } - - @Override - public int getInt(String columnLabel) throws SQLException { - return getInt(findColumn(columnLabel)); - } - - @Override - public long getLong(String columnLabel) throws SQLException { - return getLong(findColumn(columnLabel)); - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - return getFloat(findColumn(columnLabel)); - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - return getDouble(findColumn(columnLabel)); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return getBigDecimal(findColumn(columnLabel)); - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - return getBytes(findColumn(columnLabel)); - } - - @Override - public Date getDate(String columnLabel) throws SQLException { - return getDate(findColumn(columnLabel)); - } - - @Override - public Time getTime(String columnLabel) throws SQLException { - return getTime(findColumn(columnLabel)); - } - - @Override - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return getTimestamp(findColumn(columnLabel)); - } - - @Override - public InputStream getAsciiStream(String columnLabel) throws SQLException { - 
return getAsciiStream(findColumn(columnLabel)); - } - - @Override - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - return getUnicodeStream(findColumn(columnLabel)); - } - - @Override - public InputStream getBinaryStream(String columnLabel) throws SQLException { - return getBinaryStream(findColumn(columnLabel)); - } - /*************************************************************************************************************/ - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return null; - } - - @Override - public void clearWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return; - } - - @Override - public String getCursorName() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public ResultSetMetaData getMetaData() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); return this.metaData; } - @Override - public Object getObject(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Object getObject(String columnLabel) throws SQLException { return getObject(findColumn(columnLabel)); @@ -416,7 +285,7 @@ public class RestfulResultSet implements ResultSet { @Override public int findColumn(String columnLabel) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); int columnIndex = columnNames.indexOf(columnLabel); if (columnIndex == -1) @@ -424,49 +293,17 @@ public class RestfulResultSet implements ResultSet { return columnIndex + 1; } - @Override - public Reader getCharacterStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getCharacterStream(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public boolean isBeforeFirst() throws SQLException { if 
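getObject(String) and the other label-based getters funnel through findColumn(), a case-sensitive lookup in columnNames that returns a 1-based index. A usage sketch (connection, table, and column names are hypothetical):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Sketch: resolve a label once, then read by 1-based index inside the loop.
class LabelAccessSketch {
    static void printTemperatures(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT ts, temperature FROM weather")) {
            int col = rs.findColumn("temperature"); // 2 in this layout
            while (rs.next()) {
                System.out.println(rs.getTimestamp("ts") + " -> " + rs.getFloat(col));
            }
        }
    }
}
```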
(isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); return this.pos == -1 && this.resultSet.size() != 0; } @Override public boolean isAfterLast() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); return this.pos >= resultSet.size() && this.resultSet.size() != 0; } @@ -474,15 +311,14 @@ public class RestfulResultSet implements ResultSet { @Override public boolean isFirst() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); return this.pos == 0; } @Override public boolean isLast() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); if (this.resultSet.size() == 0) return false; return this.pos == (this.resultSet.size() - 1); @@ -491,7 +327,7 @@ public class RestfulResultSet implements ResultSet { @Override public void beforeFirst() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); synchronized (this) { if (this.resultSet.size() > 0) { @@ -503,19 +339,18 @@ public class RestfulResultSet implements ResultSet { @Override public void afterLast() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); synchronized (this) { if (this.resultSet.size() > 0) { this.pos = this.resultSet.size(); } } - } @Override public boolean first() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); if (this.resultSet.size() == 0) return false; @@ -529,7 +364,7 @@ public class RestfulResultSet implements ResultSet { @Override public boolean last() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); if (this.resultSet.size() == 0) return false; synchronized (this) { @@ -541,7 +376,7 @@ public class RestfulResultSet implements ResultSet { @Override public int getRow() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); int row; synchronized (this) { if (this.pos < 0 || this.pos >= this.resultSet.size()) @@ -554,7 +389,7 @@ public class RestfulResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); // if (this.resultSet.size() == 0) // return false; @@ -586,721 +421,42 @@ public class RestfulResultSet implements ResultSet { // } // } - throw new 
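All of the positioning logic above is driven by the single pos field: -1 means before the first row, resultSet.size() means after the last, and getRow() reports pos + 1 while the cursor is on a row. A sketch of the observable behaviour on any RestfulResultSet:

```java
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch: walk a forward-only RestfulResultSet and observe the cursor state.
class CursorSketch {
    static void walk(ResultSet rs) throws SQLException {
        System.out.println("before first? " + rs.isBeforeFirst()); // true when non-empty
        while (rs.next()) {
            System.out.println("row " + rs.getRow() + (rs.isLast() ? " (last)" : ""));
        }
        System.out.println("after last? " + rs.isAfterLast());
    }
}
```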
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean relative(int rows) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean previous() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if ((direction != ResultSet.FETCH_FORWARD) && (direction != ResultSet.FETCH_REVERSE) && (direction != ResultSet.FETCH_UNKNOWN)) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - if (!(getType() == ResultSet.TYPE_FORWARD_ONLY && direction == ResultSet.FETCH_FORWARD)) - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getFetchDirection() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - return ResultSet.FETCH_FORWARD; + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override - public void setFetchSize(int rows) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if (rows < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + public String getNString(int columnIndex) throws SQLException { + return getString(columnIndex); } @Override - public int getFetchSize() throws SQLException { + public Statement getStatement() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return this.resultSet.size(); - } - - @Override - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - @Override - public int getConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; - } - - @Override - public boolean rowUpdated() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowInserted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowDeleted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateByte(int 
columnIndex, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); 
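The long run of deleted update* overrides is not lost behaviour: the class now extends AbstractResultSet, so every read-only violation can fail in one shared place instead of being restated per subclass. The base class itself is outside this diff, so the following is only an assumed sketch of the pattern, not its actual code:

```java
import java.sql.ResultSet;
import java.sql.SQLException;

import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;

// Assumed shape of the new AbstractResultSet: unsupported mutators throw in
// one place, and concrete result sets override only what they support.
abstract class AbstractResultSetSketch implements ResultSet {
    @Override
    public void updateString(int columnIndex, String x) throws SQLException {
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }
    // ... likewise for the remaining update*/stream/LOB mutators ...
}
```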
- } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return this.statement; } @Override - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + public boolean isClosed() throws SQLException { + return isClosed; } - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void insertRow() throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void deleteRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void refreshRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void cancelRowUpdates() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToInsertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToCurrentRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Statement getStatement() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return this.statement; - } - - @Override - public Object getObject(int columnIndex, Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /******************************************************************************************************************/ - @Override - public Object getObject(String columnLabel, Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(int 
columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - @Override - public boolean isClosed() throws SQLException { - return 
isClosed; - } - - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws 
SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T unwrap(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java index 44a02f486b22063cabb8950c21d3a9f55bfc70c8..29ba13bec107be4564a95d27e800fce53b114f96 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java @@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBConstants; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Timestamp; +import java.sql.Types; import java.util.ArrayList; public class RestfulResultSetMetaData implements ResultSetMetaData { @@ -53,14 +54,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public boolean isSigned(int column) throws SQLException { - String type = this.fields.get(column - 1).type.toUpperCase(); + int type = this.fields.get(column - 1).type; switch (type) { - case "TINYINT": - case "SMALLINT": - case "INT": - case "BIGINT": - case "FLOAT": - case "DOUBLE": + case Types.TINYINT: + case Types.SMALLINT: + case Types.INTEGER: + case Types.BIGINT: + case Types.FLOAT: + case Types.DOUBLE: return true; default: return false; @@ -89,14 +90,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public int 
@@ -89,14 +90,14 @@
     @Override
     public int getPrecision(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
+        int type = this.fields.get(column - 1).type;
         switch (type) {
-            case "FLOAT":
+            case Types.FLOAT:
                 return 5;
-            case "DOUBLE":
+            case Types.DOUBLE:
                 return 9;
-            case "BINARY":
-            case "NCHAR":
+            case Types.BINARY:
+            case Types.NCHAR:
                 return this.fields.get(column - 1).length;
             default:
                 return 0;
@@ -105,11 +106,11 @@
     @Override
     public int getScale(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
+        int type = this.fields.get(column - 1).type;
         switch (type) {
-            case "FLOAT":
+            case Types.FLOAT:
                 return 5;
-            case "DOUBLE":
+            case Types.DOUBLE:
                 return 9;
             default:
                 return 0;
@@ -128,36 +129,13 @@
     @Override
     public int getColumnType(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type.toUpperCase();
-        switch (type) {
-            case "BOOL":
-                return java.sql.Types.BOOLEAN;
-            case "TINYINT":
-                return java.sql.Types.TINYINT;
-            case "SMALLINT":
-                return java.sql.Types.SMALLINT;
-            case "INT":
-                return java.sql.Types.INTEGER;
-            case "BIGINT":
-                return java.sql.Types.BIGINT;
-            case "FLOAT":
-                return java.sql.Types.FLOAT;
-            case "DOUBLE":
-                return java.sql.Types.DOUBLE;
-            case "BINARY":
-                return java.sql.Types.BINARY;
-            case "TIMESTAMP":
-                return java.sql.Types.TIMESTAMP;
-            case "NCHAR":
-                return java.sql.Types.NCHAR;
-        }
-        throw new SQLException(TSDBConstants.INVALID_VARIABLES);
+        return this.fields.get(column - 1).type;
     }
 
     @Override
     public String getColumnTypeName(int column) throws SQLException {
-        String type = fields.get(column - 1).type;
-        return type.toUpperCase();
+        int type = fields.get(column - 1).type;
+        return TSDBConstants.jdbcType2TaosTypeName(type);
     }
 
     @Override
@@ -177,26 +155,26 @@
     @Override
     public String getColumnClassName(int column) throws SQLException {
-        String type = this.fields.get(column - 1).type;
+        int type = this.fields.get(column - 1).type;
         String columnClassName = "";
         switch (type) {
-            case "BOOL":
+            case Types.BOOLEAN:
                 return Boolean.class.getName();
-            case "TINYINT":
-            case "SMALLINT":
+            case Types.TINYINT:
+            case Types.SMALLINT:
                 return Short.class.getName();
-            case "INT":
+            case Types.INTEGER:
                 return Integer.class.getName();
-            case "BIGINT":
+            case Types.BIGINT:
                 return Long.class.getName();
-            case "FLOAT":
+            case Types.FLOAT:
                 return Float.class.getName();
-            case "DOUBLE":
+            case Types.DOUBLE:
                 return Double.class.getName();
-            case "TIMESTAMP":
+            case Types.TIMESTAMP:
                 return Timestamp.class.getName();
-            case "BINARY":
-            case "NCHAR":
+            case Types.BINARY:
+            case Types.NCHAR:
                 return String.class.getName();
         }
         return columnClassName;
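Since the metadata now carries java.sql.Types codes instead of type-name strings, a caller can switch on getColumnType() directly and no longer needs to compare against "INT", "FLOAT", and so on. A minimal sketch of such a consumer, assuming a reachable server; the JDBC URL and query are illustrative only:

```java
import java.sql.*;

public class MetaDataSketch {
    public static void main(String[] args) throws SQLException {
        // URL, database and query are placeholders; adjust for a real deployment.
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from log.dn")) {
            ResultSetMetaData meta = rs.getMetaData();
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                // getColumnType(i) now returns a java.sql.Types constant directly,
                // so there is no string comparison against type names.
                System.out.printf("%s: type=%d (%s), class=%s, signed=%b%n",
                        meta.getColumnLabel(i), meta.getColumnType(i),
                        meta.getColumnTypeName(i), meta.getColumnClassName(i),
                        meta.isSigned(i));
            }
        }
    }
}
```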
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index 14ea3180924a8b23055ff610a8f7db5a70522039..d60940d87762340203e54fb7df23579c2f93d510 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -1,9 +1,13 @@
 package com.taosdata.jdbc.rs;
 
 import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONArray;
 import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.AbstractStatement;
 import com.taosdata.jdbc.TSDBConstants;
-import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;
+import com.taosdata.jdbc.TSDBError;
+import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.utils.HttpClientPoolUtil;
 import com.taosdata.jdbc.utils.SqlSyntaxValidator;
 
 import java.sql.*;
@@ -12,7 +16,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
-public class RestfulStatement implements Statement {
+public class RestfulStatement extends AbstractStatement {
 
     private boolean closed;
     private String database;
@@ -20,14 +24,13 @@
     private volatile RestfulResultSet resultSet;
     private volatile int affectedRows;
-    private volatile boolean closeOnCompletion;
 
     public RestfulStatement(RestfulConnection conn, String database) {
         this.conn = conn;
         this.database = database;
     }
 
-    private String[] parseTableIdentifier(String sql) {
+    protected String[] parseTableIdentifier(String sql) {
         sql = sql.trim().toLowerCase();
         String[] ret = null;
         if (sql.contains("where"))
@@ -63,17 +66,17 @@
     @Override
     public ResultSet executeQuery(String sql) throws SQLException {
         if (isClosed())
-            throw new SQLException("statement already closed");
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
         if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
-            throw new SQLException("not a valid sql for executeQuery: " + sql);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
 
         final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
         if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
             return executeOneQuery(url, sql);
         }
 
-        if (this.database == null || this.database.isEmpty())
-            throw new SQLException("Database not specified or available");
+//        if (this.database == null || this.database.isEmpty())
+//            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE);
         HttpClientPoolUtil.execute(url, "use " + this.database);
         return executeOneQuery(url, sql);
     }
@@ -81,17 +84,18 @@
     @Override
     public int executeUpdate(String sql) throws SQLException {
         if (isClosed())
-            throw new SQLException("statement already closed");
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
         if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
-            throw new SQLException("not a valid sql for executeUpdate: " + sql);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
 
         final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
         if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
             return executeOneUpdate(url, sql);
         }
 
-        if (this.database == null || this.database.isEmpty())
-            throw new SQLException("Database not specified or available");
+//        if (this.database == null || this.database.isEmpty())
+//            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE);
+
         HttpClientPoolUtil.execute(url, "use " + this.database);
         return executeOneUpdate(url, sql);
     }
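Both paths above funnel through plain HTTP POSTs against /rest/sql: first a "use <database>" command, then the statement itself. A rough sketch of that two-step flow using the HttpClientPoolUtil this patch relocates; host, port, database and table names are placeholders, and the broad throws clause is only there because the utility's exact signature is not shown here:

```java
import com.taosdata.jdbc.utils.HttpClientPoolUtil;

public class RestFlowSketch {
    public static void main(String[] args) throws Exception {
        // Host, port, database and table are illustrative.
        String url = "http://127.0.0.1:6041/rest/sql";
        HttpClientPoolUtil.execute(url, "use test");                        // step 1: select the database
        String json = HttpClientPoolUtil.execute(url, "select * from t0");  // step 2: run the statement
        System.out.println(json);                                           // raw JSON envelope from the server
    }
}
```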
@@ -104,116 +108,41 @@
         }
     }
 
-    @Override
-    public int getMaxFieldSize() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return TSDBConstants.maxFieldSize;
-    }
-
-    @Override
-    public void setMaxFieldSize(int max) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        if (max < 0)
-            throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-        // nothing to do
-    }
-
-    @Override
-    public int getMaxRows() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return 0;
-    }
-
-    @Override
-    public void setMaxRows(int max) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        if (max < 0)
-            throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-        // nothing to do
-    }
-
-    @Override
-    public void setEscapeProcessing(boolean enable) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-    }
-
-    @Override
-    public int getQueryTimeout() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return 0;
-    }
-
-    @Override
-    public void setQueryTimeout(int seconds) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        if (seconds < 0)
-            throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-    }
-
-    @Override
-    public void cancel() throws SQLException {
-        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public SQLWarning getWarnings() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return null;
-    }
-
-    @Override
-    public void clearWarnings() throws SQLException {
-        // nothing to do
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-    }
-
-    @Override
-    public void setCursorName(String name) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
     @Override
     public boolean execute(String sql) throws SQLException {
         if (isClosed())
-            throw new SQLException("Invalid method call on a closed statement.");
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
         if (!SqlSyntaxValidator.isValidForExecute(sql))
-            throw new SQLException("not a valid sql for execute: " + sql);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql);
 
         // if a "use" statement was executed, the current Statement's catalog should be set to the new database
+        boolean result = true;
         final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
         if (SqlSyntaxValidator.isUseSql(sql)) {
             HttpClientPoolUtil.execute(url, sql);
             this.database = sql.trim().replace("use", "").trim();
             this.conn.setCatalog(this.database);
+            result = false;
         } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
             executeOneQuery(url, sql);
         } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
             executeOneUpdate(url, sql);
+            result = false;
         } else {
             if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
                 executeQuery(sql);
             } else {
                 executeUpdate(sql);
+                result = false;
             }
         }
 
-        return true;
+        return result;
     }
 
     private ResultSet executeOneQuery(String url, String sql) throws SQLException {
         if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
-            throw new SQLException("not a select sql for executeQuery: " + sql);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
 
         // row data
         String result = HttpClientPoolUtil.execute(url, sql);
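The reworked execute() above now follows the standard JDBC contract: true only when a ResultSet is available, false for use/update-style statements. A minimal sketch of how a caller branches on that flag; connection setup is omitted and "stmt" is any open Statement:

```java
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteContractSketch {
    static void run(Statement stmt, String sql) throws SQLException {
        if (stmt.execute(sql)) {
            // true: the statement produced a ResultSet
            try (ResultSet rs = stmt.getResultSet()) {
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // consume the first column
                }
            }
        } else {
            // false: USE/INSERT/CREATE and friends report an update count instead
            System.out.println("affected rows: " + stmt.getUpdateCount());
        }
    }
}
```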
@@ -222,29 +151,28 @@ public class RestfulStatement implements Statement {
             throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
         }
         // parse table name from sql
-        String[] tableIdentifiers = parseTableIdentifier(sql);
-        if (tableIdentifiers != null) {
-            List<JSONObject> fieldJsonList = new ArrayList<>();
-            for (String tableIdentifier : tableIdentifiers) {
-                // field meta
-                String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
-                JSONObject fieldJson = JSON.parseObject(fields);
-                if (fieldJson.getString("status").equals("error")) {
-                    throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
-                }
-                fieldJsonList.add(fieldJson);
-            }
-            this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
-        } else {
-            this.resultSet = new RestfulResultSet(database, this, resultJson);
-        }
+//        String[] tableIdentifiers = parseTableIdentifier(sql);
+//        if (tableIdentifiers != null) {
+//            List<JSONObject> fieldJsonList = new ArrayList<>();
+//            for (String tableIdentifier : tableIdentifiers) {
+//                String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
+//                JSONObject fieldJson = JSON.parseObject(fields);
+//                if (fieldJson.getString("status").equals("error")) {
+//                    throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
+//                }
+//                fieldJsonList.add(fieldJson);
+//            }
+//            this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
+//        } else {
+        this.resultSet = new RestfulResultSet(database, this, resultJson);
+//        }
         this.affectedRows = 0;
         return resultSet;
     }
 
     private int executeOneUpdate(String url, String sql) throws SQLException {
         if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
-            throw new SQLException("not a valid sql for executeUpdate: " + sql);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
 
         String result = HttpClientPoolUtil.execute(url, sql);
         JSONObject jsonObject = JSON.parseObject(result);
@@ -252,206 +180,49 @@ public class RestfulStatement implements Statement {
             throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code")));
         }
         this.resultSet = null;
-        this.affectedRows = Integer.parseInt(jsonObject.getString("rows"));
+        this.affectedRows = checkJsonResultSet(jsonObject);
         return this.affectedRows;
     }
 
-    @Override
-    public ResultSet getResultSet() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return resultSet;
-    }
-
-    @Override
-    public int getUpdateCount() throws SQLException {
-        if (isClosed()) {
-            throw new SQLException("Invalid method call on a closed statement.");
+    private int checkJsonResultSet(JSONObject jsonObject) {
+        // create ... SQL statements should return 0, and the RESTful result looks like this:
+        // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
+        JSONArray head = jsonObject.getJSONArray("head");
+        JSONArray data = jsonObject.getJSONArray("data");
+        int rows = Integer.parseInt(jsonObject.getString("rows"));
+        if (head.size() == 1 && "affected_rows".equals(head.getString(0))
+                && data.size() == 1 && data.getJSONArray(0).getInteger(0) == 0 && rows == 1) {
+            return 0;
         }
-        return this.affectedRows;
-    }
-
-    @Override
-    public boolean getMoreResults() throws SQLException {
-        return getMoreResults(CLOSE_CURRENT_RESULT);
-    }
-
-    @Override
-    public void setFetchDirection(int direction) throws SQLException {
-        if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN)
-            throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-        this.resultSet.setFetchDirection(direction);
-    }
-
-    @Override
-    public int getFetchDirection() throws SQLException {
-        return this.resultSet.getFetchDirection();
-    }
-
-    @Override
-    public void setFetchSize(int rows) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        if (rows < 0)
-            throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-        //nothing to do
+        return rows;
     }
 
     @Override
-    public int getFetchSize() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return 0;
-    }
-
-    @Override
-    public int getResultSetConcurrency() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return this.resultSet.getConcurrency();
-    }
-
-    @Override
-    public int getResultSetType() throws SQLException {
+    public ResultSet getResultSet() throws SQLException {
         if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return this.resultSet.getType();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
+        return resultSet;
     }
 
     @Override
-    public void addBatch(String sql) throws SQLException {
+    public int getUpdateCount() throws SQLException {
         if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        //TODO:
-    }
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
 
-    @Override
-    public void clearBatch() throws SQLException {
-        //TODO:
-    }
-
-    @Override
-    public int[] executeBatch() throws SQLException {
-        //TODO:
-        return new int[0];
+        return this.affectedRows;
     }
 
     @Override
     public Connection getConnection() throws SQLException {
         if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
         return this.conn;
     }
 
-    @Override
-    public boolean getMoreResults(int current) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        if (resultSet == null)
-            return false;
-
-//        switch (current) {
-//            case CLOSE_CURRENT_RESULT:
-//                resultSet.close();
-//                break;
-//            case KEEP_CURRENT_RESULT:
-//                break;
-//            case CLOSE_ALL_RESULTS:
-//                resultSet.close();
-//                break;
-//            default:
-//                throw new SQLException(TSDBConstants.INVALID_VARIABLES);
-//        }
-//        return next;
-        return false;
-    }
-
-    @Override
-    public ResultSet getGeneratedKeys() throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public int executeUpdate(String sql, String[] columnNames) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public boolean execute(String sql, int[] columnIndexes) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public boolean execute(String sql, String[] columnNames) throws SQLException {
-        throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
-    }
-
-    @Override
-    public int getResultSetHoldability() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return this.resultSet.getHoldability();
-    }
-
     @Override
     public boolean isClosed() throws SQLException {
         return closed;
     }
 
-    @Override
-    public void setPoolable(boolean poolable) throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        //nothing to do
-    }
-
-    @Override
-    public boolean isPoolable() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return false;
-    }
-
-    @Override
-    public void closeOnCompletion() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        this.closeOnCompletion = true;
-    }
-
-    @Override
-    public boolean isCloseOnCompletion() throws SQLException {
-        if (isClosed())
-            throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
-        return this.closeOnCompletion;
-    }
-
-    @Override
-    public <T> T unwrap(Class<T> iface) throws SQLException {
-        try {
-            return iface.cast(this);
-        } catch (ClassCastException cce) {
-            throw new SQLException("Unable to unwrap to " + iface.toString());
-        }
-    }
-
-    @Override
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-        return iface.isInstance(this);
-    }
 }
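The affected_rows special case handled by checkJsonResultSet() above is easy to reproduce in isolation. A standalone sketch using fastjson and the sample payload quoted in the code comment; a real payload would come back from /rest/sql:

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

public class EnvelopeSketch {
    public static void main(String[] args) {
        // Sample response for a DDL statement, as quoted in checkJsonResultSet().
        String payload = "{\"status\":\"succ\",\"head\":[\"affected_rows\"],\"data\":[[0]],\"rows\":1}";
        JSONObject obj = JSON.parseObject(payload);
        JSONArray head = obj.getJSONArray("head");
        JSONArray data = obj.getJSONArray("data");
        int rows = obj.getIntValue("rows");
        // DDL reports rows=1 with a single affected_rows cell of 0;
        // the driver normalizes that back to an update count of 0.
        boolean ddl = head.size() == 1 && "affected_rows".equals(head.getString(0))
                && data.size() == 1 && data.getJSONArray(0).getIntValue(0) == 0 && rows == 1;
        System.out.println(ddl ? 0 : rows); // prints 0
    }
}
```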
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
similarity index 93%
rename from src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java
rename to src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
index 9b1681ff948dc9257e7b775d947c598d9dc60378..5e2440fd1d0e551690c5f17fe60b30c1d4a377dc 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
@@ -1,6 +1,5 @@
-package com.taosdata.jdbc.rs.util;
+package com.taosdata.jdbc.utils;
 
-import org.apache.commons.lang3.StringUtils;
 import org.apache.http.HeaderElement;
 import org.apache.http.HeaderElementIterator;
 import org.apache.http.HttpEntity;
@@ -39,7 +38,7 @@ public class HttpClientPoolUtil {
     /**
      * Initialize the connection pool
      */
-    public static synchronized void initPools() {
+    private static synchronized void initPools() {
         if (httpClient == null) {
             cm = new PoolingHttpClientConnectionManager();
             cm.setDefaultMaxPerRoute(count);
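With the org.apache.commons.lang3 import gone, StringUtils.isBlank() is no longer available to this class; the getRequest() hunk further down inlines an equivalent check instead, presumably to shed the dependency. A standalone rendering of that check, for reference:

```java
public final class Strings {
    private Strings() {
    }

    // Equivalent of the inlined replacement for StringUtils.isBlank():
    // null, empty, and whitespace-only strings all count as blank.
    public static boolean isBlank(String s) {
        return s == null || s.replaceAll("\\s", "").isEmpty();
    }
}
```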
@@ -51,7 +50,7 @@ public class HttpClientPoolUtil {
     /**
      * HTTP connection keep-alive strategy
      */
-    public static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
+    private static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
         HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
         int keepTime = Http_Default_Keep_Time * 1000;
         while (it.hasNext()) {
@@ -69,14 +68,6 @@ public class HttpClientPoolUtil {
         return keepTime;
     };
 
-    public static CloseableHttpClient getHttpClient() {
-        return httpClient;
-    }
-
-    public static PoolingHttpClientConnectionManager getHttpConnectionManager() {
-        return cm;
-    }
-
     /**
      * Execute an HTTP POST request
      * Defaults to Content-Type:application/json, Accept:application/json
@@ -95,8 +86,10 @@ public class HttpClientPoolUtil {
             initPools();
         }
         method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
-        method.setHeader("Authorization", "Taosd " + token);
         method.setHeader("Content-Type", "text/plain");
+        method.setHeader("Connection", "keep-alive");
+        method.setHeader("Authorization", "Taosd " + token);
+
         method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
         HttpContext context = HttpClientContext.create();
         CloseableHttpResponse httpResponse = httpClient.execute(method, context);
@@ -131,7 +124,7 @@ public class HttpClientPoolUtil {
      * @return HttpRequestBase the request object
      * @author lisc
      */
-    public static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
+    private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
         if (httpClient == null) {
             initPools();
         }
@@ -152,7 +145,7 @@ public class HttpClientPoolUtil {
             method = new HttpPost(uri);
         }
 
-        if (StringUtils.isBlank(contentType)) {
+        if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) {
             contentType = DEFAULT_CONTENT_TYPE;
         }
         method.addHeader("Content-Type", contentType);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
deleted file mode 100644
index 66d23f2ffafb8d266d0f24651676214b6c48d418..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
+++ /dev/null
@@ -1,39 +0,0 @@
-package com.taosdata.jdbc;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Properties;
-
-
-public class ConnectionTest {
-    private Connection connection;
-    private Statement statement;
-    private static String host = "127.0.0.1";
-
-    @Test
-    public void testConnection() {
-        Properties properties = new Properties();
-        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
-        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
-        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-
-        try {
-            Class.forName("com.taosdata.jdbc.TSDBDriver");
-            connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
-            Assert.assertTrue(null != connection);
-            statement = connection.createStatement();
-            Assert.assertTrue(null != statement);
-            statement.close();
-            connection.close();
-        } catch (ClassNotFoundException e) {
-            return;
-        } catch (SQLException e) {
-            e.printStackTrace();
-        }
-    }
-}
diff --git
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java deleted file mode 100644 index 8c9b258dcdf8d4e5bb2f6cfc91fe945f3425b4d2..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java +++ /dev/null @@ -1,184 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.*; -import org.junit.runners.MethodSorters; - -import java.sql.*; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -@FixMethodOrder(value = MethodSorters.NAME_ASCENDING) -public class PreparedStatementTest { - static Connection connection; - static TSDBPreparedStatement statement; - static String dbName = "test"; - static String tName = "t0"; - static String host = "localhost"; - - @BeforeClass - public static void createConnection() throws SQLException { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - } catch (ClassNotFoundException e) { - return; - } - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - String sql = "drop database if exists " + dbName; - statement = (TSDBPreparedStatement) connection.prepareStatement(sql); - } - - @Test - public void case001_createTableAndQuery() throws SQLException { - long ts = System.currentTimeMillis(); - - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)"); - statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)"); - - PreparedStatement selectStatement = connection.prepareStatement("select * from " + dbName + "." + tName); - ResultSet resultSet = selectStatement.executeQuery(); - assertTrue(null != resultSet); - - boolean isClosed = statement.isClosed(); - assertEquals(false, isClosed); - selectStatement.close(); - } - - @Test - public void case002_testPreparedStatement() throws SQLException { - long ts = System.currentTimeMillis() + 20000; - - PreparedStatement saveStatement = connection.prepareStatement("insert into " + dbName + "." + tName + " values (" + ts + ", 1)"); - int affectedRows = saveStatement.executeUpdate(); - assertTrue(1 == affectedRows); - saveStatement.close(); - } - - @Test - public void case003_testSavedPreparedStatement() throws SQLException { - long ts = System.currentTimeMillis(); - TSDBPreparedStatement saveStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." 
+ tName + " values (?, ?)"); - saveStatement.setObject(1, ts + 10000); - saveStatement.setObject(2, 3); - int rows = saveStatement.executeUpdate(); - assertEquals(1, rows); - saveStatement.close(); - } - - @Test - public void case004_testUnsupport() throws SQLException { - - Assert.assertNotNull(statement.unwrap(TSDBPreparedStatement.class)); - Assert.assertTrue(statement.isWrapperFor(TSDBPreparedStatement.class)); - - try { - statement.getMaxFieldSize(); - } catch (SQLException e) { - } - try { - statement.setMaxFieldSize(0); - } catch (SQLException e) { - } - try { - statement.setEscapeProcessing(true); - } catch (SQLException e) { - } - try { - statement.cancel(); - } catch (SQLException e) { - } - try { - statement.getWarnings(); - } catch (SQLException e) { - } - try { - statement.clearWarnings(); - } catch (SQLException e) { - } - try { - statement.setCursorName(null); - } catch (SQLException e) { - } - try { - statement.getMoreResults(); - } catch (SQLException e) { - } - try { - statement.setFetchDirection(0); - } catch (SQLException e) { - } - try { - statement.getFetchDirection(); - } catch (SQLException e) { - } - try { - statement.getResultSetConcurrency(); - } catch (SQLException e) { - } - try { - statement.getResultSetType(); - } catch (SQLException e) { - } - try { - statement.getConnection(); - } catch (SQLException e) { - } - try { - statement.getMoreResults(); - } catch (SQLException e) { - } - try { - statement.getGeneratedKeys(); - } catch (SQLException e) { - } - try { - statement.executeUpdate(null, 0); - } catch (SQLException e) { - } - try { - statement.executeUpdate(null, new int[]{0}); - } catch (SQLException e) { - } - try { - statement.executeUpdate(null, new String[]{"str1", "str2"}); - } catch (SQLException e) { - } - try { - statement.getResultSetHoldability(); - } catch (SQLException e) { - } - try { - statement.setPoolable(true); - } catch (SQLException e) { - } - try { - statement.isPoolable(); - } catch (SQLException e) { - } - try { - statement.closeOnCompletion(); - } catch (SQLException e) { - - } - try { - statement.isCloseOnCompletion(); - } catch (SQLException e) { - } - } - - @AfterClass - public static void close() throws Exception { - statement.executeUpdate("drop database " + dbName); - statement.close(); - connection.close(); - Thread.sleep(10); - - } - -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java index 3d80ff066cacfdf71f087942f11e2ee5e86b65ee..87348f90261163eff650ecb225a168921c162f58 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java @@ -7,12 +7,12 @@ import org.junit.Test; import javax.sql.rowset.serial.SerialBlob; import javax.sql.rowset.serial.SerialClob; +import java.io.UnsupportedEncodingException; import java.sql.*; import java.util.HashMap; import java.util.Properties; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; public class ResultSetTest { static Connection connection; @@ -39,7 +39,6 @@ public class ResultSetTest { } catch (ClassNotFoundException | SQLException e) { return; } - } @Test @@ -54,51 +53,38 @@ public class ResultSetTest { short v6 = 12; boolean v7 = false; String v8 = "TDengine is powerful"; - sql = "insert into " + dbName + "." 
+ tName + " values (" + ts + "," + v1 + "," + v2 + "," + v3 + "," + v4 + ",\"" + v5 + "\"," + v6 + "," + v7 + ",\"" + v8 + "\")"; - try { statement.executeUpdate(sql); assertEquals(1, statement.getUpdateCount()); } catch (SQLException e) { assert false : "insert error " + e.getMessage(); } - try { - statement.executeQuery("select * from " + dbName + "." + tName + " where ts = " + ts); + statement.execute("select * from " + dbName + "." + tName + " where ts = " + ts); resSet = statement.getResultSet(); System.out.println(((TSDBResultSet) resSet).getRowData()); while (resSet.next()) { assertEquals(ts, resSet.getLong(1)); assertEquals(ts, resSet.getLong("ts")); - System.out.println(resSet.getTimestamp(1)); - assertEquals(v1, resSet.getInt(2)); assertEquals(v1, resSet.getInt("k1")); - assertEquals(v2, resSet.getLong(3)); assertEquals(v2, resSet.getLong("k2")); - assertEquals(v3, resSet.getFloat(4), 7); assertEquals(v3, resSet.getFloat("k3"), 7); - assertEquals(v4, resSet.getDouble(5), 13); assertEquals(v4, resSet.getDouble("k4"), 13); - assertEquals(v5, resSet.getString(6)); assertEquals(v5, resSet.getString("k5")); - assertEquals(v6, resSet.getShort(7)); assertEquals(v6, resSet.getShort("k6")); - assertEquals(v7, resSet.getBoolean(8)); assertEquals(v7, resSet.getBoolean("k7")); - assertEquals(v8, resSet.getString(9)); assertEquals(v8, resSet.getString("k8")); - resSet.getBytes(9); resSet.getObject(6); resSet.getObject("k8"); @@ -111,684 +97,145 @@ public class ResultSetTest { } } - @Test - public void testUnsupport() throws SQLException { - statement.executeQuery("show databases"); + @Test(expected = SQLException.class) + public void testUnsupport() throws SQLException, UnsupportedEncodingException { + statement.execute("show databases"); resSet = statement.getResultSet(); Assert.assertNotNull(resSet.unwrap(TSDBResultSet.class)); Assert.assertTrue(resSet.isWrapperFor(TSDBResultSet.class)); - try { - resSet.getAsciiStream(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getAsciiStream(""); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.clearWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCursorName(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isBeforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isAfterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.beforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.afterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.first(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.last(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.absolute(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.relative(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.previous(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchDirection(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchDirection(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchSize(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchSize(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getConcurrency(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowUpdated(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowInserted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowDeleted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean(0, true); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte(0, (byte) 2); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - 
resSet.updateShort(0, (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt(0, 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong(0, 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateFloat(0, 3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble(0, 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean("", false); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte("", (byte) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateShort("", (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt("", 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong("", 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateFloat("", 
3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble("", 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.insertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.deleteRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.refreshRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.cancelRowUpdates(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToInsertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToCurrentRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getStatement(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject(0, new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject("", new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob("", new SerialClob("".toCharArray())); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob(0, new SerialClob("".toCharArray())); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this 
operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getHoldability(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported 
currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getAsciiStream(""); + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getWarnings(); + resSet.clearWarnings(); + resSet.getCursorName(); + resSet.getCharacterStream(null); + resSet.getCharacterStream(null); + resSet.isBeforeFirst(); + resSet.isAfterLast(); + resSet.isFirst(); + resSet.isLast(); + resSet.beforeFirst(); + resSet.afterLast(); + resSet.first(); + resSet.last(); + resSet.getRow(); + resSet.absolute(1); + resSet.relative(1); + resSet.previous(); + resSet.setFetchDirection(0); + resSet.getFetchDirection(); + resSet.setFetchSize(0); + resSet.getFetchSize(); + resSet.getConcurrency(); + resSet.rowUpdated(); + resSet.rowInserted(); + resSet.rowDeleted(); + resSet.updateNull(null); + resSet.updateBoolean(0, true); + resSet.updateByte(0, (byte) 2); + resSet.updateShort(0, (short) 1); + resSet.updateInt(0, 0); + resSet.updateLong(0, 0l); + resSet.updateFloat(0, 3.14f); + resSet.updateDouble(0, 3.1415); + resSet.updateBigDecimal(null, null); + resSet.updateString(null, null); + resSet.updateBytes(null, null); + resSet.updateDate(null, null); + resSet.updateTime(null, null); + resSet.updateTimestamp(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateObject(null, null); + resSet.updateObject(null, null); + resSet.updateNull(null); + resSet.updateBoolean("", false); + resSet.updateByte("", (byte) 1); + resSet.updateShort("", (short) 1); + resSet.updateInt("", 0); + resSet.updateLong("", 0l); + resSet.updateFloat("", 3.14f); + resSet.updateDouble("", 3.1415); 
+        resSet.updateBigDecimal(null, null);
+        resSet.updateString(null, null);
+        resSet.updateBytes(null, null);
+        resSet.updateDate(null, null);
+        resSet.updateTime(null, null);
+        resSet.updateTimestamp(null, null);
+        resSet.updateAsciiStream(null, null);
+        resSet.updateBinaryStream(null, null);
+        resSet.updateCharacterStream(null, null);
+        resSet.updateObject(null, null);
+        resSet.updateObject(null, null);
+        resSet.insertRow();
+        resSet.updateRow();
+        resSet.deleteRow();
+        resSet.refreshRow();
+        resSet.cancelRowUpdates();
+        resSet.moveToInsertRow();
+        resSet.moveToCurrentRow();
+        resSet.getStatement();
+        resSet.getObject(0, new HashMap<>());
+        resSet.getRef(null);
+        resSet.getBlob(null);
+        resSet.getClob(null);
+        resSet.getArray(null);
+        resSet.getObject("", new HashMap<>());
+        resSet.getRef(null);
+        resSet.getBlob(null);
+        resSet.getClob(null);
+        resSet.getArray(null);
+        resSet.getDate(null, null);
+        resSet.getDate(null, null);
+        resSet.getTime(null, null);
+        resSet.getTime(null, null);
+        resSet.getTimestamp(null, null);
+        resSet.getTimestamp(null, null);
+        resSet.getURL(null);
+        resSet.getURL(null);
+        resSet.updateRef(null, null);
+        resSet.updateRef(null, null);
+        resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8")));
+        resSet.updateBlob("", new SerialBlob("".getBytes("UTF8")));
+        resSet.updateClob("", new SerialClob("".toCharArray()));
+        resSet.updateClob(0, new SerialClob("".toCharArray()));
+        resSet.updateArray(null, null);
+        resSet.updateArray(null, null);
+        resSet.getRowId(null);
+        resSet.getRowId(null);
+        resSet.updateRowId(null, null);
+        resSet.updateRowId(null, null);
+        resSet.getHoldability();
+        resSet.updateNString(null, null);
+        resSet.updateNString(null, null);
+        resSet.getNClob(null);
+        resSet.getNClob(null);
+        resSet.getSQLXML(null);
+        resSet.getSQLXML(null);
+        resSet.updateSQLXML(null, null);
+        resSet.updateSQLXML(null, null);
+        resSet.getNCharacterStream(null);
+        resSet.getNCharacterStream(null);
+        resSet.updateNCharacterStream(null, null);
+        resSet.updateNCharacterStream(null, null);
+        resSet.updateAsciiStream(null, null);
+        resSet.updateBinaryStream(null, null);
+        resSet.updateCharacterStream(null, null);
+        resSet.updateAsciiStream(null, null);
+        resSet.updateBinaryStream(null, null);
+        resSet.updateCharacterStream(null, null);
+        resSet.updateNCharacterStream(null, null);
+        resSet.updateNCharacterStream(null, null);
+        resSet.updateAsciiStream(null, null);
+        resSet.updateBinaryStream(null, null);
+        resSet.updateCharacterStream(null, null);
+        resSet.updateAsciiStream(null, null);
+        resSet.updateBinaryStream(null, null);
+        resSet.updateCharacterStream(null, null);
     }
 
     @Test
@@ -816,5 +263,4 @@ public class ResultSetTest {
             e.printStackTrace();
         }
     }
-
 }
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
index b09482fb03f643cd183468b18bf36e50c50bf0b4..73ceafa7299b256d7e83064b53bd638835a4b075 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
@@ -8,9 +8,6 @@ import org.junit.Test;
 import java.sql.*;
 import java.util.Properties;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 public class StatementTest {
     static Connection connection = null;
     static Statement statement = null;
@@ -58,12 +55,12 @@ public class StatementTest {
         statement.executeUpdate("create database if not exists " + dbName);
         statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)");
         statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)");
-        statement.executeQuery("select * from " + dbName + "." + tName);
+        statement.execute("select * from " + dbName + "." + tName);
         ResultSet resultSet = statement.getResultSet();
-        assertTrue(null != resultSet);
+        Assert.assertNotNull(resultSet);
         boolean isClosed = statement.isClosed();
-        assertEquals(false, isClosed);
+        Assert.assertEquals(false, isClosed);
     }
 
     @Test(expected = SQLException.class)
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 1d8ff08db6d5f177fe74de579e1b6bb26ee35750..11c3de305280a48b690ccdacd2df9751f4b718a9 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -9,13 +9,14 @@ import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
+import java.util.concurrent.TimeUnit;
 
 public class SubscribeTest {
     Connection connection;
     Statement statement;
     String dbName = "test";
     String tName = "t0";
-    String host = "localhost";
+    String host = "127.0.0.1";
     String topic = "test";
 
     @Before
@@ -23,15 +24,15 @@
         try {
             Class.forName("com.taosdata.jdbc.TSDBDriver");
             Properties properties = new Properties();
-            properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
             properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
             properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
             properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
             connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
             statement = connection.createStatement();
-            statement.executeUpdate("create database if not exists " + dbName);
-            statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
+            statement.execute("drop database if exists " + dbName);
+            statement.execute("create database if not exists " + dbName);
+            statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
             long ts = System.currentTimeMillis();
             for (int i = 0; i < 2; i++) {
                 ts += i;
@@ -45,44 +46,39 @@
     }
 
     @Test
-    public void subscribe() throws Exception {
-        TSDBSubscribe subscribe = null;
+    public void subscribe() {
         try {
-
             String rawSql = "select * from " + dbName + "." + tName + ";";
             System.out.println(rawSql);
-            subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
-
-            int a = 0;
-            while (true) {
-                Thread.sleep(900);
-                TSDBResultSet resSet = subscribe.consume();
+//            TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
-                while (resSet.next()) {
-                    for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
-                        System.out.printf(i + ": " + resSet.getString(i) + "\t");
-                    }
-                    System.out.println("\n======" + a + "==========");
-                }
-                resSet.close();
-                a++;
-                if (a >= 2) {
-                    break;
-                }
-            }
+//            int a = 0;
+//            while (true) {
+//                TimeUnit.MILLISECONDS.sleep(1000);
+//                TSDBResultSet resSet = subscribe.consume();
+//                while (resSet.next()) {
+//                    for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
+//                        System.out.printf(i + ": " + resSet.getString(i) + "\t");
+//                    }
+//                    System.out.println("\n======" + a + "==========");
+//                }
+//                a++;
+//                if (a >= 2) {
+//                    break;
+//                }
+//                resSet.close();
+//            }
+//
+//            subscribe.close(true);
         } catch (Exception e) {
             e.printStackTrace();
-        } finally {
-            if (null != subscribe) {
-                subscribe.close(true);
-            }
         }
     }
 
     @After
     public void close() {
         try {
-            statement.executeQuery("drop database " + dbName);
+            statement.execute("drop database " + dbName);
             if (statement != null)
                 statement.close();
             if (connection != null)
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0a4ecb739cfd5a0abda01f7788d375ef95e0208a
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java
@@ -0,0 +1,423 @@
+package com.taosdata.jdbc;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.management.OperationsException;
+import java.sql.*;
+import java.util.Properties;
+
+public class TSDBConnectionTest {
+
+    private static final String host = "127.0.0.1";
+    private static Connection conn;
+
+    @Test
+    public void getConnection() {
+        // already test in beforeClass method
+    }
+
+    @Test
+    public void createStatement() {
+        try (Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("select server_status()");
+            rs.next();
+            int status = rs.getInt("server_status()");
+            Assert.assertEquals(1, status);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void subscribe() {
+        try {
+            TSDBConnection unwrap = conn.unwrap(TSDBConnection.class);
+            TSDBSubscribe subscribe = unwrap.subscribe("topic1", "select * from log.log", false);
+            TSDBResultSet rs = subscribe.consume();
+            ResultSetMetaData metaData = rs.getMetaData();
+            for (int count = 0; count < 10 && rs.next(); count++) {
+                for (int i = 1; i <= metaData.getColumnCount(); i++) {
+                    String value = rs.getString(i);
+                    System.out.print(metaData.getColumnLabel(i) + ":" + value + "\t");
+                }
+                System.out.println();
+            }
+            Assert.assertNotNull(rs);
+            subscribe.close(false);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+
+    }
+
+    @Test
+    public void prepareStatement() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("select server_status()");
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void prepareCall() throws SQLException {
+        conn.prepareCall("select server_status()");
+    }
+
+    @Test
+    public void nativeSQL() throws SQLException {
+        String nativeSQL = conn.nativeSQL("select * from log.log");
+        Assert.assertEquals("select * from log.log", nativeSQL);
+    }
+
+    @Test
+    public void setAutoCommit() throws SQLException {
+        conn.setAutoCommit(true);
+        conn.setAutoCommit(false);
+    }
+
+    @Test
+    public void getAutoCommit() throws SQLException {
+        Assert.assertTrue(conn.getAutoCommit());
+    }
+
+    @Test
+    public void commit() throws SQLException {
+        conn.commit();
+    }
+
+    @Test
+    public void rollback() throws SQLException {
+        conn.rollback();
+    }
+
+    @Test
+    public void close() {
+        // connection will close in afterClass method
+    }
+
+    @Test
+    public void isClosed() throws SQLException {
+        Assert.assertFalse(conn.isClosed());
+    }
+
+    @Test
+    public void getMetaData() throws SQLException {
+        DatabaseMetaData meta = conn.getMetaData();
+        Assert.assertNotNull(meta);
+        Assert.assertEquals("com.taosdata.jdbc.TSDBDriver", meta.getDriverName());
+    }
+
+    @Test
+    public void setReadOnly() throws SQLException {
+        conn.setReadOnly(true);
+    }
+
+    @Test
+    public void isReadOnly() throws SQLException {
+        Assert.assertTrue(conn.isReadOnly());
+    }
+
+    @Test
+    public void setCatalog() throws SQLException {
+        conn.setCatalog("test");
+        Assert.assertEquals("test", conn.getCatalog());
+    }
+
+    @Test
+    public void getCatalog() throws SQLException {
+        conn.setCatalog("log");
+        Assert.assertEquals("log", conn.getCatalog());
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setTransactionIsolation() throws SQLException {
+        conn.setTransactionIsolation(Connection.TRANSACTION_NONE);
+        Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+        conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
+    }
+
+    @Test
+    public void getTransactionIsolation() throws SQLException {
+        Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+    }
+
+    @Test
+    public void getWarnings() throws SQLException {
+        Assert.assertNull(conn.getWarnings());
+    }
+
+    @Test
+    public void clearWarnings() throws SQLException {
+        conn.clearWarnings();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testCreateStatement() throws SQLException {
+        Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("select server_status()");
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+
+        conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareStatement() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("select server_status()",
+                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+
+        conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareCall() throws SQLException {
+        conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void getTypeMap() throws SQLException {
+        conn.getTypeMap();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setTypeMap() throws SQLException {
+        conn.setTypeMap(null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setHoldability() throws SQLException {
+        conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
+        Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+        conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
+    }
+
+    @Test
+    public void getHoldability() throws SQLException {
+        Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setSavepoint() throws SQLException {
+        conn.setSavepoint();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testSetSavepoint() throws SQLException {
+        conn.setSavepoint(null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testRollback() throws SQLException {
+        conn.rollback(null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void releaseSavepoint() throws SQLException {
+        conn.releaseSavepoint(null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testCreateStatement1() throws SQLException {
+        Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+        ResultSet rs = stmt.executeQuery("select server_status()");
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+
+        conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareStatement1() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("select server_status()",
+                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+
+        conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareCall1() throws SQLException {
+        conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareStatement2() throws SQLException {
+        Assert.assertNotNull("", Statement.NO_GENERATED_KEYS);
+        conn.prepareStatement("", Statement.RETURN_GENERATED_KEYS);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareStatement3() throws SQLException {
+        conn.prepareStatement("", new int[]{});
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testPrepareStatement4() throws SQLException {
+        conn.prepareStatement("", new String[]{});
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createClob() throws SQLException {
+        conn.createClob();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createBlob() throws SQLException {
+        conn.createBlob();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createNClob() throws SQLException {
+        conn.createNClob();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createSQLXML() throws SQLException {
+        conn.createSQLXML();
+    }
+
+    @Test(expected = SQLException.class)
+    public void isValid() throws SQLException {
+        Assert.assertTrue(conn.isValid(10));
+        Assert.assertTrue(conn.isValid(0));
+        conn.isValid(-1);
+    }
+
+    @Test
+    public void setClientInfo() throws SQLClientInfoException {
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+    }
+
+    @Test
+    public void testSetClientInfo() throws SQLClientInfoException {
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+        conn.setClientInfo(properties);
+    }
+
+    @Test
+    public void getClientInfo() throws SQLException {
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+        Properties info = conn.getClientInfo();
+        String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET);
+        Assert.assertEquals("UTF-8", charset);
+        String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE);
+        Assert.assertEquals("en_US.UTF-8", locale);
+        String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
+        Assert.assertEquals("UTC-8", timezone);
+    }
+
+    @Test
+    public void testGetClientInfo() throws SQLException {
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+        conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+        String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET);
+        Assert.assertEquals("UTF-8", charset);
+        String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE);
+        Assert.assertEquals("en_US.UTF-8", locale);
+        String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
+        Assert.assertEquals("UTC-8", timezone);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createArrayOf() throws SQLException {
+        conn.createArrayOf("", null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void createStruct() throws SQLException {
+        conn.createStruct("", null);
+    }
+
+    @Test
+    public void setSchema() throws SQLException {
+        conn.setSchema("test");
+    }
+
+    @Test
+    public void getSchema() throws SQLException {
+        Assert.assertNull(conn.getSchema());
+    }
+
+    @Test
+    public void abort() throws SQLException {
+        conn.abort(null);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setNetworkTimeout() throws SQLException {
+        conn.setNetworkTimeout(null, 1000);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void getNetworkTimeout() throws SQLException {
+        conn.getNetworkTimeout();
+    }
+
+    @Test
+    public void unwrap() {
+        try {
+            TSDBConnection tsdbConnection = conn.unwrap(TSDBConnection.class);
+            Assert.assertNotNull(tsdbConnection);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void isWrapperFor() throws SQLException {
+        Assert.assertTrue(conn.isWrapperFor(TSDBConnection.class));
+    }
+
+    @BeforeClass
+    public static void beforeClass() {
+        try {
+            Class.forName("com.taosdata.jdbc.TSDBDriver");
+            Properties properties = new Properties();
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/log?user=root&password=taosdata", properties);
+            // create test database for test cases
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute("create database if not exists test");
+            }
+
+        } catch (ClassNotFoundException | SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @AfterClass
+    public static void afterClass() {
+        try {
+            if (conn != null)
+                conn.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
index 445723f501a85f7c697afffb59b273bcf6a1a630..671ecd723d6fea8d6b9b8ccf94cba06689ce26b8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
@@ -24,11 +24,10 @@ public class TSDBDriverTest {
             "jdbc:TAOS://:/test",
             "jdbc:TAOS://localhost:0/?user=root&password=taosdata"
     };
-
     private Connection conn;
 
     @Test
-    public void testConnectWithJdbcURL() {
+    public void connectWithJdbcURL() {
         final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
         try {
             conn = DriverManager.getConnection(url);
@@ -40,7 +39,7 @@ public class TSDBDriverTest {
     }
 
     @Test
-    public void testConnectWithProperties() {
+    public void connectWithProperties() {
         final String jdbcUrl = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
         Properties connProps = new Properties();
         connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
@@ -56,7 +55,7 @@ public class TSDBDriverTest {
     }
 
     @Test
-    public void testConnectWithConfigFile() {
+    public void connectWithConfigFile() {
         String jdbcUrl = "jdbc:TAOS://:/log?user=root&password=taosdata";
         Properties connProps = new Properties();
         connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
@@ -71,16 +70,6 @@ public class TSDBDriverTest {
         }
     }
 
-    @Test(expected = SQLException.class)
-    public void testAcceptsURL() throws SQLException {
-        Driver driver = new TSDBDriver();
-        for (String url : validURLs) {
-            assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url));
-        }
-        driver.acceptsURL(null);
-        fail("acceptsURL throws exception when parameter is null");
-    }
-
     @Test
     public void testParseURL() {
         TSDBDriver driver = new TSDBDriver();
@@ -121,8 +110,19 @@ public class TSDBDriverTest {
         assertNull("failure - dbname should be null", actual.getProperty("dbname"));
     }
 
+
+    @Test(expected = SQLException.class)
+    public void acceptsURL() throws SQLException {
+        Driver driver = new TSDBDriver();
+        for (String url : validURLs) {
+            assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url));
+        }
+        driver.acceptsURL(null);
+        fail("acceptsURL throws exception when parameter is null");
+    }
+
     @Test
-    public void testGetPropertyInfo() throws SQLException {
+    public void getPropertyInfo() throws SQLException {
         Driver driver = new TSDBDriver();
         final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
         Properties connProps = new Properties();
@@ -142,23 +142,23 @@ public class TSDBDriverTest {
     }
 
     @Test
-    public void testGetMajorVersion() {
-        assertEquals("failure - getMajorVersion should be 2", 2, new TSDBDriver().getMajorVersion());
+    public void getMajorVersion() {
+        assertEquals(2, new TSDBDriver().getMajorVersion());
     }
 
     @Test
-    public void testGetMinorVersion() {
-        assertEquals("failure - getMinorVersion should be 0", 0, new TSDBDriver().getMinorVersion());
+    public void getMinorVersion() {
+        assertEquals(0, new TSDBDriver().getMinorVersion());
     }
 
     @Test
-    public void testJdbcCompliant() {
-        assertFalse("failure - jdbcCompliant should be false", new TSDBDriver().jdbcCompliant());
+    public void jdbcCompliant() {
+        assertFalse(new TSDBDriver().jdbcCompliant());
     }
 
     @Test
-    public void testGetParentLogger() throws SQLFeatureNotSupportedException {
-        assertNull("failure - getParentLogger should be be null", new TSDBDriver().getParentLogger());
+    public void getParentLogger() throws SQLFeatureNotSupportedException {
+        assertNull(new TSDBDriver().getParentLogger());
     }
 
     @BeforeClass
@@ -169,6 +169,4 @@ public class TSDBDriverTest {
             e.printStackTrace();
         }
     }
-
-
 }
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
index ebacf6af6b480d7d8b9364256551f4cdb0034608..475cfef060e2769d3390c19bfebc521ada7c0b46 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
@@ -1,173 +1,228 @@
 package com.taosdata.jdbc;
 
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
+import java.sql.*;
 
 public class TSDBPreparedStatementTest {
     private static final String host = "127.0.0.1";
     private static Connection conn;
-
-    @Test
-    public void executeQuery() {
-
+    private static final String sql_insert = "insert into t1 values(?, ?)";
+    private static PreparedStatement pstmt_insert;
+    private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and temperature >= ?";
+    private static PreparedStatement pstmt_select;
+
+    @Test
+    public void executeQuery() throws SQLException {
+        long end = System.currentTimeMillis();
+        long start = end - 1000 * 60 * 60;
+        pstmt_select.setTimestamp(1, new Timestamp(start));
+        pstmt_select.setTimestamp(2, new Timestamp(end));
+        pstmt_select.setFloat(3, 0);
+
+        ResultSet rs = pstmt_select.executeQuery();
+        Assert.assertNotNull(rs);
+        ResultSetMetaData meta = rs.getMetaData();
+        while (rs.next()) {
+            for (int i = 1; i <= meta.getColumnCount(); i++) {
+                System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
+            }
+            System.out.println();
+        }
     }
 
     @Test
-    public void executeUpdate() {
-
+    public void executeUpdate() throws SQLException {
+        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
+        pstmt_insert.setFloat(2, 3.14f);
+        int result = pstmt_insert.executeUpdate();
+        Assert.assertEquals(1, result);
     }
 
     @Test
-    public void setNull() {
+    public void setNull() throws SQLException {
+        pstmt_insert.setNull(2, Types.FLOAT);
     }
 
     @Test
-    public void setBoolean() {
+    public void setBoolean() throws SQLException {
+        pstmt_insert.setBoolean(2, true);
    }
 
-    @Test
-    public void setByte() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setByte() throws SQLException {
+        pstmt_insert.setByte(1, (byte) 0x001);
     }
 
     @Test
     public void setShort() {
+
     }
 
     @Test
     public void setInt() {
+
     }
 
     @Test
     public void setLong() {
+
     }
 
     @Test
     public void setFloat() {
+
     }
 
     @Test
     public void setDouble() {
     }
 
-    @Test
-    public void setBigDecimal() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setBigDecimal() throws SQLException {
+        pstmt_insert.setBigDecimal(1, null);
     }
 
     @Test
     public void setString() {
     }
 
-    @Test
-    public void setBytes() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setBytes() throws SQLException {
+        pstmt_insert.setBytes(1, new byte[]{});
     }
 
-    @Test
-    public void setDate() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setDate() throws SQLException {
+        pstmt_insert.setDate(1, new Date(System.currentTimeMillis()));
     }
 
-    @Test
-    public void setTime() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setTime() throws SQLException {
+        pstmt_insert.setTime(1, new Time(System.currentTimeMillis()));
     }
 
     @Test
     public void setTimestamp() {
+        //TODO
     }
 
-    @Test
-    public void setAsciiStream() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setAsciiStream() throws SQLException {
+        pstmt_insert.setAsciiStream(1, null);
     }
 
-    @Test
-    public void setUnicodeStream() {
-    }
-
-    @Test
-    public void setBinaryStream() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setBinaryStream() throws SQLException {
+        pstmt_insert.setBinaryStream(1, null);
     }
 
     @Test
     public void clearParameters() {
+        //TODO
     }
 
     @Test
-    public void setObject() {
-
+    public void setObject() throws SQLException {
+        pstmt_insert.setObject(1, System.currentTimeMillis());
+        //TODO
     }
 
     @Test
     public void execute() {
+        //TODO
     }
 
     @Test
     public void addBatch() {
-
+        //TODO:
     }
 
-    @Test
-    public void setCharacterStream() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setCharacterStream() throws SQLException {
+        pstmt_insert.setCharacterStream(1, null);
     }
 
-    @Test
-    public void setRef() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setRef() throws SQLException {
+        pstmt_insert.setRef(1, null);
     }
 
-    @Test
-    public void setBlob() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setBlob() throws SQLException {
+        pstmt_insert.setBlob(1, (Blob) null);
     }
 
-    @Test
-    public void setClob() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setClob() throws SQLException {
+        pstmt_insert.setClob(1, (Clob) null);
     }
 
-    @Test
-    public void setArray() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setArray() throws SQLException {
+        pstmt_insert.setArray(1, null);
     }
 
-    @Test
-    public void getMetaData() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void getMetaData() throws SQLException {
+        pstmt_insert.getMetaData();
     }
 
-    @Test
-    public void setURL() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setURL() throws SQLException {
+        pstmt_insert.setURL(1, null);
     }
 
-    @Test
-    public void getParameterMetaData() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void getParameterMetaData() throws SQLException {
+        ParameterMetaData parameterMetaData = pstmt_insert.getParameterMetaData();
+//        Assert.assertNotNull(parameterMetaData);
+        //TODO:
     }
 
-    @Test
-    public void setRowId() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setRowId() throws SQLException {
+        pstmt_insert.setRowId(1, null);
     }
 
-    @Test
-    public void setNString() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setNString() throws SQLException {
+        pstmt_insert.setNString(1, null);
     }
 
-    @Test
-    public void setNCharacterStream() {
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setNCharacterStream() throws SQLException {
+        pstmt_insert.setNCharacterStream(1, null);
     }
 
-    @Test
-    public void setNClob() {
-
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setNClob() throws SQLException {
+        pstmt_insert.setNClob(1, (NClob) null);
     }
 
-    @Test
-    public void setSQLXML() {
-
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setSQLXML() throws SQLException {
+        pstmt_insert.setSQLXML(1, null);
     }
 
     @BeforeClass
     public static void beforeClass() {
         try {
-            Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+            Class.forName("com.taosdata.jdbc.TSDBDriver");
             conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute("drop database if exists test_pstmt");
+                stmt.execute("create database if not exists test_pstmt");
+                stmt.execute("use test_pstmt");
+                stmt.execute("create table weather(ts timestamp, temperature float) tags(loc nchar(64))");
+                stmt.execute("create table t1 using weather tags('beijing')");
+            }
+            pstmt_insert = conn.prepareStatement(sql_insert);
+            pstmt_select = conn.prepareStatement(sql_select);
         } catch (ClassNotFoundException | SQLException e) {
             e.printStackTrace();
         }
@@ -176,6 +231,7 @@ public class TSDBPreparedStatementTest {
     @AfterClass
     public static void afterClass() {
         try {
+
             if (conn != null)
                 conn.close();
         } catch (SQLException e) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..671551c42639620b09ace1ae315da6f25ea278d4
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java
@@ -0,0 +1,415 @@
+package com.taosdata.jdbc;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+import java.util.UUID;
+
+public class TSDBStatementTest {
+    private static final String host = "127.0.0.1";
+    private static Connection conn;
+    private static Statement stmt;
+
+    @Test
+    public void executeQuery() {
+        try {
+            ResultSet rs = stmt.executeQuery("show databases");
+            Assert.assertNotNull(rs);
+            ResultSetMetaData meta = rs.getMetaData();
+            while (rs.next()) {
+                for (int i = 1; i <= meta.getColumnCount(); i++) {
+                    System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
+                }
+                System.out.println();
+            }
+            rs.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void executeUpdate() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            int affectRows = stmt.executeUpdate("create database " + dbName);
+            Assert.assertEquals(0, affectRows);
+            affectRows = stmt.executeUpdate("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            Assert.assertEquals(0, affectRows);
+            affectRows = stmt.executeUpdate("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            Assert.assertEquals(1, affectRows);
+            affectRows = stmt.executeUpdate("drop database " + dbName);
+            Assert.assertEquals(0, affectRows);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void close() {
+        // test in AfterClass method
+    }
+
+
+    @Test
+    public void getMaxFieldSize() throws SQLException {
+        Assert.assertEquals(16 * 1024, stmt.getMaxFieldSize());
+    }
+
+    @Test(expected = SQLException.class)
+    public void setMaxFieldSize() throws SQLException {
+
+        stmt.setMaxFieldSize(0);
+        stmt.setMaxFieldSize(-1);
+    }
+
+    @Test
+    public void getMaxRows() throws SQLException {
+        Assert.assertEquals(0, stmt.getMaxRows());
+    }
+
+    @Test(expected = SQLException.class)
+    public void setMaxRows() throws SQLException {
+        stmt.setMaxRows(0);
+        stmt.setMaxRows(-1);
+    }
+
+    @Test
+    public void setEscapeProcessing() throws SQLException {
+        stmt.setEscapeProcessing(true);
+        stmt.setEscapeProcessing(false);
+    }
+
+    @Test
+    public void getQueryTimeout() throws SQLException {
+        Assert.assertEquals(0, stmt.getQueryTimeout());
+    }
+
+    @Test(expected = SQLException.class)
+    public void setQueryTimeout() throws SQLException {
+        stmt.setQueryTimeout(0);
+        stmt.setQueryTimeout(-1);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void cancel() throws SQLException {
+        stmt.cancel();
+    }
+
+    @Test
+    public void getWarnings() throws SQLException {
+        Assert.assertNull(stmt.getWarnings());
+    }
+
+    @Test
+    public void clearWarnings() throws SQLException {
+        stmt.clearWarnings();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setCursorName() throws SQLException {
+        stmt.setCursorName("");
+    }
+
+    @Test
+    public void execute() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            boolean isSelect = stmt.execute("create database " + dbName);
+            Assert.assertEquals(false, isSelect);
+            int affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+
+            isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+
+            isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(1, affectedRows);
+
+            isSelect = stmt.execute("select * from " + dbName + ".weather");
+            Assert.assertEquals(true, isSelect);
+
+            isSelect = stmt.execute("drop database " + dbName);
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void getResultSet() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            boolean isSelect = stmt.execute("create database " + dbName);
+            Assert.assertEquals(false, isSelect);
+            int affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+
+            isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+
+            isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(1, affectedRows);
+
+            isSelect = stmt.execute("select * from " + dbName + ".weather");
+            Assert.assertEquals(true, isSelect);
+            ResultSet rs = stmt.getResultSet();
+            Assert.assertNotNull(rs);
+            ResultSetMetaData meta = rs.getMetaData();
+            Assert.assertEquals(3, meta.getColumnCount());
+            int count = 0;
+            while (rs.next()) {
+                for (int i = 1; i <= meta.getColumnCount(); i++) {
+                    System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
+                }
+                System.out.println();
+                count++;
+            }
+            Assert.assertEquals(1, count);
+
+            isSelect = stmt.execute("drop database " + dbName);
+            Assert.assertEquals(false, isSelect);
+            affectedRows = stmt.getUpdateCount();
+            Assert.assertEquals(0, affectedRows);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void getUpdateCount() {
+        // already test in execute method
+    }
+
+    @Test
+    public void getMoreResults() throws SQLException {
+        Assert.assertEquals(false, stmt.getMoreResults());
+    }
+
+    @Test(expected = SQLException.class)
+    public void setFetchDirection() throws SQLException {
+        stmt.setFetchDirection(ResultSet.FETCH_FORWARD);
+        stmt.setFetchDirection(ResultSet.FETCH_REVERSE);
+        stmt.setFetchDirection(ResultSet.FETCH_UNKNOWN);
+        stmt.setFetchDirection(-1);
+    }
+
+    @Test
+    public void getFetchDirection() throws SQLException {
+        Assert.assertEquals(ResultSet.FETCH_FORWARD, stmt.getFetchDirection());
+    }
+
+    @Test(expected = SQLException.class)
+    public void setFetchSize() throws SQLException {
+        stmt.setFetchSize(0);
+        stmt.setFetchSize(-1);
+    }
+
+    @Test
+    public void getFetchSize() throws SQLException {
+        stmt.setFetchSize(0);
+        Assert.assertEquals(0, stmt.getFetchSize());
+        stmt.setFetchSize(0);
+    }
+
+    @Test
+    public void getResultSetConcurrency() throws SQLException {
+        Assert.assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency());
+    }
+
+    @Test
+    public void getResultSetType() throws SQLException {
+        Assert.assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType());
+    }
+
+    @Test
+    public void addBatch() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            stmt.addBatch("create database " + dbName);
+            stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            stmt.addBatch("select * from " + dbName + ".weather");
+            stmt.addBatch("drop database " + dbName);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void clearBatch() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            stmt.clearBatch();
+            stmt.addBatch("create database " + dbName);
+            stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            stmt.addBatch("select * from " + dbName + ".weather");
+            stmt.addBatch("drop database " + dbName);
+            stmt.clearBatch();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void executeBatch() {
+        final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32);
+        try {
+            stmt.addBatch("create database " + dbName);
+            stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))");
+            stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)");
+            stmt.addBatch("select * from " + dbName + ".weather");
+            stmt.addBatch("drop database " + dbName);
+            int[] results = stmt.executeBatch();
+            Assert.assertEquals(0, results[0]);
+            Assert.assertEquals(0, results[1]);
+            Assert.assertEquals(1, results[2]);
+            Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[3]);
+            Assert.assertEquals(0, results[4]);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void getConnection() {
+        try {
+            Connection connection = stmt.getConnection();
+            Assert.assertNotNull(connection);
+            Assert.assertTrue(this.conn == connection);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testGetMoreResults() throws SQLException {
+        Assert.assertEquals(false, stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT));
+        stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void getGeneratedKeys() throws SQLException {
+        stmt.getGeneratedKeys();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecuteUpdate() throws SQLException {
+        stmt.executeUpdate("", 1);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecuteUpdate1() throws SQLException {
+        stmt.executeUpdate("", new int[]{});
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecuteUpdate2() throws SQLException {
+        stmt.executeUpdate("", new String[]{});
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecute() throws SQLException {
+        stmt.execute("", 1);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecute1() throws SQLException {
+        stmt.execute("", new int[]{});
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testExecute2() throws SQLException {
+        stmt.execute("", new String[]{});
+    }
+
+    @Test
+    public void getResultSetHoldability() throws SQLException {
+        Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, stmt.getResultSetHoldability());
+    }
+
+    @Test
+    public void isClosed() {
+        try {
+            Assert.assertEquals(false, stmt.isClosed());
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void setPoolable() throws SQLException {
+        stmt.setPoolable(true);
+        stmt.setPoolable(false);
+    }
+
+    @Test
+    public void isPoolable() throws SQLException {
+        Assert.assertEquals(false, stmt.isPoolable());
+    }
+
+    @Test
+    public void closeOnCompletion() throws SQLException {
+        stmt.closeOnCompletion();
+    }
+
+    @Test
+    public void isCloseOnCompletion() throws SQLException {
+        Assert.assertFalse(stmt.isCloseOnCompletion());
+    }
+
+    @Test
+    public void unwrap() throws SQLException {
+        TSDBStatement unwrap = stmt.unwrap(TSDBStatement.class);
+        Assert.assertNotNull(unwrap);
+    }
+
+    @Test
+    public void isWrapperFor() throws SQLException {
+        Assert.assertTrue(stmt.isWrapperFor(TSDBStatement.class));
+    }
+
+    @BeforeClass
+    public static void beforeClass() {
+        try {
+            Class.forName("com.taosdata.jdbc.TSDBDriver");
+            Properties properties = new Properties();
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties);
+            stmt = conn.createStatement();
+        } catch (ClassNotFoundException e) {
+            e.printStackTrace();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @AfterClass
+    public static void afterClass() {
+        try {
+            if (stmt != null)
+                stmt.close();
+            if (conn != null)
+                conn.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java
index e4d2d7598d6a6bd5610abcb1bd576f523cb22740..d8d8f1eae42f555b2bc0be12e8a3a943071da3c6 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java
@@ -1,5 +1,6 @@
 package com.taosdata.jdbc.cases;
 
+import com.taosdata.jdbc.TSDBErrorNumbers;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -16,8 +17,7 @@ public class ConnectWrongDatabaseTest {
         } catch (ClassNotFoundException e) {
             e.printStackTrace();
         } catch (SQLException e) {
-            System.out.println(e.getMessage());
-            Assert.assertEquals("TDengine Error: Invalid database name", e.getMessage());
+            Assert.assertEquals(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, e.getErrorCode());
         }
     }
 
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
similarity index 97%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
index 9d1884e6c2b44eb5269bcb5ad40fdfe9dd759f67..d7603312a090bedb17ed125edf6da535924964d0 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
@@ -1,5 +1,6 @@
-package com.taosdata.jdbc;
+package com.taosdata.jdbc.cases;
 
+import com.taosdata.jdbc.TSDBDriver;
 import org.junit.*;
 import org.junit.runners.MethodSorters;
 
@@ -11,7 +12,7 @@ import static org.junit.Assert.assertEquals;
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public class ImportTest {
     private static Connection connection;
-    static String dbName = "test";
+    static String dbName = "test_import";
     static String tName = "t0";
     static String host = "127.0.0.1";
     private static long ts;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f3d79b1df1594edc4fffd626244bc742ea13ec75
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java
@@ -0,0 +1,187 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class InvalidResultSetPointerTest {
+
+    private static String host = "127.0.0.1";
+    private static final String dbName = "test";
+    private static final String stbName = "stb";
+    private static final String tbName = "tb";
+    private static Connection connection;
+    private static int numOfSTb = 30000;
+    private static int numOfTb = 3;
+    private static int numOfThreads = 100;
+
+    @Test
+    public void test() throws SQLException {
+        execute("drop database if exists " + dbName);
+        execute("create database if not exists " + dbName);
+        execute("use " + dbName);
+        createSTable();
+        createTable();
+        insert();
+        selectMultiThreading();
+        close();
+    }
+
+    private void insert() {
+        for (int i = 0; i < numOfSTb; i++) {
+            for (int j = 0; j < numOfTb; j++) {
+                final String sql = "INSERT INTO " + dbName + "." + tbName + i + "_" + j + " (ts, temperature, humidity, name) values(now, 20.5, 34, \"" + i + "\")";
+                System.out.println(sql);
+                execute(sql);
+            }
+        }
+    }
+
+    private void createSTable() {
+        for (int i = 0; i < numOfSTb; i++) {
+            final String sql = "create table if not exists " + dbName + "." + stbName + i + " (ts timestamp, temperature float, humidity int, name BINARY(" + (i % 73 + 10) + ")) TAGS (tag1 INT)";
+            execute(sql);
+        }
+    }
+
+    private void createTable() {
+        for (int i = 0; i < numOfSTb; i++) {
+            for (int j = 0; j < numOfTb; j++) {
+                final String sql = "create table if not exists " + dbName + "." + tbName + i + "_" + j + " USING " + stbName + i + " TAGS(" + j + ")";
+                execute(sql);
+            }
+        }
+    }
+
+    private void close() throws SQLException {
+        if (connection != null) {
+            this.connection.close();
+            System.out.println("connection closed.");
+        }
+    }
+
+    private void selectMultiThreading() {
+        int a = numOfSTb / numOfThreads;
+        if (a < 1) {
+            numOfThreads = numOfSTb;
+            a = 1;
+        }
+
+        int b = 0;
+        if (numOfThreads != 0) {
+            b = numOfSTb % numOfThreads;
+        }
+
+        multiThreadingClass instance[] = new multiThreadingClass[numOfThreads];
+
+        int last = 0;
+        for (int i = 0; i < numOfThreads; i++) {
+            instance[i] = new multiThreadingClass();
+            instance[i].id = i;
+            instance[i].from = last;
+            if (i < b) {
+                instance[i].to = last + a;
+            } else {
+                instance[i].to = last + a - 1;
+            }
+
+            last = instance[i].to + 1;
+            instance[i].numOfTb = numOfTb;
+            instance[i].connection = connection;
+            instance[i].dbName = dbName;
+            instance[i].tbName = tbName;
+            instance[i].start();
+        }
+
+        for (int i = 0; i < numOfThreads; i++) {
+            try {
+                instance[i].join();
+            } catch (InterruptedException ie) {
+                ie.printStackTrace();
+            }
+        }
+    }
+
+    @BeforeClass
+    public static void beforeClass() {
+        try {
+            String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+            Class.forName("com.taosdata.jdbc.TSDBDriver");
+            Properties properties = new Properties();
+            properties.setProperty("charset", "UTF-8");
+            properties.setProperty("locale", "en_US.UTF-8");
+            properties.setProperty("timezone", "UTC-8");
+            System.out.println("get connection starting...");
+            connection = DriverManager.getConnection(url, properties);
+            if (connection != null)
+                System.out.println("[ OK ] Connection established.");
+        } catch (ClassNotFoundException | SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private void execute(String sql) {
+        try (Statement statement = connection.createStatement()) {
+            long start = System.currentTimeMillis();
+            statement.execute(sql);
+            long end = System.currentTimeMillis();
+            printSql(sql, (end - start));
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private void printSql(String sql, long cost) {
+        System.out.println("time cost: " + cost + " ms, execute statement ====> " + sql);
+    }
+
+    private void executeQuery(String sql) {
+        try (Statement statement = connection.createStatement()) {
+            long start = System.currentTimeMillis();
+            ResultSet resultSet = statement.executeQuery(sql);
+            long end = System.currentTimeMillis();
+            printSql(sql, (end - start));
+//            printResult(resultSet);
+            resultSet.close();
+            statement.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    class multiThreadingClass extends Thread {
+        public int id;
+        public int from, to;
+        public int numOfTb;
+        public Connection connection;
+        public String dbName;
+        public String tbName;
+
+        public void run() {
+            System.out.println("ID: " + id + " from: " + from + " to: " + to);
+            try {
+                Thread.sleep(1000);
+            } catch (InterruptedException e) {
+                System.out.println("Thread " + id + " interrupted.");
+            }
+
+            for (int i = from; i < to; i++) {
+                for (int j = 0; j < numOfTb; j++) {
+                    if (j % 1000 == 0) {
+                        try {
+                            System.out.print(id + "s.");
+                            Thread.sleep(1);
+                        } catch (InterruptedException e) {
+                            System.out.println("Thread " + id + " interrupted.");
+                        }
+                    }
+                    final String sql = "select last_row(humidity) from " + dbName + "." + tbName + i + "_" + j;
+                    executeQuery(sql);
+                }
+            }
+        }
+    }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
similarity index 96%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
index 37fbc284877dbb27ecb8c9da1b752cfaff029023..d0ba113b7a4a8f99e22eb8143905d0b086583e1d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
@@ -1,5 +1,6 @@
-package com.taosdata.jdbc;
+package com.taosdata.jdbc.cases;
 
+import com.taosdata.jdbc.TSDBDriver;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
similarity index 97%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
index 7db37beaf4b355b8d256b5d63f4eedd8e7fd0e0e..38c8cbb98c48342f131f4f5f0fee885bb446e83c 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
@@ -1,5 +1,6 @@
-package com.taosdata.jdbc;
+package com.taosdata.jdbc.cases;
 
+import com.taosdata.jdbc.TSDBDriver;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
similarity index 98%
rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
index 0a1c548baa330edada8c393f79cc89583bea7b18..4575cb73a05fbbc19d6eaf2ba5be0ed27b61804c 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
@@ -1,5 +1,6 @@
-package com.taosdata.jdbc;
+package com.taosdata.jdbc.cases;
 
+import com.taosdata.jdbc.TSDBDriver;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.FixMethodOrder;
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java
index 11d4a143535d7e116d92875a6eb52b5bf7a5b6e0..b0666989ba0e5aacfd6aa1110b4a57016a0937ee 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java
@@ -10,7 +10,7 @@ public class AuthenticationTest {
     private static final String host = "127.0.0.1";
     // private static final String host = "master";
     private static final String user = "root";
-    private static final String password = "123456";
+    private static final String password = "taos?data";
     private Connection conn;
 
     @Test
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e005d129124ca480313f861a48af5fcfdbf25bc
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
@@ -0,0 +1,403 @@
+package com.taosdata.jdbc.rs;
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class RestfulConnectionTest {
+
+    private static final String host = "127.0.0.1";
+    // private static final String host = "master";
+
+    private static Connection conn;
+
+    @Test
+    public void getConnection() {
+        // already test in beforeClass method
+    }
+
+    @Test
+    public void createStatement() {
+        try (Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("select server_status()");
+            rs.next();
+            int status = rs.getInt("server_status()");
+            Assert.assertEquals(1, status);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void prepareStatement() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("select server_status()");
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        int status = rs.getInt("server_status()");
+        Assert.assertEquals(1, status);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void prepareCall() throws SQLException {
+        conn.prepareCall("select server_status()");
+    }
+
+    @Test
+    public void nativeSQL() throws SQLException {
+        String nativeSQL = conn.nativeSQL("select * from log.log");
+        Assert.assertEquals("select * from log.log", nativeSQL);
+    }
+
+    @Test
+    public void setAutoCommit() throws SQLException {
+        conn.setAutoCommit(true);
+        conn.setAutoCommit(false);
+    }
+
+    @Test
+    public void getAutoCommit() throws SQLException {
+        Assert.assertTrue(conn.getAutoCommit());
+    }
+
+    @Test
+    public void commit() throws SQLException {
+        conn.commit();
+    }
+
+    @Test
+    public void rollback() throws SQLException {
+        conn.rollback();
+    }
+
+    @Test
+    public void close() {
+        // connection will close in afterClass method
+    }
+
+    @Test
+    public void isClosed() throws SQLException {
+        Assert.assertFalse(conn.isClosed());
+    }
+
+    @Test
+    public void getMetaData() throws SQLException {
+        DatabaseMetaData meta = conn.getMetaData();
+        Assert.assertNotNull(meta);
+        Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
+    }
+
+    @Test
+    public void setReadOnly() throws SQLException {
+        conn.setReadOnly(true);
+    }
+
+    @Test
+    public void isReadOnly() throws SQLException {
+        Assert.assertTrue(conn.isReadOnly());
+    }
+
+    @Test
+    public void setCatalog() throws SQLException {
+        conn.setCatalog("test");
+        Assert.assertEquals("test", conn.getCatalog());
+    }
+
+    @Test
+    public void getCatalog() throws SQLException {
+        conn.setCatalog("log");
+        Assert.assertEquals("log", conn.getCatalog());
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void setTransactionIsolation() throws SQLException {
+        conn.setTransactionIsolation(Connection.TRANSACTION_NONE);
+        Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+        conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
+    }
+
+    @Test
+    public void getTransactionIsolation() throws SQLException {
+        Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+    }
+
+    @Test
+    public void getWarnings() throws SQLException {
+        Assert.assertNull(conn.getWarnings());
+    }
+
+    @Test
+    public void clearWarnings() throws SQLException {
+        conn.clearWarnings();
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+ public void testCreateStatement() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareCall() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getTypeMap() throws SQLException { + conn.getTypeMap(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTypeMap() throws SQLException { + conn.setTypeMap(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setHoldability() throws SQLException { + conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT); + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT); + } + + @Test + public void getHoldability() throws SQLException { + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setSavepoint() throws SQLException { + conn.setSavepoint(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testSetSavepoint() throws SQLException { + conn.setSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testRollback() throws SQLException { + conn.rollback(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void releaseSavepoint() throws SQLException { + conn.releaseSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testCreateStatement1() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement1() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, 
ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareCall1() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement2() throws SQLException { + Assert.assertNotNull("", Statement.NO_GENERATED_KEYS); + conn.prepareStatement("", Statement.RETURN_GENERATED_KEYS); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement3() throws SQLException { + conn.prepareStatement("", new int[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement4() throws SQLException { + conn.prepareStatement("", new String[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createClob() throws SQLException { + conn.createClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createBlob() throws SQLException { + conn.createBlob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createNClob() throws SQLException { + conn.createNClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createSQLXML() throws SQLException { + conn.createSQLXML(); + } + + @Test(expected = SQLException.class) + public void isValid() throws SQLException { + Assert.assertTrue(conn.isValid(10)); + Assert.assertTrue(conn.isValid(0)); + conn.isValid(-1); + } + + @Test + public void setClientInfo() throws SQLClientInfoException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + + @Test + public void testSetClientInfo() throws SQLClientInfoException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn.setClientInfo(properties); + } + + @Test + public void getClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + Properties info = conn.getClientInfo(); + String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test + public void testGetClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test(expected = SQLFeatureNotSupportedException.class)
+ public void createArrayOf() throws SQLException { + conn.createArrayOf("", null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createStruct() throws SQLException { + conn.createStruct("", null); + } + + @Test + public void setSchema() throws SQLException { + conn.setSchema("test"); + } + + @Test + public void getSchema() throws SQLException { + Assert.assertNull(conn.getSchema()); + } + + @Test + public void abort() throws SQLException { + conn.abort(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNetworkTimeout() throws SQLException { + conn.setNetworkTimeout(null, 1000); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getNetworkTimeout() throws SQLException { + conn.getNetworkTimeout(); + } + + @Test + public void unwrap() { + try { + RestfulConnection restfulConnection = conn.unwrap(RestfulConnection.class); + Assert.assertNotNull(restfulConnection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(conn.isWrapperFor(RestfulConnection.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/log?user=root&password=taosdata", properties); + // create test database for test cases + try (Statement stmt = conn.createStatement()) { + stmt.execute("create database if not exists test"); + } + + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1991c17065a34c16fe7758486bd10b83f3241a07 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java @@ -0,0 +1,984 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class RestfulDatabaseMetaDataTest { + // private static final String host = "master"; + private static final String host = "127.0.0.1"; + private static final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + private static Connection connection; + private static RestfulDatabaseMetaData metaData; + + @Test + public void unwrap() throws SQLException { + RestfulDatabaseMetaData unwrap = metaData.unwrap(RestfulDatabaseMetaData.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(metaData.isWrapperFor(RestfulDatabaseMetaData.class)); + } + + @Test + public void allProceduresAreCallable() throws SQLException { + Assert.assertFalse(metaData.allProceduresAreCallable()); + } + + @Test + public void 
allTablesAreSelectable() throws SQLException { + Assert.assertFalse(metaData.allTablesAreSelectable()); + } + + @Test + public void getURL() throws SQLException { + Assert.assertEquals(url, metaData.getURL()); + } + + @Test + public void getUserName() throws SQLException { + Assert.assertEquals("root", metaData.getUserName()); + } + + @Test + public void isReadOnly() throws SQLException { + Assert.assertFalse(metaData.isReadOnly()); + } + + @Test + public void nullsAreSortedHigh() throws SQLException { + Assert.assertFalse(metaData.nullsAreSortedHigh()); + } + + @Test + public void nullsAreSortedLow() throws SQLException { + Assert.assertTrue(metaData.nullsAreSortedLow()); + } + + @Test + public void nullsAreSortedAtStart() throws SQLException { + Assert.assertTrue(metaData.nullsAreSortedAtStart()); + } + + @Test + public void nullsAreSortedAtEnd() throws SQLException { + Assert.assertFalse(metaData.nullsAreSortedAtEnd()); + } + + @Test + public void getDatabaseProductName() throws SQLException { + Assert.assertEquals("TDengine", metaData.getDatabaseProductName()); + } + + @Test + public void getDatabaseProductVersion() throws SQLException { + Assert.assertEquals("2.0.x.x", metaData.getDatabaseProductVersion()); + } + + @Test + public void getDriverName() throws SQLException { + Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", metaData.getDriverName()); + } + + @Test + public void getDriverVersion() throws SQLException { + Assert.assertEquals("2.0.x", metaData.getDriverVersion()); + } + + @Test + public void getDriverMajorVersion() { + Assert.assertEquals(2, metaData.getDriverMajorVersion()); + } + + @Test + public void getDriverMinorVersion() { + Assert.assertEquals(0, metaData.getDriverMinorVersion()); + } + + @Test + public void usesLocalFiles() throws SQLException { + Assert.assertFalse(metaData.usesLocalFiles()); + } + + @Test + public void usesLocalFilePerTable() throws SQLException { + Assert.assertFalse(metaData.usesLocalFilePerTable()); + } + + @Test + public void supportsMixedCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.supportsMixedCaseIdentifiers()); + } + + @Test + public void storesUpperCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesUpperCaseIdentifiers()); + } + + @Test + public void storesLowerCaseIdentifiers() throws SQLException { + Assert.assertTrue(metaData.storesLowerCaseIdentifiers()); + } + + @Test + public void storesMixedCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesMixedCaseIdentifiers()); + } + + @Test + public void supportsMixedCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.supportsMixedCaseQuotedIdentifiers()); + } + + @Test + public void storesUpperCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesUpperCaseQuotedIdentifiers()); + } + + @Test + public void storesLowerCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesLowerCaseQuotedIdentifiers()); + } + + @Test + public void storesMixedCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesMixedCaseQuotedIdentifiers()); + } + + @Test + public void getIdentifierQuoteString() throws SQLException { + Assert.assertEquals(" ", metaData.getIdentifierQuoteString()); + } + + @Test + public void getSQLKeywords() throws SQLException { + Assert.assertEquals(null, metaData.getSQLKeywords()); + } + + @Test + public void getNumericFunctions() throws SQLException { + Assert.assertEquals(null, 
metaData.getNumericFunctions()); + } + + @Test + public void getStringFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getStringFunctions()); + } + + @Test + public void getSystemFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getSystemFunctions()); + } + + @Test + public void getTimeDateFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getTimeDateFunctions()); + } + + @Test + public void getSearchStringEscape() throws SQLException { + Assert.assertEquals(null, metaData.getSearchStringEscape()); + } + + @Test + public void getExtraNameCharacters() throws SQLException { + Assert.assertEquals(null, metaData.getExtraNameCharacters()); + } + + @Test + public void supportsAlterTableWithAddColumn() throws SQLException { + Assert.assertTrue(metaData.supportsAlterTableWithAddColumn()); + } + + @Test + public void supportsAlterTableWithDropColumn() throws SQLException { + Assert.assertTrue(metaData.supportsAlterTableWithDropColumn()); + } + + @Test + public void supportsColumnAliasing() throws SQLException { + Assert.assertTrue(metaData.supportsColumnAliasing()); + } + + @Test + public void nullPlusNonNullIsNull() throws SQLException { + Assert.assertFalse(metaData.nullPlusNonNullIsNull()); + } + + @Test + public void supportsConvert() throws SQLException { + Assert.assertFalse(metaData.supportsConvert()); + } + + @Test + public void testSupportsConvert() throws SQLException { + Assert.assertFalse(metaData.supportsConvert(1, 1)); + } + + @Test + public void supportsTableCorrelationNames() throws SQLException { + Assert.assertFalse(metaData.supportsTableCorrelationNames()); + } + + @Test + public void supportsDifferentTableCorrelationNames() throws SQLException { + Assert.assertFalse(metaData.supportsDifferentTableCorrelationNames()); + } + + @Test + public void supportsExpressionsInOrderBy() throws SQLException { + Assert.assertFalse(metaData.supportsExpressionsInOrderBy()); + } + + @Test + public void supportsOrderByUnrelated() throws SQLException { + Assert.assertFalse(metaData.supportsOrderByUnrelated()); + } + + @Test + public void supportsGroupBy() throws SQLException { + Assert.assertTrue(metaData.supportsGroupBy()); + } + + @Test + public void supportsGroupByUnrelated() throws SQLException { + Assert.assertFalse(metaData.supportsGroupByUnrelated()); + } + + @Test + public void supportsGroupByBeyondSelect() throws SQLException { + Assert.assertFalse(metaData.supportsGroupByBeyondSelect()); + } + + @Test + public void supportsLikeEscapeClause() throws SQLException { + Assert.assertFalse(metaData.supportsLikeEscapeClause()); + } + + @Test + public void supportsMultipleResultSets() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleResultSets()); + } + + @Test + public void supportsMultipleTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleTransactions()); + } + + @Test + public void supportsNonNullableColumns() throws SQLException { + Assert.assertFalse(metaData.supportsNonNullableColumns()); + } + + @Test + public void supportsMinimumSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsMinimumSQLGrammar()); + } + + @Test + public void supportsCoreSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsCoreSQLGrammar()); + } + + @Test + public void supportsExtendedSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsExtendedSQLGrammar()); + } + + @Test + public void supportsANSI92EntryLevelSQL() throws SQLException { + 
Assert.assertFalse(metaData.supportsANSI92EntryLevelSQL()); + } + + @Test + public void supportsANSI92IntermediateSQL() throws SQLException { + Assert.assertFalse(metaData.supportsANSI92IntermediateSQL()); + } + + @Test + public void supportsANSI92FullSQL() throws SQLException { + Assert.assertFalse(metaData.supportsANSI92FullSQL()); + } + + @Test + public void supportsIntegrityEnhancementFacility() throws SQLException { + Assert.assertFalse(metaData.supportsIntegrityEnhancementFacility()); + } + + @Test + public void supportsOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsOuterJoins()); + } + + @Test + public void supportsFullOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsFullOuterJoins()); + } + + @Test + public void supportsLimitedOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsLimitedOuterJoins()); + } + + @Test + public void getSchemaTerm() throws SQLException { + Assert.assertNull(metaData.getSchemaTerm()); + } + + @Test + public void getProcedureTerm() throws SQLException { + Assert.assertNull(metaData.getProcedureTerm()); + } + + @Test + public void getCatalogTerm() throws SQLException { + Assert.assertEquals("database", metaData.getCatalogTerm()); + } + + @Test + public void isCatalogAtStart() throws SQLException { + Assert.assertTrue(metaData.isCatalogAtStart()); + } + + @Test + public void getCatalogSeparator() throws SQLException { + Assert.assertEquals(".", metaData.getCatalogSeparator()); + } + + @Test + public void supportsSchemasInDataManipulation() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInDataManipulation()); + } + + @Test + public void supportsSchemasInProcedureCalls() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInProcedureCalls()); + } + + @Test + public void supportsSchemasInTableDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInTableDefinitions()); + } + + @Test + public void supportsSchemasInIndexDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInIndexDefinitions()); + } + + @Test + public void supportsSchemasInPrivilegeDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInPrivilegeDefinitions()); + } + + @Test + public void supportsCatalogsInDataManipulation() throws SQLException { + Assert.assertTrue(metaData.supportsCatalogsInDataManipulation()); + } + + @Test + public void supportsCatalogsInProcedureCalls() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInProcedureCalls()); + } + + @Test + public void supportsCatalogsInTableDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInTableDefinitions()); + } + + @Test + public void supportsCatalogsInIndexDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInIndexDefinitions()); + } + + @Test + public void supportsCatalogsInPrivilegeDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInPrivilegeDefinitions()); + } + + @Test + public void supportsPositionedDelete() throws SQLException { + Assert.assertFalse(metaData.supportsPositionedDelete()); + } + + @Test + public void supportsPositionedUpdate() throws SQLException { + Assert.assertFalse(metaData.supportsPositionedUpdate()); + } + + @Test + public void supportsSelectForUpdate() throws SQLException { + Assert.assertFalse(metaData.supportsSelectForUpdate()); + } + + @Test + public void supportsStoredProcedures() throws SQLException { + 
Assert.assertFalse(metaData.supportsStoredProcedures()); + } + + @Test + public void supportsSubqueriesInComparisons() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInComparisons()); + } + + @Test + public void supportsSubqueriesInExists() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInExists()); + } + + @Test + public void supportsSubqueriesInIns() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInIns()); + } + + @Test + public void supportsSubqueriesInQuantifieds() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInQuantifieds()); + } + + @Test + public void supportsCorrelatedSubqueries() throws SQLException { + Assert.assertFalse(metaData.supportsCorrelatedSubqueries()); + } + + @Test + public void supportsUnion() throws SQLException { + Assert.assertFalse(metaData.supportsUnion()); + } + + @Test + public void supportsUnionAll() throws SQLException { + Assert.assertFalse(metaData.supportsUnionAll()); + } + + @Test + public void supportsOpenCursorsAcrossCommit() throws SQLException { + Assert.assertFalse(metaData.supportsOpenCursorsAcrossCommit()); + } + + @Test + public void supportsOpenCursorsAcrossRollback() throws SQLException { + Assert.assertFalse(metaData.supportsOpenCursorsAcrossRollback()); + } + + @Test + public void supportsOpenStatementsAcrossCommit() throws SQLException { + Assert.assertFalse(metaData.supportsOpenStatementsAcrossCommit()); + } + + @Test + public void supportsOpenStatementsAcrossRollback() throws SQLException { + Assert.assertFalse(metaData.supportsOpenStatementsAcrossRollback()); + } + + @Test + public void getMaxBinaryLiteralLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxBinaryLiteralLength()); + } + + @Test + public void getMaxCharLiteralLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCharLiteralLength()); + } + + @Test + public void getMaxColumnNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnNameLength()); + } + + @Test + public void getMaxColumnsInGroupBy() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInGroupBy()); + } + + @Test + public void getMaxColumnsInIndex() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInIndex()); + } + + @Test + public void getMaxColumnsInOrderBy() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInOrderBy()); + } + + @Test + public void getMaxColumnsInSelect() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInSelect()); + } + + @Test + public void getMaxColumnsInTable() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInTable()); + } + + @Test + public void getMaxConnections() throws SQLException { + Assert.assertEquals(0, metaData.getMaxConnections()); + } + + @Test + public void getMaxCursorNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCursorNameLength()); + } + + @Test + public void getMaxIndexLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxIndexLength()); + } + + @Test + public void getMaxSchemaNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxSchemaNameLength()); + } + + @Test + public void getMaxProcedureNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxProcedureNameLength()); + } + + @Test + public void getMaxCatalogNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCatalogNameLength()); + } + + @Test + public void 
getMaxRowSize() throws SQLException { + Assert.assertEquals(0, metaData.getMaxRowSize()); + } + + @Test + public void doesMaxRowSizeIncludeBlobs() throws SQLException { + Assert.assertFalse(metaData.doesMaxRowSizeIncludeBlobs()); + } + + @Test + public void getMaxStatementLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxStatementLength()); + } + + @Test + public void getMaxStatements() throws SQLException { + Assert.assertEquals(0, metaData.getMaxStatements()); + } + + @Test + public void getMaxTableNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxTableNameLength()); + } + + @Test + public void getMaxTablesInSelect() throws SQLException { + Assert.assertEquals(0, metaData.getMaxTablesInSelect()); + } + + @Test + public void getMaxUserNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxUserNameLength()); + } + + @Test + public void getDefaultTransactionIsolation() throws SQLException { + Assert.assertEquals(Connection.TRANSACTION_NONE, metaData.getDefaultTransactionIsolation()); + } + + @Test + public void supportsTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsTransactions()); + } + + @Test + public void supportsTransactionIsolationLevel() throws SQLException { + Assert.assertTrue(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_NONE)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)); + } + + @Test + public void supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsDataDefinitionAndDataManipulationTransactions()); + } + + @Test + public void supportsDataManipulationTransactionsOnly() throws SQLException { + Assert.assertFalse(metaData.supportsDataManipulationTransactionsOnly()); + } + + @Test + public void dataDefinitionCausesTransactionCommit() throws SQLException { + Assert.assertFalse(metaData.dataDefinitionCausesTransactionCommit()); + } + + @Test + public void dataDefinitionIgnoredInTransactions() throws SQLException { + Assert.assertFalse(metaData.dataDefinitionIgnoredInTransactions()); + } + + @Test + public void getProcedures() throws SQLException { + Assert.assertNull(metaData.getProcedures("*", "*", "*")); + } + + @Test + public void getProcedureColumns() throws SQLException { + Assert.assertNull(metaData.getProcedureColumns("*", "*", "*", "*")); + } + + @Test + public void getTables() throws SQLException { + System.out.println("****************************************************"); + ResultSet tables = metaData.getTables("log", "", null, null); + ResultSetMetaData meta = tables.getMetaData(); + while (tables.next()) { + System.out.print(meta.getColumnLabel(1) + ":" + tables.getString(1) + "\t"); + System.out.print(meta.getColumnLabel(3) + ":" + tables.getString(3) + "\t"); + System.out.print(meta.getColumnLabel(4) + ":" + tables.getString(4) + "\t"); + System.out.print(meta.getColumnLabel(5) + ":" + tables.getString(5) + "\n"); + } + System.out.println(); + Assert.assertNotNull(tables); + } + + @Test + public void getSchemas() throws SQLException { + Assert.assertNotNull(metaData.getSchemas()); + } + + @Test + public 
void getCatalogs() throws SQLException { + System.out.println("****************************************************"); + + ResultSet catalogs = metaData.getCatalogs(); + ResultSetMetaData meta = catalogs.getMetaData(); + while (catalogs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i)); + } + System.out.println(); + } + } + + @Test + public void getTableTypes() throws SQLException { + System.out.println("****************************************************"); + + ResultSet tableTypes = metaData.getTableTypes(); + while (tableTypes.next()) { + System.out.println(tableTypes.getString("TABLE_TYPE")); + } + Assert.assertNotNull(metaData.getTableTypes()); + } + + @Test + public void getColumns() throws SQLException { + System.out.println("****************************************************"); + + ResultSet columns = metaData.getColumns("log", "", "dn", ""); + ResultSetMetaData meta = columns.getMetaData(); + while (columns.next()) { + System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t"); + System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t"); + System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t"); + System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t"); + System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t"); + System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t"); + System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t"); + System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t"); + System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n"); + System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n"); + } + } + + @Test + public void getColumnPrivileges() throws SQLException { + Assert.assertNotNull(metaData.getColumnPrivileges("", "", "", "")); + } + + @Test + public void getTablePrivileges() throws SQLException { + Assert.assertNotNull(metaData.getTablePrivileges("", "", "")); + } + + @Test + public void getBestRowIdentifier() throws SQLException { + Assert.assertNotNull(metaData.getBestRowIdentifier("", "", "", 0, false)); + } + + @Test + public void getVersionColumns() throws SQLException { + Assert.assertNotNull(metaData.getVersionColumns("", "", "")); + } + + @Test + public void getPrimaryKeys() throws SQLException { + System.out.println("****************************************************"); + + ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1"); + while (rs.next()) { + System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME")); + System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME")); + System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ")); + System.out.println("PK_NAME: " + rs.getString("PK_NAME")); + } + + Assert.assertNotNull(rs); + } + + @Test + public void getImportedKeys() throws SQLException { + Assert.assertNotNull(metaData.getImportedKeys("", "", "")); + } + + @Test + public void getExportedKeys() throws SQLException { + Assert.assertNotNull(metaData.getExportedKeys("", "", "")); + } + + @Test + public void getCrossReference() throws SQLException { + Assert.assertNotNull(metaData.getCrossReference("", "", "", "", "", "")); + } + + @Test + public void getTypeInfo() throws SQLException { + Assert.assertNotNull(metaData.getTypeInfo()); + } + + @Test + public void getIndexInfo() throws SQLException { + 
Assert.assertNotNull(metaData.getIndexInfo("", "", "", false, false)); + } + + @Test + public void supportsResultSetType() throws SQLException { + Assert.assertFalse(metaData.supportsResultSetType(0)); + } + + @Test + public void supportsResultSetConcurrency() throws SQLException { + Assert.assertFalse(metaData.supportsResultSetConcurrency(0, 0)); + } + + @Test + public void ownUpdatesAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownUpdatesAreVisible(0)); + } + + @Test + public void ownDeletesAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownDeletesAreVisible(0)); + } + + @Test + public void ownInsertsAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownInsertsAreVisible(0)); + } + + @Test + public void othersUpdatesAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersUpdatesAreVisible(0)); + } + + @Test + public void othersDeletesAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersDeletesAreVisible(0)); + } + + @Test + public void othersInsertsAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersInsertsAreVisible(0)); + } + + @Test + public void updatesAreDetected() throws SQLException { + Assert.assertFalse(metaData.updatesAreDetected(0)); + } + + @Test + public void deletesAreDetected() throws SQLException { + Assert.assertFalse(metaData.deletesAreDetected(0)); + } + + @Test + public void insertsAreDetected() throws SQLException { + Assert.assertFalse(metaData.insertsAreDetected(0)); + } + + @Test + public void supportsBatchUpdates() throws SQLException { + Assert.assertFalse(metaData.supportsBatchUpdates()); + } + + @Test + public void getUDTs() throws SQLException { + Assert.assertNotNull(metaData.getUDTs("", "", "", null)); + } + + @Test + public void getConnection() throws SQLException { + Assert.assertNotNull(metaData.getConnection()); + } + + @Test + public void supportsSavepoints() throws SQLException { + Assert.assertFalse(metaData.supportsSavepoints()); + } + + @Test + public void supportsNamedParameters() throws SQLException { + Assert.assertFalse(metaData.supportsNamedParameters()); + } + + @Test + public void supportsMultipleOpenResults() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleOpenResults()); + } + + @Test + public void supportsGetGeneratedKeys() throws SQLException { + Assert.assertFalse(metaData.supportsGetGeneratedKeys()); + } + + @Test + public void getSuperTypes() throws SQLException { + Assert.assertNotNull(metaData.getSuperTypes("", "", "")); + } + + @Test + public void getSuperTables() throws SQLException { + System.out.println("****************************************************"); + + ResultSet rs = metaData.getSuperTables("log", "", "dn1"); + while (rs.next()) { + System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME")); + System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME")); + } + Assert.assertNotNull(rs); + } + + @Test + public void getAttributes() throws SQLException { + Assert.assertNotNull(metaData.getAttributes("", "", "", "")); + } + + @Test + public void supportsResultSetHoldability() throws SQLException { + Assert.assertTrue(metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)); + Assert.assertFalse(metaData.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT)); + } + + @Test + public void getResultSetHoldability() throws SQLException { + Assert.assertEquals(1, metaData.getResultSetHoldability()); + } + + @Test + public void getDatabaseMajorVersion() throws 
SQLException { + Assert.assertEquals(2, metaData.getDatabaseMajorVersion()); + } + + @Test + public void getDatabaseMinorVersion() throws SQLException { + Assert.assertEquals(0, metaData.getDatabaseMinorVersion()); + } + + @Test + public void getJDBCMajorVersion() throws SQLException { + Assert.assertEquals(2, metaData.getJDBCMajorVersion()); + } + + @Test + public void getJDBCMinorVersion() throws SQLException { + Assert.assertEquals(0, metaData.getJDBCMinorVersion()); + } + + @Test + public void getSQLStateType() throws SQLException { + Assert.assertEquals(0, metaData.getSQLStateType()); + } + + @Test + public void locatorsUpdateCopy() throws SQLException { + Assert.assertFalse(metaData.locatorsUpdateCopy()); + } + + @Test + public void supportsStatementPooling() throws SQLException { + Assert.assertFalse(metaData.supportsStatementPooling()); + } + + @Test + public void getRowIdLifetime() throws SQLException { + Assert.assertNull(metaData.getRowIdLifetime()); + } + + @Test + public void supportsStoredFunctionsUsingCallSyntax() throws SQLException { + Assert.assertFalse(metaData.supportsStoredFunctionsUsingCallSyntax()); + } + + @Test + public void autoCommitFailureClosesAllResultSets() throws SQLException { + Assert.assertFalse(metaData.autoCommitFailureClosesAllResultSets()); + } + + @Test + public void getClientInfoProperties() throws SQLException { + Assert.assertNotNull(metaData.getClientInfoProperties()); + } + + @Test + public void getFunctions() throws SQLException { + Assert.assertNotNull(metaData.getFunctions("", "", "")); + } + + @Test + public void getFunctionColumns() throws SQLException { + Assert.assertNotNull(metaData.getFunctionColumns("", "", "", "")); + } + + @Test + public void getPseudoColumns() throws SQLException { + Assert.assertNotNull(metaData.getPseudoColumns("", "", "", "")); + } + + @Test + public void generatedKeyAlwaysReturned() throws SQLException { + Assert.assertFalse(metaData.generatedKeyAlwaysReturned()); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection(url, properties); + metaData = connection.getMetaData().unwrap(RestfulDatabaseMetaData.class); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index 185c0306f5259206c96072b7f306f18a7a8a8f7e..451f5d49160a69b4786e806c51480236083eacac 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -10,7 +10,7 @@ import java.util.Random; public class RestfulJDBCTest { private static final String host = "127.0.0.1"; - // private static final String host = "master"; +// private static final String host = "master"; private static Connection connection; private Random random = new 
Random(System.currentTimeMillis()); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java new file mode 100644 index 0000000000000000000000000000000000000000..a3867c1b6e30d810dcea3001ee92eae698256341 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java @@ -0,0 +1,246 @@ +package com.taosdata.jdbc.rs; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; + +public class RestfulPreparedStatementTest { + private static final String host = "127.0.0.1"; + // private static final String host = "master"; + private static Connection conn; + private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + private static PreparedStatement pstmt_insert; + private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?"; + private static PreparedStatement pstmt_select; + + @Test + public void executeQuery() throws SQLException { + long end = System.currentTimeMillis(); + long start = end - 1000 * 60 * 60; + pstmt_select.setTimestamp(1, new Timestamp(start)); + pstmt_select.setTimestamp(2, new Timestamp(end)); + pstmt_select.setInt(3, 0); + + ResultSet rs = pstmt_select.executeQuery(); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + } + + @Test + public void executeUpdate() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setFloat(2, 3.14f); + int result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + } + + @Test + public void setNull() throws SQLException { + pstmt_insert.setNull(2, Types.FLOAT); + } + + @Test + public void setBoolean() throws SQLException { + pstmt_insert.setBoolean(2, true); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setByte() throws SQLException { + pstmt_insert.setByte(1, (byte) 0x001); + } + + @Test + public void setShort() { + + } + + @Test + public void setInt() { + + } + + @Test + public void setLong() { + + } + + @Test + public void setFloat() { + + } + + @Test + public void setDouble() { + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setBigDecimal() throws SQLException { + pstmt_insert.setBigDecimal(1, null); + } + + @Test + public void setString() { + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setBytes() throws SQLException { + pstmt_insert.setBytes(1, new byte[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setDate() throws SQLException { + pstmt_insert.setDate(1, new Date(System.currentTimeMillis())); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTime() throws SQLException { + pstmt_insert.setTime(1, new Time(System.currentTimeMillis())); + } + + @Test + public void setTimestamp() { + //TODO + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setAsciiStream() throws SQLException { + pstmt_insert.setAsciiStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setBinaryStream() throws SQLException { + 
pstmt_insert.setBinaryStream(1, null); + } + + @Test + public void clearParameters() { + //TODO + } + + @Test + public void setObject() throws SQLException { + pstmt_insert.setObject(1, System.currentTimeMillis()); + //TODO + } + + @Test + public void execute() { + //TODO + } + + @Test + public void addBatch() { + //TODO: + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setCharacterStream() throws SQLException { + pstmt_insert.setCharacterStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setRef() throws SQLException { + pstmt_insert.setRef(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setBlob() throws SQLException { + pstmt_insert.setBlob(1, (Blob) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setClob() throws SQLException { + pstmt_insert.setClob(1, (Clob) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setArray() throws SQLException { + pstmt_insert.setArray(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getMetaData() throws SQLException { + pstmt_insert.getMetaData(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setURL() throws SQLException { + pstmt_insert.setURL(1, null); + } + + @Test + public void getParameterMetaData() throws SQLException { + ParameterMetaData parameterMetaData = pstmt_insert.getParameterMetaData(); + Assert.assertNull(parameterMetaData); + //TODO: + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setRowId() throws SQLException { + pstmt_insert.setRowId(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNString() throws SQLException { + pstmt_insert.setNString(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNCharacterStream() throws SQLException { + pstmt_insert.setNCharacterStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNClob() throws SQLException { + pstmt_insert.setNClob(1, (NClob) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setSQLXML() throws SQLException { + pstmt_insert.setSQLXML(1, null); + } + + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_pstmt"); + stmt.execute("create database if not exists test_pstmt"); + stmt.execute("use test_pstmt"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); + stmt.execute("create table t1 using weather tags('beijing')"); + } + pstmt_insert = conn.prepareStatement(sql_insert); + pstmt_select = conn.prepareStatement(sql_select); + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (pstmt_insert != null) + pstmt_insert.close(); + if (pstmt_select != null) + pstmt_select.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index a14d09588d21f5d405cbb2456c9833d2a411ea96..b199eff1baab53d9c4a8c65f7e0bb58157657d33 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -259,10 +259,12 @@ public class RestfulResultSetTest { rs.previous(); } - @Test(expected = SQLException.class) + @Test public void setFetchDirection() throws SQLException { rs.setFetchDirection(ResultSet.FETCH_FORWARD); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); rs.setFetchDirection(ResultSet.FETCH_UNKNOWN); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } @Test @@ -270,14 +272,15 @@ public class RestfulResultSetTest { Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } - @Test(expected = SQLException.class) + @Test public void setFetchSize() throws SQLException { rs.setFetchSize(0); + Assert.assertEquals(0, rs.getFetchSize()); } @Test public void getFetchSize() throws SQLException { - Assert.assertEquals(1, rs.getFetchSize()); + Assert.assertEquals(0, rs.getFetchSize()); } @Test @@ -526,9 +529,12 @@ public class RestfulResultSetTest { rs.updateSQLXML(1, null); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getNString() throws SQLException { - rs.getNString("f1"); + String f10 = rs.getNString("f10"); + Assert.assertEquals("涛思数据", f10); + f10 = rs.getNString(10); + Assert.assertEquals("涛思数据", f10); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java new file mode 100644 index 0000000000000000000000000000000000000000..653e480413189667ec57d836f9b503f4c96a79cb --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java @@ -0,0 +1,418 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.util.UUID; + +public class RestfulStatementTest { + private static final String host = "127.0.0.1"; + // private static final String host = "master"; + private static Connection conn; + private static Statement stmt; + + @Test + public void executeQuery() { + try { + ResultSet rs = stmt.executeQuery("show databases"); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeUpdate() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + int affectRows = stmt.executeUpdate("create database " + dbName); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + 
Assert.assertEquals(1, affectRows); + affectRows = stmt.executeUpdate("drop database " + dbName); + Assert.assertEquals(0, affectRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void close() { + // test in AfterClass method + } + + + @Test + public void getMaxFieldSize() throws SQLException { + Assert.assertEquals(16 * 1024, stmt.getMaxFieldSize()); + } + + @Test(expected = SQLException.class) + public void setMaxFieldSize() throws SQLException { + + stmt.setMaxFieldSize(0); + stmt.setMaxFieldSize(-1); + } + + @Test + public void getMaxRows() throws SQLException { + Assert.assertEquals(0, stmt.getMaxRows()); + } + + @Test(expected = SQLException.class) + public void setMaxRows() throws SQLException { + stmt.setMaxRows(0); + stmt.setMaxRows(-1); + } + + @Test + public void setEscapeProcessing() throws SQLException { + stmt.setEscapeProcessing(true); + stmt.setEscapeProcessing(false); + } + + @Test + public void getQueryTimeout() throws SQLException { + Assert.assertEquals(0, stmt.getQueryTimeout()); + } + + @Test(expected = SQLException.class) + public void setQueryTimeout() throws SQLException { + stmt.setQueryTimeout(0); + stmt.setQueryTimeout(-1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void cancel() throws SQLException { + stmt.cancel(); + } + + @Test + public void getWarnings() throws SQLException { + Assert.assertNull(stmt.getWarnings()); + } + + @Test + public void clearWarnings() throws SQLException { + stmt.clearWarnings(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setCursorName() throws SQLException { + stmt.setCursorName(""); + } + + @Test + public void execute() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database if not exists " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table if not exists " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + + isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getResultSet() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database if not exists " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table if not exists " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 
22.33)"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + ResultSet rs = stmt.getResultSet(); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + Assert.assertEquals(3, meta.getColumnCount()); + int count = 0; + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + count++; + } + Assert.assertEquals(1, count); + + isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getUpdateCount() { + // already test in execute method + } + + @Test + public void getMoreResults() throws SQLException { + Assert.assertEquals(false, stmt.getMoreResults()); + } + + @Test(expected = SQLException.class) + public void setFetchDirection() throws SQLException { + stmt.setFetchDirection(ResultSet.FETCH_FORWARD); + stmt.setFetchDirection(ResultSet.FETCH_REVERSE); + stmt.setFetchDirection(ResultSet.FETCH_UNKNOWN); + stmt.setFetchDirection(-1); + } + + @Test + public void getFetchDirection() throws SQLException { + Assert.assertEquals(ResultSet.FETCH_FORWARD, stmt.getFetchDirection()); + } + + @Test(expected = SQLException.class) + public void setFetchSize() throws SQLException { + stmt.setFetchSize(0); + stmt.setFetchSize(-1); + } + + @Test + public void getFetchSize() throws SQLException { + stmt.setFetchSize(0); + Assert.assertEquals(0, stmt.getFetchSize()); + stmt.setFetchSize(0); + } + + @Test + public void getResultSetConcurrency() throws SQLException { + Assert.assertEquals(ResultSet.CONCUR_READ_ONLY, stmt.getResultSetConcurrency()); + } + + @Test + public void getResultSetType() throws SQLException { + Assert.assertEquals(ResultSet.TYPE_FORWARD_ONLY, stmt.getResultSetType()); + } + + @Test + public void addBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void clearBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.clearBatch(); + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + stmt.clearBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) 
tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + int[] results = stmt.executeBatch(); + Assert.assertEquals(0, results[0]); + Assert.assertEquals(0, results[1]); + Assert.assertEquals(1, results[2]); + Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[3]); + Assert.assertEquals(0, results[4]); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getConnection() { + try { + Connection connection = stmt.getConnection(); + Assert.assertNotNull(connection); + Assert.assertTrue(conn == connection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testGetMoreResults() throws SQLException { + Assert.assertEquals(false, stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getGeneratedKeys() throws SQLException { + stmt.getGeneratedKeys(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecuteUpdate() throws SQLException { + stmt.executeUpdate("", 1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecuteUpdate1() throws SQLException { + stmt.executeUpdate("", new int[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecuteUpdate2() throws SQLException { + stmt.executeUpdate("", new String[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecute() throws SQLException { + stmt.execute("", 1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecute1() throws SQLException { + stmt.execute("", new int[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testExecute2() throws SQLException { + stmt.execute("", new String[]{}); + } + + @Test + public void getResultSetHoldability() throws SQLException { + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, stmt.getResultSetHoldability()); + } + + @Test + public void isClosed() { + try { + Assert.assertEquals(false, stmt.isClosed()); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void setPoolable() throws SQLException { + stmt.setPoolable(true); + stmt.setPoolable(false); + } + + @Test + public void isPoolable() throws SQLException { + Assert.assertEquals(false, stmt.isPoolable()); + } + + @Test + public void closeOnCompletion() throws SQLException { + stmt.closeOnCompletion(); + } + + @Test + public void isCloseOnCompletion() throws SQLException { + Assert.assertFalse(stmt.isCloseOnCompletion()); + } + + @Test + public void unwrap() throws SQLException { + RestfulStatement unwrap = stmt.unwrap(RestfulStatement.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(stmt.isWrapperFor(RestfulStatement.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = 
DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", properties); + stmt = conn.createStatement(); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (stmt != null) + stmt.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java index 313abfd27865ef37d8b522b38951b17902f5a486..fe4d04775da642bcf258e3680b0f5ba822e69684 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -12,7 +12,7 @@ import java.sql.*; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class SQLTest { private static final String host = "127.0.0.1"; - // private static final String host = "master"; +// private static final String host = "master"; private static Connection connection; @Test @@ -323,6 +323,18 @@ public class SQLTest { SQLExecutor.executeQuery(connection, sql); } + @Test + public void testCase052() { + String sql = "select server_status()"; + SQLExecutor.executeQuery(connection, sql); + } + + @Test + public void testCase053() { + String sql = "select avg(cpu_taosd), avg(cpu_system), max(cpu_cores), avg(mem_taosd), avg(mem_system), max(mem_total), avg(disk_used), max(disk_total), avg(band_speed), avg(io_read), avg(io_write), sum(req_http), sum(req_select), sum(req_insert) from log.dn1 where ts> now - 60m and ts<= now interval(1m) fill(value, 0)"; + SQLExecutor.executeQuery(connection, sql); + } + @BeforeClass public static void before() throws ClassNotFoundException, SQLException { Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 2e1e39ef123c901b36050a9e239fe58d26869e8e..0d8c07041aa741793b7a1b8db20c3a3b470cf193 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt index 67357cb4698b2885b563fd54133f36aace38c54b..2699e1bc90e162c80d27d690e1f7163747616526 100644 --- a/src/connector/odbc/src/CMakeLists.txt +++ b/src/connector/odbc/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py index dba234d7a4360f2cd5167261f513ddcc1c31094d..4a829f36c4bf0d6e680ed923573509cc1fad39db 100644 --- a/src/connector/python/linux/python2/setup.py +++ b/src/connector/python/linux/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.5", + version="2.0.6", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/linux/python2/taos/__init__.py index 62e0536b6fca63a0aca0b6be3673104c27c631c6..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/linux/python2/taos/__init__.py +++ 
b/src/connector/python/linux/python2/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 2b1b29ee31af56499d626fc56d4950e2e25f9978..555cc3435bcbea302b34cbde09772ac5f6fe32b2 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == 
FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): 
+ """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is 
not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : 
_crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.CDLL('libtaos.so') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,12 +443,13 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: # CTaosInterface.libtaos.close(connection) - + @staticmethod def affectedRows(result): """The affected rows after runing query @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. 
@topic string, name of this subscription """ @@ -360,38 +511,53 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) + @staticmethod def freeResult(result): CTaosInterface.libtaos.taos_free_result(result) diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/linux/python2/taos/connection.py index 552250f1164ced467cd29b5084524985aca8848b..f6c395342c9c39a24bda6022f0ed36cb7bfe045b 100644 --- a/src/connector/python/linux/python2/taos/connection.py +++ b/src/connector/python/linux/python2/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -29,7 +31,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -43,7 +45,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, 
+ self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -55,7 +62,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -80,7 +88,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/linux/python2/taos/constants.py index feb7050a40b67f88a6d7ca859764fbbc6b36af1c..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 100644 --- a/src/connector/python/linux/python2/taos/constants.py +++ b/src/connector/python/linux/python2/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py index bc3b6c65d80cb2f025213f57a6eb728a182ff4a0..8f9aab82da64d24645311d1263f9abb006c737eb 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -128,8 +128,8 @@ class TDengineCursor(object): if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: self._affected_rows += CTaosInterface.affectedRows( - self._result ) - return CTaosInterface.affectedRows(self._result ) + self._result) + return CTaosInterface.affectedRows(self._result) else: self._fields = CTaosInterface.useResult( self._result) @@ -148,6 +148,7 @@ class TDengineCursor(object): """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
""" pass + def fetchmany(self): pass @@ -158,11 +159,26 @@ class TDengineCursor(object): if (dataType.upper() == "TINYINT"): if (self._description[col][1] == FieldType.C_TINYINT): return True + if (dataType.upper() == "TINYINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): + return True + if (dataType.upper() == "SMALLINT"): + if (self._description[col][1] == FieldType.C_SMALLINT): + return True + if (dataType.upper() == "SMALLINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): + return True if (dataType.upper() == "INT"): if (self._description[col][1] == FieldType.C_INT): return True + if (dataType.upper() == "INT UNSIGNED"): + if (self._description[col][1] == FieldType.C_INT_UNSIGNED): + return True if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_INT): + if (self._description[col][1] == FieldType.C_BIGINT): + return True + if (dataType.upper() == "BIGINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): return True if (dataType.upper() == "FLOAT"): if (self._description[col][1] == FieldType.C_FLOAT): @@ -191,16 +207,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) + def fetchall(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -208,16 +228,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) + def nextset(self): """ """ diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/linux/python2/taos/dbapi.py index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644 --- a/src/connector/python/linux/python2/taos/dbapi.py +++ b/src/connector/python/linux/python2/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = 
DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/linux/python2/taos/error.py index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644 --- a/src/connector/python/linux/python2/taos/error.py +++ b/src/connector/python/linux/python2/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. """ - pass \ No newline at end of file + pass diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/linux/python2/taos/subscription.py index 2d01395532820c3bd0e068ef7eb3d425eaaa6d78..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/linux/python2/taos/subscription.py +++ b/src/connector/python/linux/python2/taos/subscription.py @@ -1,52 +1,57 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. 
""" if self._sub is None: return False - + CTaosInterface.unsubscribe(self._sub, keepProgress) return True if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py index e238372cd3e4cffa5d5d2d33660364b9addd2ee5..a865f5df856d1b416d7f78da8b1f857a967f5e61 100644 --- a/src/connector/python/linux/python3/setup.py +++ b/src/connector/python/linux/python3/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.4", + version="2.0.5", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py index 8cf095ea68dda55a071403734d39e9198a71d8a1..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/linux/python3/taos/__init__.py +++ b/src/connector/python/linux/python3/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index fdebe349fe9597260f231adb52022a2ecc9e1063..555cc3435bcbea302b34cbde09772ac5f6fe32b2 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, 
ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * 
nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, 
+ FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.CDLL('libtaos.so') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,7 +443,8 @@ class CTaosInterface(object): @rtype: 0 on 
success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -360,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py index 552250f1164ced467cd29b5084524985aca8848b..f6c395342c9c39a24bda6022f0ed36cb7bfe045b 100644 --- a/src/connector/python/linux/python3/taos/connection.py +++ b/src/connector/python/linux/python3/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -29,7 +31,7 @@ 
class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -43,7 +45,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -55,7 +62,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -80,7 +88,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py index feb7050a40b67f88a6d7ca859764fbbc6b36af1c..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 100644 --- a/src/connector/python/linux/python3/taos/constants.py +++ b/src/connector/python/linux/python3/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index f972d2ff07f8e7c964839102a4af8b41f98d4622..2e7c362d547973bc3e78f2eb57a33b7fa2d0635e 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -5,6 +5,7 @@ import threading # querySeqNum = 0 + class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. 
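
The unsigned-type support threaded through the Python connector above rests on two coordinated additions: NULL sentinel constants in constants.py (C_TINYINT_UNSIGNED_NULL = 255, C_SMALLINT_UNSIGNED_NULL = 65535, and so on, mirroring the C definitions in tsdb.h) and the _crow_*_unsigned_to_python converters registered in cinterface.py's _CONVERT_FUNC tables, which cast a raw column block to the matching unsigned ctypes pointer and map the sentinel to None. Below is a minimal self-contained sketch of that pattern; the three-value buffer is hypothetical, standing in for a real taos_fetch_block() result, and this illustrates the technique rather than reproducing the connector's actual code path.

import ctypes

# Sentinel TDengine stores for a NULL unsigned tinyint (see constants.py above)
C_TINYINT_UNSIGNED_NULL = 255

def crow_tinyint_unsigned_to_python(data, num_of_rows):
    # Cast the void* column block to unsigned bytes and translate the
    # NULL sentinel to Python None, as the diff's converters do.
    ptr = ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))
    return [None if ele == C_TINYINT_UNSIGNED_NULL else ele
            for ele in ptr[:abs(num_of_rows)]]

# Hypothetical column block: the value 255 is the NULL marker, not data.
buf = (ctypes.c_ubyte * 3)(7, 255, 42)
print(crow_tinyint_unsigned_to_python(ctypes.cast(buf, ctypes.c_void_p), 3))
# prints [7, None, 42]

Note the tradeoff the sentinel scheme implies: the top of each unsigned range (255, 65535, 4294967295, 18446744073709551615) is reserved to encode NULL, so it can never appear as a stored value.
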
@@ -107,8 +108,8 @@ class TDengineCursor(object):
         # if threading.get_ident() != self._threadId:
         #    info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
         #    raise OperationalError(info)
-        #    print(info)
-        #    return None
+        # print(info)
+        # return None
 
         if not operation:
             return None
@@ -137,8 +138,8 @@ class TDengineCursor(object):
         if errno == 0:
             if CTaosInterface.fieldsCount(self._result) == 0:
                 self._affected_rows += CTaosInterface.affectedRows(
-                    self._result )
-                return CTaosInterface.affectedRows(self._result )
+                    self._result)
+                return CTaosInterface.affectedRows(self._result)
             else:
                 self._fields = CTaosInterface.useResult(
                     self._result)
@@ -168,11 +169,26 @@ class TDengineCursor(object):
         if (dataType.upper() == "TINYINT"):
             if (self._description[col][1] == FieldType.C_TINYINT):
                 return True
+        if (dataType.upper() == "TINYINT UNSIGNED"):
+            if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
+                return True
+        if (dataType.upper() == "SMALLINT"):
+            if (self._description[col][1] == FieldType.C_SMALLINT):
+                return True
+        if (dataType.upper() == "SMALLINT UNSIGNED"):
+            if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
+                return True
         if (dataType.upper() == "INT"):
             if (self._description[col][1] == FieldType.C_INT):
                 return True
+        if (dataType.upper() == "INT UNSIGNED"):
+            if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
+                return True
         if (dataType.upper() == "BIGINT"):
-            if (self._description[col][1] == FieldType.C_INT):
+            if (self._description[col][1] == FieldType.C_BIGINT):
+                return True
+        if (dataType.upper() == "BIGINT UNSIGNED"):
+            if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
                 return True
         if (dataType.upper() == "FLOAT"):
             if (self._description[col][1] == FieldType.C_FLOAT):
@@ -201,10 +217,13 @@ class TDengineCursor(object):
         buffer = [[] for i in range(len(self._fields))]
         self._rowcount = 0
         while True:
-            block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields)
+            block, num_of_fields = CTaosInterface.fetchRow(
+                self._result, self._fields)
             errno = CTaosInterface.libtaos.taos_errno(self._result)
             if errno != 0:
-                raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
+                raise ProgrammingError(
+                    CTaosInterface.errStr(
+                        self._result), errno)
             if num_of_fields == 0:
                 break
             self._rowcount += num_of_fields
@@ -219,15 +238,20 @@ class TDengineCursor(object):
         buffer = [[] for i in range(len(self._fields))]
         self._rowcount = 0
         while True:
-            block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields)
+            block, num_of_fields = CTaosInterface.fetchBlock(
+                self._result, self._fields)
             errno = CTaosInterface.libtaos.taos_errno(self._result)
             if errno != 0:
-                raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
-            if num_of_fields == 0: break
+                raise ProgrammingError(
+                    CTaosInterface.errStr(
+                        self._result), errno)
+            if num_of_fields == 0:
+                break
             self._rowcount += num_of_fields
             for i in range(len(self._fields)):
                 buffer[i].extend(block[i])
         return list(map(tuple, zip(*buffer)))
+
     def nextset(self):
         """ """
         pass
@@ -259,8 +283,8 @@ class TDengineCursor(object):
         # if threading.get_ident() != self._threadId:
         #    info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
         #    raise OperationalError(info)
-        #    print(info)
-        #    return None
+        # print(info)
+        # return None
 
         self._description = []
         for ele in self._fields:
@@ -268,4 +292,3 @@
                 (ele['name'], ele['type'], None, None, None, None,
                  False))
         return self._result
-
diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py
index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644
--- a/src/connector/python/linux/python3/taos/dbapi.py
+++ b/src/connector/python/linux/python3/taos/dbapi.py
@@ -4,6 +4,7 @@
 import time
 import datetime
 
+
 class DBAPITypeObject(object):
     def __init__(self, *values):
         self.values = values
@@ -16,23 +17,28 @@ class DBAPITypeObject(object):
         else:
             return -1
 
+
 Date = datetime.date
 Time = datetime.time
 Timestamp = datetime.datetime
 
+
 def DataFromTicks(ticks):
     return Date(*time.localtime(ticks)[:3])
 
+
 def TimeFromTicks(ticks):
     return Time(*time.localtime(ticks)[3:6])
 
+
 def TimestampFromTicks(ticks):
     return Timestamp(*time.localtime(ticks)[:6])
 
+
 Binary = bytes
 
 # STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
 # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
 # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
 # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
\ No newline at end of file
+# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py
index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644
--- a/src/connector/python/linux/python3/taos/error.py
+++ b/src/connector/python/linux/python3/taos/error.py
@@ -1,35 +1,41 @@
 """Python exceptions
 """
 
+
 class Error(Exception):
     def __init__(self, msg=None, errno=None):
         self.msg = msg
         self._full_msg = self.msg
         self.errno = errno
-    
+
     def __str__(self):
         return self._full_msg
 
+
 class Warning(Exception):
     """Exception raised for important warnings like data truncations while inserting.
     """
     pass
 
+
 class InterfaceError(Error):
-    """Exception raised for errors that are related to the database interface rather than the database itself. 
+    """Exception raised for errors that are related to the database interface rather than the database itself.
     """
     pass
 
+
 class DatabaseError(Error):
-    """Exception raised for errors that are related to the database. 
+    """Exception raised for errors that are related to the database.
     """
     pass
 
+
 class DataError(DatabaseError):
     """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
     """
     pass
 
+
 class OperationalError(DatabaseError):
     """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
     """
@@ -41,17 +47,20 @@ class IntegrityError(DatabaseError):
     """
     pass
 
+
 class InternalError(DatabaseError):
     """Exception raised when the database encounters an internal error.
     """
     pass
 
+
 class ProgrammingError(DatabaseError):
     """Exception raised for programming errors.
     """
     pass
 
+
 class NotSupportedError(DatabaseError):
     """Exception raised in case a method or database API was used which is not supported by the database,.
     """
-    pass
\ No newline at end of file
+    pass
diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py
index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644
--- a/src/connector/python/linux/python3/taos/subscription.py
+++ b/src/connector/python/linux/python3/taos/subscription.py
@@ -1,32 +1,33 @@
 from .cinterface import CTaosInterface
 from .error import *
 
+
 class TDengineSubscription(object):
     """TDengine subscription object
     """
+
     def __init__(self, sub):
         self._sub = sub
 
-
     def consume(self):
         """Consume rows of a subscription
         """
         if self._sub is None:
             raise OperationalError("Invalid use of consume")
-        
+
         result, fields = CTaosInterface.consume(self._sub)
         buffer = [[] for i in range(len(fields))]
         while True:
             block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
-            if num_of_fields == 0: break
+            if num_of_fields == 0:
+                break
             for i in range(len(fields)):
                 buffer[i].extend(block[i])
 
         self.fields = fields
         return list(map(tuple, zip(*buffer)))
 
-
-    def close(self, keepProgress = True):
+    def close(self, keepProgress=True):
         """Close the Subscription.
         """
         if self._sub is None:
@@ -38,15 +39,19 @@ class TDengineSubscription(object):
 
 if __name__ == '__main__':
     from .connection import TDengineConnection
-    conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test")
+    conn = TDengineConnection(
+        host="127.0.0.1",
+        user="root",
+        password="taosdata",
+        database="test")
 
     # Generate a cursor object to run SQL commands
     sub = conn.subscribe(True, "test", "select * from meters;", 1000)
 
-    for i in range(0,10):
+    for i in range(0, 10):
         data = sub.consume()
         for d in data:
             print(d)
 
     sub.close()
-    conn.close()
\ No newline at end of file
+    conn.close()
diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..79a9d730868bfe5d3fa01d679a4abfe9ee7811f0
--- /dev/null
+++ b/src/connector/python/osx/python3/LICENSE
@@ -0,0 +1,12 @@
+  Copyright (c) 2019 TAOS Data, Inc.
+
+This program is free software: you can use, redistribute, and/or modify
+it under the terms of the GNU Affero General Public License, version 3
+or later ("AGPL"), as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..70db6bba13a8b52b9f707400b80d1302542dbc34 --- /dev/null +++ b/src/connector/python/osx/python3/README.md @@ -0,0 +1 @@ +# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b97f753c7ee3bc303be0db2217e87e889ef4df --- /dev/null +++ b/src/connector/python/osx/python3/setup.py @@ -0,0 +1,20 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="2.0.5", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "Operating System :: MacOS X", + ], +) diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..973263573808232e4e71dc0158585624a8e7d2ab --- /dev/null +++ b/src/connector/python/osx/python3/taos/__init__.py @@ -0,0 +1,24 @@ + +from .connection import TDengineConnection +from .cursor import TDengineCursor + +# Globals +threadsafety = 0 +paramstyle = 'pyformat' + +__all__ = ['connection', 'cursor'] + + +def connect(*args, **kwargs): + """ Function to return a TDengine connector object + + Current supporting keyword parameters: + @dsn: Data source name as string + @user: Username as string(optional) + @password: Password as string(optional) + @host: Hostname(optional) + @database: Database name(optional) + + @rtype: TDengineConnector + """ + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py new file mode 100644 index 0000000000000000000000000000000000000000..6f56cf0c5e09c14fdc9d1296c80e434ab672ef44 --- /dev/null +++ b/src/connector/python/osx/python3/taos/cinterface.py @@ -0,0 +1,642 @@ +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli / 1000.0) + + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro / 1000000.0) + + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + else: + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == 
FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert 
C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + else: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + else: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + if num_of_rows > 0: + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + else: + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res = [] + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + tmpstr = ctypes.c_char_p(data) + res.append(tmpstr.value.decode()) + else: + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) + except ValueError: + res.append(None) + + return res + + +def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + res = [] + if num_of_rows > 0: + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) + return res + + +def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res = [] + if num_of_rows >= 0: + for i in range(abs(num_of_rows)): + try: + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 
4))))[0].value) + except ValueError: + res.append(None) + return res + + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python +} + +_CONVERT_FUNC_BLOCK = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python_block, + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python +} + +# Corresponding TAOS_FIELD structure in C + + +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 65), + ('type', ctypes.c_char), + ('bytes', ctypes.c_short)] + +# C interface class + + +class CTaosInterface(object): + + libtaos = ctypes.CDLL('libtaos.dylib') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + #libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p + libtaos.taos_fetch_lengths.restype = ctypes.c_void_p + libtaos.taos_free_result.restype = None + libtaos.taos_errno.restype = ctypes.c_int + libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config is not None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host is not None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is 
expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value is None: + print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") + # sys.exit(1) + # else: + # print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + #print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + # finally: + # CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(result): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(result) + + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(result): + '''Use result after calling self.query + ''' + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + if num_of_rows == 0: + return None, 0 + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: + raise DatabaseError("Invalid data type returned from database") + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def fetchRow(result, fields): + pblock = ctypes.c_void_p(0) + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError( + "Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) + else: + return None, 0 + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(result): + return CTaosInterface.libtaos.taos_field_count(result) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # 
@staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(result): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(result) + + @staticmethod + def errStr(result): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + result = cinter.query(conn, 'show databases') + + print('Query Affected rows: {}'.format(cinter.affectedRows(result))) + + fields = CTaosInterface.useResult(result) + + data, num_of_rows = CTaosInterface.fetchBlock(result, fields) + + print(data) + + cinter.freeResult(result) + cinter.close(conn) diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f6c395342c9c39a24bda6022f0ed36cb7bfe045b --- /dev/null +++ b/src/connector/python/osx/python3/taos/connection.py @@ -0,0 +1,95 @@ +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + + +class TDengineConnection(object): + """ TDengine connection object + """ + + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) + + def close(self): + """Close current connection. 
+ """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + pass + + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() + print("Hello world") diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 --- /dev/null +++ b/src/connector/python/osx/python3/taos/constants.py @@ -0,0 +1,42 @@ +"""Constants in TDengine python +""" + +from .dbapi import * + + +class FieldType(object): + """TDengine Field Types + """ + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 + C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 + C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 + C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int('0xff', 16)]) + # Timestamp precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..2e7c362d547973bc3e78f2eb57a33b7fa2d0635e --- /dev/null +++ b/src/connector/python/osx/python3/taos/cursor.py @@ -0,0 +1,294 @@ +from .cinterface import CTaosInterface +from .error import * +from .constants import FieldType +import threading + +# querySeqNum = 0 + + +class TDengineCursor(object): + """Database cursor which is used to manage the context of a fetch operation. + + Attributes: + .description: Read-only attribute consists of 7-item sequences: + + > name (mondatory) + > type_code (mondatory) + > display_size + > internal_size + > precision + > scale + > null_ok + + This attribute will be None for operations that do not return rows or + if the cursor has not had an operation invoked via the .execute*() method yet. 
+ + .rowcount:This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like SELECT) or affected + """ + + def __init__(self, connection=None): + self._description = [] + self._rowcount = -1 + self._connection = None + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + self._logfile = "" + self._threadId = threading.get_ident() + + if connection is not None: + self._connection = connection + + def __iter__(self): + return self + + def __next__(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetch iterator") + + if self._block_rows <= self._block_iter: + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) + if self._block_rows == 0: + raise StopIteration + self._block = list(map(tuple, zip(*block))) + self._block_iter = 0 + + data = self._block[self._block_iter] + self._block_iter += 1 + + return data + + @property + def description(self): + """Return the description of the object. + """ + return self._description + + @property + def rowcount(self): + """Return the rowcount of the object + """ + return self._rowcount + + @property + def affected_rows(self): + """Return the rowcount of insertion + """ + return self._affected_rows + + def callproc(self, procname, *args): + """Call a stored database procedure with the given name. + + Void functionality since no stored procedures. + """ + pass + + def log(self, logfile): + self._logfile = logfile + + def close(self): + """Close the cursor. + """ + if self._connection is None: + return False + + self._reset_result() + self._connection = None + + return True + + def execute(self, operation, params=None): + """Prepare and execute a database operation (query or command). + """ + # if threading.get_ident() != self._threadId: + # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + if not operation: + return None + + if not self._connection: + # TODO : change the exception raised here + raise ProgrammingError("Cursor is not connected") + + self._reset_result() + + stmt = operation + if params is not None: + pass + + # global querySeqNum + # querySeqNum += 1 + # localSeqNum = querySeqNum # avoid raice condition + # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) + self._result = CTaosInterface.query(self._connection._conn, stmt) + # print(" << Query ({}) Exec Done".format(localSeqNum)) + if (self._logfile): + with open(self._logfile, "a") as logfile: + logfile.write("%s;\n" % operation) + + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno == 0: + if CTaosInterface.fieldsCount(self._result) == 0: + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) + else: + self._fields = CTaosInterface.useResult( + self._result) + return self._handle_result() + else: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + """ + pass + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
+ """ + pass + + def fetchmany(self): + pass + + def istype(self, col, dataType): + if (dataType.upper() == "BOOL"): + if (self._description[col][1] == FieldType.C_BOOL): + return True + if (dataType.upper() == "TINYINT"): + if (self._description[col][1] == FieldType.C_TINYINT): + return True + if (dataType.upper() == "TINYINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): + return True + if (dataType.upper() == "SMALLINT"): + if (self._description[col][1] == FieldType.C_SMALLINT): + return True + if (dataType.upper() == "SMALLINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): + return True + if (dataType.upper() == "INT"): + if (self._description[col][1] == FieldType.C_INT): + return True + if (dataType.upper() == "INT UNSIGNED"): + if (self._description[col][1] == FieldType.C_INT_UNSIGNED): + return True + if (dataType.upper() == "BIGINT"): + if (self._description[col][1] == FieldType.C_BIGINT): + return True + if (dataType.upper() == "BIGINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): + return True + if (dataType.upper() == "FLOAT"): + if (self._description[col][1] == FieldType.C_FLOAT): + return True + if (dataType.upper() == "DOUBLE"): + if (self._description[col][1] == FieldType.C_DOUBLE): + return True + if (dataType.upper() == "BINARY"): + if (self._description[col][1] == FieldType.C_BINARY): + return True + if (dataType.upper() == "TIMESTAMP"): + if (self._description[col][1] == FieldType.C_TIMESTAMP): + return True + if (dataType.upper() == "NCHAR"): + if (self._description[col][1] == FieldType.C_NCHAR): + return True + + return False + + def fetchall_row(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. + """ + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + + def fetchall(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + + def nextset(self): + """ + """ + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version. 
+ """ + self._description = [] + self._rowcount = -1 + if self._result is not None: + CTaosInterface.freeResult(self._result) + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + + def _handle_result(self): + """Handle the return result from query. + """ + # if threading.get_ident() != self._threadId: + # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + self._description = [] + for ele in self._fields: + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + + return self._result diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py new file mode 100644 index 0000000000000000000000000000000000000000..594681ada953abf388e503c23199043cf686e1a3 --- /dev/null +++ b/src/connector/python/osx/python3/taos/dbapi.py @@ -0,0 +1,44 @@ +"""Type Objects and Constructors. +""" + +import time +import datetime + + +class DBAPITypeObject(object): + def __init__(self, *values): + self.values = values + + def __com__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + + +def DataFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + + +Binary = bytes + +# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) +# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) +# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) +# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py new file mode 100644 index 0000000000000000000000000000000000000000..c584badce8320cd35dc81e8f6b613c56163b1a29 --- /dev/null +++ b/src/connector/python/osx/python3/taos/error.py @@ -0,0 +1,66 @@ +"""Python exceptions +""" + + +class Error(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self._full_msg = self.msg + self.errno = errno + + def __str__(self): + return self._full_msg + + +class Warning(Exception): + """Exception raised for important warnings like data truncations while inserting. + """ + pass + + +class InterfaceError(Error): + """Exception raised for errors that are related to the database interface rather than the database itself. + """ + pass + + +class DatabaseError(Error): + """Exception raised for errors that are related to the database. + """ + pass + + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + """ + pass + + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + """ + pass + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database is affected. + """ + pass + + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal error. 
+ """ + pass + + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors. + """ + pass + + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used which is not supported by the database,. + """ + pass diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..270d9de09217fc58a389981a3542698dd1c0428a --- /dev/null +++ b/src/connector/python/osx/python3/taos/subscription.py @@ -0,0 +1,57 @@ +from .cinterface import CTaosInterface +from .error import * + + +class TDengineSubscription(object): + """TDengine subscription object + """ + + def __init__(self, sub): + self._sub = sub + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: + break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + def close(self, keepProgress=True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0, 10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py index 62e0536b6fca63a0aca0b6be3673104c27c631c6..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/windows/python2/taos/__init__.py +++ b/src/connector/python/windows/python2/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 14f4f49be8ae719da16dbed7e79631db7ef9689e..d8cdce2ad138c34db5193e3972ba51d46c693254 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ 
-18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + 
:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return 
[ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: 
res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.windll.LoadLibrary('taos') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host 
is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,12 +443,13 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: # CTaosInterface.libtaos.close(connection) - + @staticmethod def affectedRows(result): """The affected rows after runing query @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -360,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + 
raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py index d9576a553b810a975429b2cefc03e5e60f240a88..5729d01c6df8c0e58086726c4001467811e9fee5 100644 --- a/src/connector/python/windows/python2/taos/connection.py +++ b/src/connector/python/windows/python2/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -30,7 +32,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -44,7 +46,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -56,7 +63,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -81,7 +89,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py index a994bceaf61894ac0bf9a719a574d00a09c584a5..8a8011c3e36c52993e9d03228c2a50e2af6a7c9e 100644 --- a/src/connector/python/windows/python2/taos/constants.py +++ b/src/connector/python/windows/python2/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py index 958466985ef050df64ceecdabd994e112716ccf0..0656b6326e173b111eb8293c6e3b76678eccc0e2 100644 --- a/src/connector/python/windows/python2/taos/cursor.py +++ b/src/connector/python/windows/python2/taos/cursor.py @@ -5,6 +5,7 @@ import threading # 
querySeqNum = 0 + class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. @@ -23,7 +24,7 @@ class TDengineCursor(object): if the cursor has not had an operation invoked via the .execute*() method yet. .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected + .execute*() produced (for DQL statements like SELECT) or affected """ def __init__(self, connection=None): @@ -50,13 +51,14 @@ class TDengineCursor(object): raise OperationalError("Invalid use of fetch iterator") if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow(self._result, self._fields) + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) if self._block_rows == 0: raise StopIteration self._block = list(map(tuple, zip(*block))) self._block_iter = 0 - data = self._block[self._block_iter] + data = self._block[self._block_iter] self._block_iter += 1 return data @@ -91,7 +93,7 @@ class TDengineCursor(object): """ if self._connection is None: return False - + self._reset_result() self._connection = None @@ -106,19 +108,20 @@ class TDengineCursor(object): if not self._connection: # TODO : change the exception raised here raise ProgrammingError("Cursor is not connected") - + self._reset_result() stmt = operation if params is not None: pass - + self._result = CTaosInterface.query(self._connection._conn, stmt) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows(self._result) - return CTaosInterface.affectedRows(self._result ) + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) else: self._fields = CTaosInterface.useResult(self._result) return self._handle_result() @@ -147,17 +150,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) - + def fetchall(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -165,20 +171,21 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - if num_of_fields == 0: break + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): """ """ @@ -209,6 +216,7 @@ class TDengineCursor(object): """ self._description = [] for ele in 
self._fields: - self._description.append((ele['name'], ele['type'], None, None, None, None, False)) - + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + return self._result diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644 --- a/src/connector/python/windows/python2/taos/dbapi.py +++ b/src/connector/python/windows/python2/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644 --- a/src/connector/python/windows/python2/taos/error.py +++ b/src/connector/python/windows/python2/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. 
""" - pass \ No newline at end of file + pass diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/windows/python2/taos/subscription.py +++ b/src/connector/python/windows/python2/taos/subscription.py @@ -1,32 +1,33 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. """ if self._sub is None: @@ -38,15 +39,19 @@ class TDengineSubscription(object): if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py index c6dd929a6a5110300ecd2e1042d48a5ad6a4bf46..b57e25fd2c320956e46b190d9f0a1139db1cced0 100644 --- a/src/connector/python/windows/python3/taos/__init__.py +++ b/src/connector/python/windows/python3/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index 42b820ca80feda4a269e5e80f288bd1f5e87adcd..d8cdce2ad138c34db5193e3972ba51d46c693254 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,170 +21,309 @@ def 
_crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == 
FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) 
else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res - + + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in 
range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.windll.LoadLibrary('taos') @@ -218,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -229,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -238,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else 
ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -257,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -270,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -295,7 +443,8 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: @@ -310,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -362,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = 
_CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) @@ -476,4 +639,4 @@ if __name__ == '__main__': print(data) cinter.freeResult(result) - cinter.close(conn) \ No newline at end of file + cinter.close(conn) diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py index d9576a553b810a975429b2cefc03e5e60f240a88..5729d01c6df8c0e58086726c4001467811e9fee5 100644 --- a/src/connector/python/windows/python3/taos/connection.py +++ b/src/connector/python/windows/python3/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -30,7 +32,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -44,7 +46,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -56,7 +63,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -81,7 +89,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py index def2bbc0a8c063e85214a634b60c5db7a5fd1259..49fc17b2fb98a6684e74e4a044651fdc6237518e 100644 --- a/src/connector/python/windows/python3/taos/constants.py +++ b/src/connector/python/windows/python3/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py index bbac1b1dd5158a5f074325a72f15f6993e97e9da..769cb7cf0f61fe850c16315bf552162f33536502 100644 --- a/src/connector/python/windows/python3/taos/cursor.py +++ b/src/connector/python/windows/python3/taos/cursor.py @@ 
-24,7 +24,7 @@ class TDengineCursor(object): if the cursor has not had an operation invoked via the .execute*() method yet. .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected + .execute*() produced (for DQL statements like SELECT) or affected """ def __init__(self, connection=None): @@ -51,13 +51,14 @@ class TDengineCursor(object): raise OperationalError("Invalid use of fetch iterator") if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow(self._result, self._fields) + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) if self._block_rows == 0: raise StopIteration self._block = list(map(tuple, zip(*block))) self._block_iter = 0 - data = self._block[self._block_iter] + data = self._block[self._block_iter] self._block_iter += 1 return data @@ -92,7 +93,7 @@ class TDengineCursor(object): """ if self._connection is None: return False - + self._reset_result() self._connection = None @@ -107,24 +108,25 @@ class TDengineCursor(object): if not self._connection: # TODO : change the exception raised here raise ProgrammingError("Cursor is not connected") - + self._reset_result() stmt = operation if params is not None: pass - + self._result = CTaosInterface.query(self._connection._conn, stmt) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows(self._result ) - return CTaosInterface.affectedRows(self._result ) + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) else: - self._fields = CTaosInterface.useResult(self._result ) + self._fields = CTaosInterface.useResult(self._result) return self._handle_result() else: - raise ProgrammingError(CTaosInterface.errStr(self._result ), errno) + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) def executemany(self, operation, seq_of_parameters): """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. 
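The execute() hunk above dispatches on taos_errno: a result with no fields is a write, so only affectedRows is read, while a fielded result goes through useResult and is later drained by the fetch hunks that follow. Those fetch loops accumulate data column by column (one list per field, mirroring the C block layout) and only transpose to row tuples at the very end. A minimal self-contained sketch of that transposition, with hypothetical column values standing in for what fetchRow/fetchBlock return:

    # One list per field, column-major, as built by `buffer[i].extend(block[i])`.
    buffer = [
        [1617181920000, 1617181921000],  # hypothetical TIMESTAMP column (ms)
        [23.5, None],                    # hypothetical DOUBLE column; None marks NULL
        ["beijing", "shanghai"],         # hypothetical BINARY column
    ]

    # zip(*buffer) walks all columns in lockstep, yielding one pairing per row;
    # map(tuple, ...) turns each pairing into the row tuple the cursor returns.
    rows = list(map(tuple, zip(*buffer)))
    assert rows == [
        (1617181920000, 23.5, "beijing"),
        (1617181921000, None, "shanghai"),
    ]

The same zip(*block) idiom appears in the fetch iterator earlier in this file, so a partially consumed block and a fully buffered result yield rows of identical shape.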
@@ -148,10 +150,13 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields @@ -166,20 +171,21 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - if num_of_fields == 0: break + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): """ """ @@ -204,12 +210,13 @@ class TDengineCursor(object): self._block_rows = -1 self._block_iter = 0 self._affected_rows = 0 - + def _handle_result(self): """Handle the return result from query. """ self._description = [] for ele in self._fields: - self._description.append((ele['name'], ele['type'], None, None, None, None, False)) - + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + return self._result diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py index 9b1cb1321c14619782c801e9381010f1f67fbc2e..a29621f7a3594a618b59b30bdc96197c4222a619 100644 --- a/src/connector/python/windows/python3/taos/dbapi.py +++ b/src/connector/python/windows/python3/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py index ccc0e61d84597a7cae9b360b7fa2434b9bc47401..238b293a0b609570e7b5d536648c6ada3ca2f209 100644 --- a/src/connector/python/windows/python3/taos/error.py +++ b/src/connector/python/windows/python3/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data 
truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. """ - pass \ No newline at end of file + pass diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/windows/python3/taos/subscription.py +++ b/src/connector/python/windows/python3/taos/subscription.py @@ -1,32 +1,33 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. 
""" if self._sub is None: @@ -38,15 +39,19 @@ class TDengineSubscription(object): if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt index 73d5eebd6d3d20d8ed4e0c150d8873c27c217d51..e9ed2996c74e2c59d56245e6fc1e932ebb07dfb0 100644 --- a/src/cq/CMakeLists.txt +++ b/src/cq/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/cq/test/CMakeLists.txt b/src/cq/test/CMakeLists.txt index fc3a1ea93a72a9ab7791e596688a7a4a1dd8b77f..cd124567afd8766173cf07e7a6191ab473be1714 100644 --- a/src/cq/test/CMakeLists.txt +++ b/src/cq/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) LIST(APPEND CQTEST_SRC ./cqtest.c) diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index b010c0c36353636ffb9081583a3ed808f0d21719..644a4e875d62622c07034639a4e08e584e99fdfb 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c index 94d23609505bf952225b8dd2c752754dbd5d55b9..87baff30673afc68eb23a00bef279433a422ba67 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -29,7 +29,7 @@ typedef struct { static SCheckItem tsCheckItem[TSDB_CHECK_ITEM_MAX] = {{0}}; int64_t tsMinFreeMemSizeForStart = 0; -static int32_t bindTcpPort(int16_t port) { +static int32_t bindTcpPort(uint16_t port) { SOCKET serverSocket; struct sockaddr_in server_addr; @@ -85,9 +85,9 @@ static int32_t bindUdpPort(int16_t port) { static int32_t dnodeCheckNetwork() { int32_t ret; - int16_t startPort = tsServerPort; + uint16_t startPort = tsServerPort; - for (int16_t port = startPort; port < startPort + 12; port++) { + for (uint16_t port = startPort; port < startPort + 12; port++) { ret = bindTcpPort(port); if (0 != ret) { dError("failed to tcp bind port %d, quit", port); diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index 218d48747333cc9490ac7517d193a1ec2f18e252..79744e153e46777c098644ac9902fb76be81fb11 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -145,6 +145,14 @@ void dnodeSendRpcMWriteRsp(void *pMsg, int32_t code) { return; } + dTrace("msg:%p, app:%p type:%s master:%p will be responsed", pWrite, pWrite->rpcMsg.ahandle, + taosMsg[pWrite->rpcMsg.msgType], pWrite->pBatchMasterMsg); + if (pWrite->pBatchMasterMsg && pWrite != pWrite->pBatchMasterMsg) { + dError("msg:%p, app:%p type:%s master:%p sub message should not response!", pWrite, pWrite->rpcMsg.ahandle, + taosMsg[pWrite->rpcMsg.msgType], pWrite->pBatchMasterMsg); + return; + } + SRpcMsg rpcRsp = { .handle = pWrite->rpcMsg.handle, .pCont = pWrite->rpcRsp.rsp, diff --git a/src/inc/taos.h b/src/inc/taos.h index 
05d390ffd0cdf4bf0acab82714c867777d9593d4..cd8e116053bd9adabda9a1eeeb20c6d92679d99d 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -68,7 +68,7 @@ typedef struct taosField { #define DLL_EXPORT #endif -DLL_EXPORT void taos_init(); +DLL_EXPORT int taos_init(); DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 9f3c31f22595d6dc6007dc9517390b970b781943..f4712198ee9c2aee97eda1a03085032c81c53b84 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -286,7 +286,7 @@ do { \ #define TSDB_MAX_COMP_LEVEL 2 #define TSDB_DEFAULT_COMP_LEVEL 2 -#define TSDB_MIN_WAL_LEVEL 1 +#define TSDB_MIN_WAL_LEVEL 0 #define TSDB_MAX_WAL_LEVEL 2 #define TSDB_DEFAULT_WAL_LEVEL 1 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 429304c7449e1a5541e0a231512c292172af95f4..3eb197868b26115ff25206cc8db2bdd809c634cc 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -23,11 +23,7 @@ extern "C" { #include #include -#ifdef TAOS_ERROR_C -#define TAOS_DEFINE_ERROR(name, mod, code, msg) {.val = (int32_t)((0x80000000 | ((mod)<<16) | (code))), .str=(msg)}, -#else -#define TAOS_DEFINE_ERROR(name, mod, code, msg) static const int32_t name = (int32_t)((0x80000000 | ((mod)<<16) | (code))); -#endif +#define TAOS_DEF_ERROR_CODE(mod, code) ((int32_t)((0x80000000 | ((mod)<<16) | (code)))) #define TAOS_SYSTEM_ERROR(code) (0x80ff0000 | (code)) #define TAOS_SUCCEEDED(err) ((err) >= 0) @@ -40,399 +36,388 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SUCCESS 0 -#ifdef TAOS_ERROR_C -static STaosError errors[] = { - {.val = 0, .str = "success"}, -#endif - // rpc -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_ACTION_IN_PROGRESS, 0, 0x0001, "Action in progress") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_REQUIRED, 0, 0x0002, "Authentication required") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, 0, 0x0003, "Authentication failure") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_REDIRECT, 0, 0x0004, "Redirect") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NOT_READY, 0, 0x0005, "System not ready") // peer is not ready to process data -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_ALREADY_PROCESSED, 0, 0x0006, "Message already processed") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED,0, 0x0007, "Last session not finished") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MISMATCHED_LINK_ID, 0, 0x0008, "Mismatched meter id") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TOO_SLOW, 0, 0x0009, "Processing of request timed out") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, 0, 0x000A, "Number of sessions reached limit") // too many sessions -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, 0, 0x000B, "Unable to establish connection") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_APP_ERROR, 0, 0x000C, "Unexpected generic error in RPC") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_UNEXPECTED_RESPONSE, 0, 0x000D, "Unexpected response") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VALUE, 0, 0x000E, "Invalid value") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TRAN_ID, 0, 0x000F, "Invalid transaction id") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_SESSION_ID, 0, 0x0010, "Invalid session id") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_MSG_TYPE, 0, 0x0011, "Invalid message type") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_RESPONSE_TYPE, 0, 0x0012, "Invalid response type") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, 0, 0x0013, "Client and server's time is not synchronized") 
-TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, 0, 0x0014, "Database not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, 0, 0x0015, "Unable to resolve FQDN") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VERSION, 0, 0x0016, "Invalid app version") +#define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") +#define TSDB_CODE_RPC_AUTH_REQUIRED TAOS_DEF_ERROR_CODE(0, 0x0002) //"Authentication required") +#define TSDB_CODE_RPC_AUTH_FAILURE TAOS_DEF_ERROR_CODE(0, 0x0003) //"Authentication failure") +#define TSDB_CODE_RPC_REDIRECT TAOS_DEF_ERROR_CODE(0, 0x0004) //"Redirect") +#define TSDB_CODE_RPC_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0005) //"System not ready") // peer is not ready to process data +#define TSDB_CODE_RPC_ALREADY_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0006) //"Message already processed") +#define TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x0007) //"Last session not finished") +#define TSDB_CODE_RPC_MISMATCHED_LINK_ID TAOS_DEF_ERROR_CODE(0, 0x0008) //"Mismatched meter id") +#define TSDB_CODE_RPC_TOO_SLOW TAOS_DEF_ERROR_CODE(0, 0x0009) //"Processing of request timed out") +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x000A) //"Number of sessions reached limit") // too many sessions +#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x000B) //"Unable to establish connection") +#define TSDB_CODE_RPC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x000C) //"Unexpected generic error in RPC") +#define TSDB_CODE_RPC_UNEXPECTED_RESPONSE TAOS_DEF_ERROR_CODE(0, 0x000D) //"Unexpected response") +#define TSDB_CODE_RPC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x000E) //"Invalid value") +#define TSDB_CODE_RPC_INVALID_TRAN_ID TAOS_DEF_ERROR_CODE(0, 0x000F) //"Invalid transaction id") +#define TSDB_CODE_RPC_INVALID_SESSION_ID TAOS_DEF_ERROR_CODE(0, 0x0010) //"Invalid session id") +#define TSDB_CODE_RPC_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0011) //"Invalid message type") +#define TSDB_CODE_RPC_INVALID_RESPONSE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0012) //"Invalid response type") +#define TSDB_CODE_RPC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0013) //"Client and server's time is not synchronized") +#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready") +#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to resolve FQDN") +#define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version") //common & util -TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, 0, 0x0100, "Operation not supported") -TAOS_DEFINE_ERROR(TSDB_CODE_COM_MEMORY_CORRUPTED, 0, 0x0101, "Memory corrupted") -TAOS_DEFINE_ERROR(TSDB_CODE_COM_OUT_OF_MEMORY, 0, 0x0102, "Out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_COM_INVALID_CFG_MSG, 0, 0x0103, "Invalid config message") -TAOS_DEFINE_ERROR(TSDB_CODE_COM_FILE_CORRUPTED, 0, 0x0104, "Data file corrupted") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, 0, 0x0105, "Ref out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, 0, 0x0106, "too many Ref Objs") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_ID_REMOVED, 0, 0x0107, "Ref ID is removed") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_INVALID_ID, 0, 0x0108, "Invalid Ref ID") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_ALREADY_EXIST, 0, 0x0109, "Ref is already there") -TAOS_DEFINE_ERROR(TSDB_CODE_REF_NOT_EXIST, 0, 0x010A, "Ref is not there") +#define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported") +#define TSDB_CODE_COM_MEMORY_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0101) //"Memory corrupted") +#define 
TSDB_CODE_COM_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0102) //"Out of memory") +#define TSDB_CODE_COM_INVALID_CFG_MSG TAOS_DEF_ERROR_CODE(0, 0x0103) //"Invalid config message") +#define TSDB_CODE_COM_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0104) //"Data file corrupted") +#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0105) //"Ref out of memory") +#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0106) //"too many Ref Objs") +#define TSDB_CODE_REF_ID_REMOVED TAOS_DEF_ERROR_CODE(0, 0x0107) //"Ref ID is removed") +#define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) //"Invalid Ref ID") +#define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) //"Ref is already there") +#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there") //client -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_SQL, 0, 0x0200, "Invalid SQL statement") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_QHANDLE, 0, 0x0201, "Invalid qhandle") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TIME_STAMP, 0, 0x0202, "Invalid combination of client/service time") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_VALUE, 0, 0x0203, "Invalid value in client") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_VERSION, 0, 0x0204, "Invalid client version") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_IE, 0, 0x0205, "Invalid client ie") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_FQDN, 0, 0x0206, "Invalid host name") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_USER_LENGTH, 0, 0x0207, "Invalid user name") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PASS_LENGTH, 0, 0x0208, "Invalid password") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_DB_LENGTH, 0, 0x0209, "Database name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH, 0, 0x020A, "Table name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_CONNECTION, 0, 0x020B, "Invalid connection") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_OUT_OF_MEMORY, 0, 0x020C, "System out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_DISKSPACE, 0, 0x020D, "System out of disk space") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_CACHE_ERASED, 0, 0x020E, "Query cache erased") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_CANCELLED, 0, 0x020F, "Query terminated") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SORTED_RES_TOO_MANY, 0, 0x0210, "Result set too large to be sorted") // too many result for ordered super table projection query -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_APP_ERROR, 0, 0x0211, "Application error") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 0, 0x0212, "Action in progress") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, 0, 0x0213, "Disconnected from service") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, 0, 0x0214, "No write permission") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax error in SQL") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, 0, 0x0217, "Database not specified or available") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, 0, 0x0218, "Table does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, 0, 0x0219, "SQL statement too long, check maxSQLLength config") +#define TSDB_CODE_TSC_INVALID_SQL TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid SQL statement") +#define TSDB_CODE_TSC_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0201) //"Invalid qhandle") +#define TSDB_CODE_TSC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0202) //"Invalid combination of client/service time") +#define TSDB_CODE_TSC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x0203) //"Invalid value in client") +#define 
TSDB_CODE_TSC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0204) //"Invalid client version") +#define TSDB_CODE_TSC_INVALID_IE TAOS_DEF_ERROR_CODE(0, 0x0205) //"Invalid client ie") +#define TSDB_CODE_TSC_INVALID_FQDN TAOS_DEF_ERROR_CODE(0, 0x0206) //"Invalid host name") +#define TSDB_CODE_TSC_INVALID_USER_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0207) //"Invalid user name") +#define TSDB_CODE_TSC_INVALID_PASS_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0208) //"Invalid password") +#define TSDB_CODE_TSC_INVALID_DB_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0209) //"Database name too long") +#define TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH TAOS_DEF_ERROR_CODE(0, 0x020A) //"Table name too long") +#define TSDB_CODE_TSC_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x020B) //"Invalid connection") +#define TSDB_CODE_TSC_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x020C) //"System out of memory") +#define TSDB_CODE_TSC_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x020D) //"System out of disk space") +#define TSDB_CODE_TSC_QUERY_CACHE_ERASED TAOS_DEF_ERROR_CODE(0, 0x020E) //"Query cache erased") +#define TSDB_CODE_TSC_QUERY_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x020F) //"Query terminated") +#define TSDB_CODE_TSC_SORTED_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0210) //"Result set too large to be sorted") // too many result for ordered super table projection query +#define TSDB_CODE_TSC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0211) //"Application error") +#define TSDB_CODE_TSC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0212) //"Action in progress") +#define TSDB_CODE_TSC_DISCONNECTED TAOS_DEF_ERROR_CODE(0, 0x0213) //"Disconnected from service") +#define TSDB_CODE_TSC_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0214) //"No write permission") +#define TSDB_CODE_TSC_CONN_KILLED TAOS_DEF_ERROR_CODE(0, 0x0215) //"Connection killed") +#define TSDB_CODE_TSC_SQL_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x0216) //"Syntax error in SQL") +#define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217) //"Database not specified or available") +#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218) //"Table does not exist") +#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config") // mnode -TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_IN_PROGRESS, 0, 0x0301, "Message is progressing") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_NEED_REPROCESSED, 0, 0x0302, "Messag need to be reprocessed") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_RIGHTS, 0, 0x0303, "Insufficient privilege for operation") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_APP_ERROR, 0, 0x0304, "Unexpected generic error in mnode") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONNECTION, 0, 0x0305, "Invalid message connection") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_VERSION, 0, 0x0306, "Incompatible protocol version") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_LEN, 0, 0x0307, "Invalid message length") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_TYPE, 0, 0x0308, "Invalid message type") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_SHELL_CONNS, 0, 0x0309, "Too many connections") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_OUT_OF_MEMORY, 0, 0x030A, "Out of memory in mnode") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, 0, 0x030B, "Data expired") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, 0, 0x030C, "Invalid query id") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_ID, 0, 0x030D, "Invalid stream id") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, 0, 0x030E, "Invalid connection id") 
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_IS_RUNNING, 0, 0x0310, "mnode is alreay running") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC, 0, 0x0311, "failed to config sync") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_START_SYNC, 0, 0x0312, "failed to start sync") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CREATE_DIR, 0, 0x0313, "failed to create mnode dir") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_INIT_STEP, 0, 0x0314, "failed to init components") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE, 0, 0x0320, "Object already there") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_ERROR, 0, 0x0321, "Unexpected generic error in sdb") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE, 0, 0x0322, "Invalid table type") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, 0, 0x0323, "Object not there") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, 0, 0x0324, "Invalid meta row") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, 0, 0x0325, "Invalid key type") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "DNode already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "DNode does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "VGroup does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "Master DNode cannot be removed") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "Out of DNodes") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "Cluster cfg inconsistent") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, 0, 0x0336, "Invalid dnode cfg option") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, 0, 0x0337, "Balance already enabled") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, 0, 0x0338, "Vgroup not in dnode") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, 0, 0x0339, "Vgroup already in dnode") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE, 0, 0x033A, "Dnode not avaliable") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, 0, 0x033B, "Cluster id not match") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_READY, 0, 0x033C, "Cluster not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED, 0, 0x033D, "Dnode Id not configured") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED, 0, 0x033E, "Dnode Ep not configured") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "Account already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "Invalid account") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0342, "Invalid account options") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_EXPIRED, 0, 0x0343, "Account authorization has expired") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, 0, 0x0350, "User already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, 0, 0x0351, "Invalid user") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER_FORMAT, 0, 0x0352, "Invalid user format") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_PASS_FORMAT, 0, 0x0353, "Invalid password format") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_USER_FROM_CONN, 0, 0x0354, "Can not get user from conn") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_USERS, 0, 0x0355, "Too many users") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TABLE_ALREADY_EXIST, 0, 0x0360, "Table already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, 0, 0x0361, "Table name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, 0, 0x0362, "Table does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, 0, 0x0363, "Invalid table type in 
tsdb") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, 0, 0x0364, "Too many tags") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, 0, 0x0366, "Too many time series") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, 0, 0x0367, "Not super table") // operation only available for super table -TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, 0, 0x0368, "Tag name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, 0, 0x0369, "Tag already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, 0, 0x036A, "Tag does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, 0, 0x036B, "Field already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, 0, 0x036C, "Field does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, 0, 0x036D, "Super table does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG, 0, 0x036E, "Invalid create table message") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, 0, 0x0380, "Database not specified or available") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, 0, 0x0381, "Database already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, 0, 0x0382, "Invalid database options") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, 0, 0x0383, "Invalid database name") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, 0, 0x0384, "Cannot delete monitor database") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, 0, 0x0385, "Too many databases for account") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "Database not available") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_READY, 0, 0x0387, "Database unsynced") - -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_DAYS, 0, 0x0390, "Invalid database option: days out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP, 0, 0x0391, "Invalid database option: keep >= keep2 >= keep1 >= days") +#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed") +#define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) //"Message is progressing") +#define TSDB_CODE_MND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0302) //"Messag need to be reprocessed") +#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) //"Insufficient privilege for operation") +#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0304) //"Unexpected generic error in mnode") +#define TSDB_CODE_MND_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x0305) //"Invalid message connection") +#define TSDB_CODE_MND_INVALID_MSG_VERSION TAOS_DEF_ERROR_CODE(0, 0x0306) //"Incompatible protocol version") +#define TSDB_CODE_MND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0307) //"Invalid message length") +#define TSDB_CODE_MND_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0308) //"Invalid message type") +#define TSDB_CODE_MND_TOO_MANY_SHELL_CONNS TAOS_DEF_ERROR_CODE(0, 0x0309) //"Too many connections") +#define TSDB_CODE_MND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x030A) //"Out of memory in mnode") +#define TSDB_CODE_MND_INVALID_SHOWOBJ TAOS_DEF_ERROR_CODE(0, 0x030B) //"Data expired") +#define TSDB_CODE_MND_INVALID_QUERY_ID TAOS_DEF_ERROR_CODE(0, 0x030C) //"Invalid query id") +#define TSDB_CODE_MND_INVALID_STREAM_ID TAOS_DEF_ERROR_CODE(0, 0x030D) //"Invalid stream id") +#define TSDB_CODE_MND_INVALID_CONN_ID TAOS_DEF_ERROR_CODE(0, 0x030E) //"Invalid connection id") +#define TSDB_CODE_MND_MNODE_IS_RUNNING TAOS_DEF_ERROR_CODE(0, 0x0310) //"mnode is alreay running") +#define TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC 
TAOS_DEF_ERROR_CODE(0, 0x0311) //"failed to config sync") +#define TSDB_CODE_MND_FAILED_TO_START_SYNC TAOS_DEF_ERROR_CODE(0, 0x0312) //"failed to start sync") +#define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) //"failed to create mnode dir") +#define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) //"failed to init components") + +#define TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) //"Object already there") +#define TSDB_CODE_MND_SDB_ERROR TAOS_DEF_ERROR_CODE(0, 0x0321) //"Unexpected generic error in sdb") +#define TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0322) //"Invalid table type") +#define TSDB_CODE_MND_SDB_OBJ_NOT_THERE TAOS_DEF_ERROR_CODE(0, 0x0323) //"Object not there") +#define TSDB_CODE_MND_SDB_INVAID_META_ROW TAOS_DEF_ERROR_CODE(0, 0x0324) //"Invalid meta row") +#define TSDB_CODE_MND_SDB_INVAID_KEY_TYPE TAOS_DEF_ERROR_CODE(0, 0x0325) //"Invalid key type") + +#define TSDB_CODE_MND_DNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0330) //"DNode already exists") +#define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0331) //"DNode does not exist") +#define TSDB_CODE_MND_VGROUP_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0332) //"VGroup does not exist") +#define TSDB_CODE_MND_NO_REMOVE_MASTER TAOS_DEF_ERROR_CODE(0, 0x0333) //"Master DNode cannot be removed") +#define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0334) //"Out of DNodes") +#define TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT TAOS_DEF_ERROR_CODE(0, 0x0335) //"Cluster cfg inconsistent") +#define TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION TAOS_DEF_ERROR_CODE(0, 0x0336) //"Invalid dnode cfg option") +#define TSDB_CODE_MND_BALANCE_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0337) //"Balance already enabled") +#define TSDB_CODE_MND_VGROUP_NOT_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0338) //"Vgroup not in dnode") +#define TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0339) //"Vgroup already in dnode") +#define TSDB_CODE_MND_DNODE_NOT_FREE TAOS_DEF_ERROR_CODE(0, 0x033A) //"Dnode not avaliable") +#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x033B) //"Cluster id not match") +#define TSDB_CODE_MND_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x033C) //"Cluster not ready") +#define TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033D) //"Dnode Id not configured") +#define TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033E) //"Dnode Ep not configured") + +#define TSDB_CODE_MND_ACCT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0340) //"Account already exists") +#define TSDB_CODE_MND_INVALID_ACCT TAOS_DEF_ERROR_CODE(0, 0x0341) //"Invalid account") +#define TSDB_CODE_MND_INVALID_ACCT_OPTION TAOS_DEF_ERROR_CODE(0, 0x0342) //"Invalid account options") +#define TSDB_CODE_MND_ACCT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0343) //"Account authorization has expired") + +#define TSDB_CODE_MND_USER_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) //"User already exists") +#define TSDB_CODE_MND_INVALID_USER TAOS_DEF_ERROR_CODE(0, 0x0351) //"Invalid user") +#define TSDB_CODE_MND_INVALID_USER_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0352) //"Invalid user format") +#define TSDB_CODE_MND_INVALID_PASS_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0353) //"Invalid password format") +#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn") +#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users") + +#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists") 
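A side effect worth noting for these converted blocks: under the old non-TAOS_ERROR_C expansion the names were `static const int32_t` objects, which are not integer constant expressions in C and therefore could not appear as `case` labels; as plain `#define`s they now can. A hedged sketch (`handleCreateDbResult` is a hypothetical caller, not in this patch):

#include <stdint.h>
#include "taoserror.h"

/* hypothetical: map a create-database result onto caller policy */
static int32_t handleCreateDbResult(int32_t code) {
  switch (code) {
    case TSDB_CODE_SUCCESS:
    case TSDB_CODE_MND_DB_ALREADY_EXIST:  /* idempotent create: treat as success */
      return TSDB_CODE_SUCCESS;
    default:                              /* anything else propagates unchanged */
      return code;
  }
}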
+#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long") +#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist") +#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb") +#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags") +#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series") +#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table +#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long") +#define TSDB_CODE_MND_TAG_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0369) //"Tag already exists") +#define TSDB_CODE_MND_TAG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036A) //"Tag does not exist") +#define TSDB_CODE_MND_FIELD_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x036B) //"Field already exists") +#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist") +#define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist") +#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message") + +#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available") +#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists") +#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options") +#define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) //"Invalid database name") +#define TSDB_CODE_MND_MONITOR_DB_FORBIDDEN TAOS_DEF_ERROR_CODE(0, 0x0384) //"Cannot delete monitor database") +#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) //"Too many databases for account") +#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //"Database not available") +#define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) //"Database unsynced") + +#define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) //"Invalid database option: days out of range") +#define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 0x0391) //"Invalid database option: keep >= keep1 >= keep0 >= days") // dnode -TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "Message not processed") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "Dnode out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "No permission for disk files in dnode") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "Invalid message length") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, 0, 0x0404, "Action in progress") -TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, 0, 0x0405, "Too many vnode directories") - -// vnode -TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "Action in progress") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_MSG_NOT_PROCESSED, 0, 0x0501, "Message not processed") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_NEED_REPROCESSED, 0, 0x0502, "Action need to be reprocessed") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, 0, 0x0503, "Invalid Vgroup ID") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_INIT_FAILED, 0, 0x0504, "Vnode initialization failed") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISKSPACE, 0, 0x0505, "System out of disk space") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 
0x0506, "No write permission for disk files") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing data file") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Database memory is full for commit failed") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, 0, 0x050C, "Database memory is full for waiting commit") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, 0, 0x050D, "Database is dropping") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, 0, 0x050E, "Database is balancing") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Database write operation denied") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, 0, 0x0513, "Database is syncing") +#define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) //"Message not processed") +#define TSDB_CODE_DND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0401) //"Dnode out of memory") +#define TSDB_CODE_DND_NO_WRITE_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0402) //"No permission for disk files in dnode") +#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length") +#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress") +#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories") + +// vnode +#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress") +#define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) //"Message not processed") +#define TSDB_CODE_VND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0502) //"Action need to be reprocessed") +#define TSDB_CODE_VND_INVALID_VGROUP_ID TAOS_DEF_ERROR_CODE(0, 0x0503) //"Invalid Vgroup ID") +#define TSDB_CODE_VND_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0504) //"Vnode initialization failed") +#define TSDB_CODE_VND_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0505) //"System out of disk space") +#define TSDB_CODE_VND_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0506) //"No write permission for disk files") +#define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file") +#define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory") +#define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode") +#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file") +#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed") +#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") +#define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") +#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") +#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") +#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") +#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") // tsdb -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "Invalid table ID") 
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_TYPE, 0, 0x0601, "Invalid table type") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION, 0, 0x0602, "Invalid table schema version") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_ALREADY_EXIST, 0, 0x0603, "Table already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CONFIG, 0, 0x0604, "Invalid configuration") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INIT_FAILED, 0, 0x0605, "Tsdb init failed") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISKSPACE, 0, 0x0606, "No diskspace for tsdb") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISK_PERMISSIONS, 0, 0x0607, "No permission for disk files") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_CORRUPTED, 0, 0x0608, "Data file(s) corrupted") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_OUT_OF_MEMORY, 0, 0x0609, "Out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE, 0, 0x060A, "Tag too old") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE, 0, 0x060B, "Timestamp data out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP, 0, 0x060C, "Submit message is messed up") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, 0, 0x060D, "Invalid operation") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "Invalid creation of table") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "No table data in memory skiplist") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "File already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "Need to reconfigure table") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, 0, 0x0612, "Invalid information to create table") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, 0, 0x0613, "No available disk") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, 0, 0x0614, "TSDB messed message") +#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") +#define TSDB_CODE_TDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0601) //"Invalid table type") +#define TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0602) //"Invalid table schema version") +#define TSDB_CODE_TDB_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0603) //"Table already exists") +#define TSDB_CODE_TDB_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0604) //"Invalid configuration") +#define TSDB_CODE_TDB_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0605) //"Tsdb init failed") +#define TSDB_CODE_TDB_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0606) //"No diskspace for tsdb") +#define TSDB_CODE_TDB_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0607) //"No permission for disk files") +#define TSDB_CODE_TDB_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0608) //"Data file(s) corrupted") +#define TSDB_CODE_TDB_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0609) //"Out of memory") +#define TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE TAOS_DEF_ERROR_CODE(0, 0x060A) //"Tag too old") +#define TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x060B) //"Timestamp data out of range") +#define TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x060C) //"Submit message is messed up") +#define TSDB_CODE_TDB_INVALID_ACTION TAOS_DEF_ERROR_CODE(0, 0x060D) //"Invalid operation") +#define TSDB_CODE_TDB_INVALID_CREATE_TB_MSG TAOS_DEF_ERROR_CODE(0, 0x060E) //"Invalid creation of table") +#define TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM TAOS_DEF_ERROR_CODE(0, 0x060F) //"No table data in memory skiplist") +#define TSDB_CODE_TDB_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0610) //"File already exists") +#define TSDB_CODE_TDB_TABLE_RECONFIGURE TAOS_DEF_ERROR_CODE(0, 0x0611) //"Need to 
reconfigure table") +#define TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO TAOS_DEF_ERROR_CODE(0, 0x0612) //"Invalid information to create table") +#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk") +#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message") // query -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "Invalid handle") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_MSG, 0, 0x0701, "Invalid message") // failed to validate the sql expression msg by vnode -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NO_DISKSPACE, 0, 0x0702, "No diskspace for query") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_OUT_OF_MEMORY, 0, 0x0703, "System out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_APP_ERROR, 0, 0x0704, "Unexpected generic error in query") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, 0, 0x0705, "Duplicated join key") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, 0, 0x0706, "Tag conditon too many") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 0x0709, "Multiple retrieval of this query") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, 0, 0x070A, "Too many time window in query") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, 0, 0x070B, "Query buffer limit has reached") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, 0, 0x070C, "File inconsistance in replica") +#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") +#define TSDB_CODE_QRY_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x0701) //"Invalid message") // failed to validate the sql expression msg by vnode +#define TSDB_CODE_QRY_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0702) //"No diskspace for query") +#define TSDB_CODE_QRY_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0703) //"System out of memory") +#define TSDB_CODE_QRY_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0704) //"Unexpected generic error in query") +#define TSDB_CODE_QRY_DUP_JOIN_KEY TAOS_DEF_ERROR_CODE(0, 0x0705) //"Duplicated join key") +#define TSDB_CODE_QRY_EXCEED_TAGS_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0706) //"Tag conditon too many") +#define TSDB_CODE_QRY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0707) //"Query not ready") +#define TSDB_CODE_QRY_HAS_RSP TAOS_DEF_ERROR_CODE(0, 0x0708) //"Query should response") +#define TSDB_CODE_QRY_IN_EXEC TAOS_DEF_ERROR_CODE(0, 0x0709) //"Multiple retrieval of this query") +#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query") +#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached") +#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistance in replica") // grant -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_DNODE_LIMITED, 0, 0x0801, "DNode creation limited by licence") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_ACCT_LIMITED, 0, 0x0802, "Account creation limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_TIMESERIES_LIMITED, 0, 0x0803, "Table creation limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_DB_LIMITED, 0, 0x0804, "DB creation limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_USER_LIMITED, 0, 0x0805, "User creation limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CONN_LIMITED, 0, 0x0806, "Conn creation limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_STREAM_LIMITED, 0, 0x0807, "Stream creation limited by 
license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_SPEED_LIMITED, 0, 0x0808, "Write speed limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_STORAGE_LIMITED, 0, 0x0809, "Storage capacity limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_QUERYTIME_LIMITED, 0, 0x080A, "Query time limited by license") -TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, 0, 0x080B, "CPU cores limited by license") +#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired") +#define TSDB_CODE_GRANT_DNODE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0801) //"DNode creation limited by licence") +#define TSDB_CODE_GRANT_ACCT_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0802) //"Account creation limited by license") +#define TSDB_CODE_GRANT_TIMESERIES_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0803) //"Table creation limited by license") +#define TSDB_CODE_GRANT_DB_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0804) //"DB creation limited by license") +#define TSDB_CODE_GRANT_USER_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0805) //"User creation limited by license") +#define TSDB_CODE_GRANT_CONN_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0806) //"Conn creation limited by license") +#define TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807) //"Stream creation limited by license") +#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808) //"Write speed limited by license") +#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809) //"Storage capacity limited by license") +#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A) //"Query time limited by license") +#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B) //"CPU cores limited by license") // sync -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CONFIG, 0, 0x0900, "Invalid Sync Configuration") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "Sync module not enabled") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_VERSION, 0, 0x0902, "Invalid Sync version") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_CONFIRM_EXPIRED, 0, 0x0903, "Sync confirm expired") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TOO_MANY_FWDINFO, 0, 0x0904, "Too many sync fwd infos") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_PROTOCOL, 0, 0x0905, "Mismatched protocol") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_CLUSTERID, 0, 0x0906, "Mismatched clusterId") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_SIGNATURE, 0, 0x0907, "Mismatched signature") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CHECKSUM, 0, 0x0908, "Invalid msg checksum") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGLEN, 0, 0x0909, "Invalid msg length") -TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, 0, 0x090A, "Invalid msg type") +#define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) //"Invalid Sync Configuration") +#define TSDB_CODE_SYN_NOT_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0901) //"Sync module not enabled") +#define TSDB_CODE_SYN_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0902) //"Invalid Sync version") +#define TSDB_CODE_SYN_CONFIRM_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0903) //"Sync confirm expired") +#define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) //"Too many sync fwd infos") +#define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) //"Mismatched protocol") +#define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) //"Mismatched clusterId") +#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) //"Mismatched signature") +#define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) //"Invalid msg checksum") +#define TSDB_CODE_SYN_INVALID_MSGLEN 
TAOS_DEF_ERROR_CODE(0, 0x0909) //"Invalid msg length") +#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) //"Invalid msg type") // wal -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "Unexpected generic error in wal") -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, 0, 0x1001, "WAL file is corrupted") -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, 0, 0x1002, "WAL size exceeds limit") +#define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") +#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") +#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") // http -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, 0, 0x1100, "http server is not onlin") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_UNSUPPORT_URL, 0, 0x1101, "url is not support") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVLALID_URL, 0, 0x1102, "invalid url format") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_ENOUGH_MEMORY, 0, 0x1103, "no enough memory") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUSET_TOO_BIG, 0, 0x1104, "request size is too big") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_AUTH_INFO, 0, 0x1105, "no auth info input") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_MSG_INPUT, 0, 0x1106, "request is empty") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_SQL_INPUT, 0, 0x1107, "no sql input") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_EXEC_USEDB, 0, 0x1108, "no need to execute use db cmd") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SESSION_FULL, 0, 0x1109, "session list was full") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR, 0, 0x110A, "generate taosd token error") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_MULTI_REQUEST, 0, 0x110B, "size of multi request is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_CREATE_GZIP_FAILED, 0, 0x110C, "failed to create gzip") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_FINISH_GZIP_FAILED, 0, 0x110D, "failed to finish gzip") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_LOGIN_FAILED, 0, 0x110E, "failed to login") - -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_VERSION, 0, 0x1120, "invalid http version") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH, 0, 0x1121, "invalid content length") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_TYPE, 0, 0x1122, "invalid type of Authorization") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_FORMAT, 0, 0x1123, "invalid format of Authorization") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_BASIC_AUTH, 0, 0x1124, "invalid basic Authorization") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_TAOSD_AUTH, 0, 0x1125, "invalid taosd Authorization") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_METHOD_FAILED, 0, 0x1126, "failed to parse method") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_TARGET_FAILED, 0, 0x1127, "failed to parse target") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_VERSION_FAILED, 0, 0x1128, "failed to parse http version") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_SP_FAILED, 0, 0x1129, "failed to parse sp") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_STATUS_FAILED, 0, 0x112A, "failed to parse status") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_PHRASE_FAILED, 0, 0x112B, "failed to parse phrase") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CRLF_FAILED, 0, 0x112C, "failed to parse crlf") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_FAILED, 0, 0x112D, "failed to parse header") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED, 0, 0x112E, "failed to parse header key") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED, 0, 0x112F, "failed to parse header val") 
-TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED, 0, 0x1130, "failed to parse chunk size") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_FAILED, 0, 0x1131, "failed to parse chunk") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_END_FAILED, 0, 0x1132, "failed to parse end section") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_INVALID_STATE, 0, 0x1134, "invalid parse state") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_ERROR_STATE, 0, 0x1135, "failed to parse error section") - -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_NULL, 0, 0x1150, "query size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_SIZE, 0, 0x1151, "query size can not more than 100") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR, 0, 0x1152, "parse grafana json error") - -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_NOT_INPUT, 0, 0x1160, "database name can not be null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_TOO_LONG, 0, 0x1161, "database name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_INVALID_JSON, 0, 0x1162, "invalid telegraf json fromat") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_NULL, 0, 0x1163, "metrics size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_SIZE, 0, 0x1164, "metrics size can not more than 1K") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NULL, 0, 0x1165, "metric name not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_TYPE, 0, 0x1166, "metric name type should be string") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_NULL, 0, 0x1167, "metric name length is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_LONG, 0, 0x1168, "metric name length too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_NULL, 0, 0x1169, "timestamp not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE, 0, 0x116A, "timestamp type should be integer") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL, 0, 0x116B, "timestamp value smaller than 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_NULL, 0, 0x116C, "tags not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_0, 0, 0x116D, "tags size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG, 0, 0x116E, "tags size too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NULL, 0, 0x116F, "tag is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_NULL, 0, 0x1170, "tag name is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_SIZE, 0, 0x1171, "tag name length too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE, 0, 0x1172, "tag value type should be number or string") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_NULL, 0, 0x1173, "tag value is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_NULL, 0, 0x1174, "table is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_SIZE, 0, 0x1175, "table name length too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_NULL, 0, 0x1176, "fields not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_0, 0, 0x1177, "fields size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG, 0, 0x1178, "fields size too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NULL, 0, 0x1179, "field is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_NULL, 0, 0x117A, "field name is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE, 0, 0x117B, "field name length too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE, 0, 0x117C, "field value type should be number or string") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL, 0, 0x117D, "field value is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_HOST_NOT_STRING, 0, 0x117E, "host type should be string") 
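Every conversion in this file passes `mod == 0`, so the module field of TAOS_DEF_ERROR_CODE is effectively unused and subsystems are distinguished only by their 16-bit code ranges (0x03xx mnode, 0x05xx vnode, 0x11xx http, and so on). A sketch of a range test built on that observation (`isHttpError` is illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* true when `code` carries the error bit and falls in the http range */
static bool isHttpError(int32_t code) {
  uint32_t raw = (uint32_t)code & 0xffffu;
  return code < 0 && raw >= 0x1100u && raw <= 0x11ffu;
}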
-TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST, 0, 0x117F, "stable not exist") - -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_NOT_INPUT, 0, 0x1190, "database name can not be null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_TOO_LONG, 0, 0x1191, "database name too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_INVALID_JSON, 0, 0x1192, "invalid opentsdb json fromat") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_NULL, 0, 0x1193, "metrics size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_SIZE, 0, 0x1194, "metrics size can not more than 10K") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NULL, 0, 0x1195, "metric name not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_TYPE, 0, 0x1196, "metric name type should be string") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_NULL, 0, 0x1197, "metric name length is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_LONG, 0, 0x1198, "metric name length can not more than 22") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_NULL, 0, 0x1199, "timestamp not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE, 0, 0x119A, "timestamp type should be integer") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL, 0, 0x119B, "timestamp value smaller than 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_NULL, 0, 0x119C, "tags not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_0, 0, 0x119D, "tags size is 0") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG, 0, 0x119E, "tags size too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NULL, 0, 0x119F, "tag is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_NULL, 0, 0x11A0, "tag name is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_SIZE, 0, 0x11A1, "tag name length too long") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE, 0, 0x11A2, "tag value type should be boolean, number or string") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_NULL, 0, 0x11A3, "tag value is null") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, 0, 0x11A4, "tag value can not more than 64") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, 0, 0x11A5, "value not find") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, 0, 0x11A6, "value type should be boolean, number or string") +#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") +#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support") +#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format") +#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory") +#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big") +#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input") +#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty") +#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input") +#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd") +#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full") +#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error") +#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0") +#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip") +#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to 
finish gzip") +#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login") + +#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version") +#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length") +#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization") +#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization") +#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization") +#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization") +#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method") +#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target") +#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version") +#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp") +#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status") +#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase") +#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf") +#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header") +#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key") +#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val") +#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size") +#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk") +#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section") +#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state") +#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section") + +#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0") +#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100") +#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error") + +#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null") +#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long") +#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat") +#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0") +#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K") +#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find") +#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string") +#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0") +#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long") +#define 
TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find") +#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer") +#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0") +#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find") +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0") +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long") +#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null") +#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null") +#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long") +#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string") +#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null") +#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null") +#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long") +#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find") +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0") +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long") +#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null") +#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null") +#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long") +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string") +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null") +#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string") +#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist") + +#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null") +#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long") +#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat") +#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0") +#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K") +#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find") +#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string") +#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0") +#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22") +#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find") +#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer") +#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0") +#define 
TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find") +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0") +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long") +#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null") +#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null") +#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long") +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string") +#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null") +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64") +#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find") +#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string") // odbc -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, 0, 0x2100, "out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, 0, 0x2101, "convertion not a valid literal input") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_UNDEF, 0, 0x2102, "convertion undefined") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC_FRAC, 0, 0x2103, "convertion fractional truncated") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, 0, 0x2104, "convertion truncated") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, 0, 0x2105, "convertion not supported") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_OOR, 0, 0x2106, "convertion numeric value out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, 0, 0x2107, "out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, 0, 0x2108, "not supported yet") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, 0, 0x2109, "invalid handle") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, 0, 0x210a, "no result set") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, 0, 0x210b, "no fields returned") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, 0, 0x210c, "invalid cursor") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, 0, 0x210d, "statement not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, 0, 0x210e, "connection still busy") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, 0, 0x210f, "bad connection string") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, 0, 0x2110, "bad argument") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_VALID_TS, 0, 0x2111, "not a valid timestamp") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE, 0, 0x2112, "src too large") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, 0, 0x2113, "src bad sequence") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, 0, 0x2114, "src incomplete") -TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_GENERAL, 0, 0x2115, "src general") +#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory") +#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input") +#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined") +#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated") +#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated") +#define TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported") +#define TSDB_CODE_ODBC_CONV_OOR 
TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range") +#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range") +#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet") +#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle") +#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set") +#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned") +#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor") +#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready") +#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy") +#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string") +#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument") +#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp") +#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large") +#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence") +#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete") +#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general") // tfs -TAOS_DEFINE_ERROR(TSDB_CODE_FS_OUT_OF_MEMORY, 0, 0x2200, "tfs out of memory") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_CFG, 0, 0x2201, "tfs invalid mount config") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_TOO_MANY_MOUNT, 0, 0x2202, "tfs too many mount") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_DUP_PRIMARY, 0, 0x2203, "tfs duplicate primary mount") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_PRIMARY_DISK, 0, 0x2204, "tfs no primary mount") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_MOUNT_AT_TIER, 0, 0x2205, "tfs no mount at tier") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_FILE_ALREADY_EXISTS, 0, 0x2206, "tfs file already exists") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_LEVEL, 0, 0x2207, "tfs invalid level") -TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_VALID_DISK, 0, 0x2208, "tfs no valid disk") - - -#ifdef TAOS_ERROR_C -}; -#endif - +#define TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory") +#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config") +#define TSDB_CODE_FS_TOO_MANY_MOUNT TAOS_DEF_ERROR_CODE(0, 0x2202) //"tfs too many mount") +#define TSDB_CODE_FS_DUP_PRIMARY TAOS_DEF_ERROR_CODE(0, 0x2203) //"tfs duplicate primary mount") +#define TSDB_CODE_FS_NO_PRIMARY_DISK TAOS_DEF_ERROR_CODE(0, 0x2204) //"tfs no primary mount") +#define TSDB_CODE_FS_NO_MOUNT_AT_TIER TAOS_DEF_ERROR_CODE(0, 0x2205) //"tfs no mount at tier") +#define TSDB_CODE_FS_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2206) //"tfs file already exists") +#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level") +#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk") #ifdef __cplusplus } diff --git a/src/inc/tfs.h b/src/inc/tfs.h index c273be56789d515e7fcbf37882b1200886394376..76e9b17a621f9842479396ada65bb10ac147366c 100644 --- a/src/inc/tfs.h +++ b/src/inc/tfs.h @@ -64,7 +64,7 @@ typedef struct { #define tfsclose(fd) close(fd) #define tfsremove(pf) remove(TFILE_NAME(pf)) #define tfscopy(sf, df) taosCopy(TFILE_NAME(sf), TFILE_NAME(df)) -#define tfsrename(sf, df) rename(TFILE_NAME(sf), TFILE_NAME(df)) +#define tfsrename(sf, df) 
taosRename(TFILE_NAME(sf), TFILE_NAME(df)) void tfsInitFile(TFILE *pf, int level, int id, const char *bname); bool tfsIsSameFile(const TFILE *pf1, const TFILE *pf2); diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index a33f15267ae371a7f6184a233292123722734835..780302687da81d5fa415a8058929b64dae849c84 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -122,8 +122,8 @@ #define TK_UNSIGNED 103 #define TK_TAGS 104 #define TK_USING 105 -#define TK_AS 106 -#define TK_COMMA 107 +#define TK_COMMA 106 +#define TK_AS 107 #define TK_NULL 108 #define TK_SELECT 109 #define TK_UNION 110 diff --git a/src/inc/ttype.h b/src/inc/ttype.h index 05622a16bee29b1cf023fbbe0fa104fac32d5fbf..2f6b80f89d024e3d286ae20b41f645a073ddc174 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -27,7 +27,7 @@ typedef struct tstr { #define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR)) // this data type is internally used only in 'in' query to hold the values -#define TSDB_DATA_TYPE_ARRAY (TSDB_DATA_TYPE_NCHAR + 1) +#define TSDB_DATA_TYPE_ARRAY (1000) #define GET_TYPED_DATA(_v, _finalType, _type, _data) \ do { \ diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index bf77d856f9f772aeffb42f7f85d51a5841943076..66e8cf73988ab25db7544b9a52215d2279630c63 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -1,7 +1,6 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(taosdemo) -ADD_SUBDIRECTORY(taosdemox) ADD_SUBDIRECTORY(taosdump) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index c4f3cc5696a1442b38d7511ff69ddd7ff557b896..b6babc5bc53aa254e0372dbbfd235bdd4cef878a 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 24156956174364ae1be6abfa2af4c3db7c0db712..50bceb1a7169373d1c832cadf03d6d601cace264 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -30,6 +30,8 @@ #define MAX_COMMAND_SIZE 65536 #define HISTORY_FILE ".taos_history" +#define DEFAULT_RES_SHOW_NUM 100 + typedef struct SShellHistory { char* hist[MAX_HISTORY_SIZE]; int hstart; @@ -62,7 +64,7 @@ extern TAOS* shellInit(SShellArguments* args); extern void* shellLoopQuery(void* arg); extern void taos_error(TAOS_RES* tres, int64_t st); extern int regex_match(const char* s, const char* reg, int cflags); -void shellReadCommand(TAOS* con, char command[]); +int32_t shellReadCommand(TAOS* con, char command[]); int32_t shellRunCommand(TAOS* con, char* command); void shellRunCommandOnServer(TAOS* con, char command[]); void read_history(); diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index dbec3fdb05761d386d54a8e3e5a0d2d6dcb6bfeb..31ad7046e9176221e10c79b3f2367ea464529438 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -180,7 +180,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } -void shellReadCommand(TAOS *con, char *command) { +int32_t shellReadCommand(TAOS *con, char *command) { unsigned hist_counter = history.hend; char utf8_array[10] = "\0"; Command cmd; @@ -233,7 +233,7 @@ void shellReadCommand(TAOS *con, char *command) { sprintf(command, "%s%s", cmd.buffer, cmd.command); tfree(cmd.buffer); 
tfree(cmd.command); - return; + return 0; } else { updateBuffer(&cmd); } @@ -324,6 +324,8 @@ void shellReadCommand(TAOS *con, char *command) { insertChar(&cmd, &c, 1); } } + + return 0; } void *shellLoopQuery(void *arg) { @@ -342,11 +344,16 @@ void *shellLoopQuery(void *arg) { return NULL; } + int32_t err = 0; + do { // Read command from shell. memset(command, 0, MAX_COMMAND_SIZE); set_terminal_mode(); - shellReadCommand(con, command); + err = shellReadCommand(con, command); + if (err) { + break; + } reset_terminal_mode(); } while (shellRunCommand(con, command) == 0); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 716a317fca03f4a3d8c1067e1eba781208689901..a6f5869936beb7e5d9a17d0ff9ecc9fbd499db2f 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -76,7 +76,11 @@ TAOS *shellInit(SShellArguments *args) { args->user = TSDB_DEFAULT_USER; } - taos_init(); + if (taos_init()) { + printf("failed to init taos\n"); + fflush(stdout); + return NULL; + } // Connect to the database. TAOS *con = NULL; @@ -320,7 +324,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands - int error_no = 0; + int error_no = 0; + int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); if (numOfRows < 0) { atomic_store_64(&result, 0); @@ -337,7 +342,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } else { int num_rows_affacted = taos_affected_rows(pSql); et = taosGetTimestampUs(); - printf("Query OK, %d row(s) affected (%.6fs)\n", num_rows_affacted, (et - st) / 1E6); + printf("Query OK, %d of %d row(s) in database (%.6fs)\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6); } printf("\n"); @@ -387,10 +392,13 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } time_t tt; + int32_t ms = 0; if (precision == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)(val / 1000000); + ms = val % 1000000; } else { tt = (time_t)(val / 1000); + ms = val % 1000; } /* comment out as it make testcases like select_with_tags.sim fail. 
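The millisecond remainder introduced above goes negative for pre-epoch timestamps, because C integer division and remainder both truncate toward zero. The hunk below compensates by borrowing one second. A worked example of the arithmetic:

/* val = -1500 in ms precision, i.e. 1.5 s before the epoch:
 *   tt = -1500 / 1000 = -1      (truncates toward zero)
 *   ms = -1500 % 1000 = -500
 * Printing ".%03d" with -500 is wrong, and tt is off by one second.
 * Borrowing fixes both:  tt-- -> -2,  ms += 1000 -> 500,
 * which renders as 1969-12-31 23:59:58.500 UTC. */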
@@ -404,14 +412,22 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { #ifdef WINDOWS if (tt < 0) tt = 0; #endif + if (tt < 0 && ms != 0) { + tt--; + if (precision == TSDB_TIME_PRECISION_MICRO) { + ms += 1000000; + } else { + ms += 1000; + } + } struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + sprintf(buf + pos, ".%06d", ms); } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); + sprintf(buf + pos, ".%03d", ms); } return buf; @@ -644,6 +660,17 @@ static void printField(const char* val, TAOS_FIELD* field, int width, int32_t le } +bool isSelectQuery(TAOS_RES* tres) { + char *sql = tscGetSqlStr(tres); + + if (regex_match(sql, "^[\t ]*select[ \t]*", REG_EXTENDED | REG_ICASE)) { + return true; + } + + return false; +} + + static int verticalPrintResult(TAOS_RES* tres) { TAOS_ROW row = taos_fetch_row(tres); if (row == NULL) { @@ -662,18 +689,33 @@ static int verticalPrintResult(TAOS_RES* tres) { } } + uint64_t resShowMaxNum = UINT64_MAX; + + if (args.commands == NULL && args.file[0] == 0 && isSelectQuery(tres) && !tscIsQueryWithLimit(tres)) { + resShowMaxNum = DEFAULT_RES_SHOW_NUM; + } + int numOfRows = 0; - do { - printf("*************************** %d.row ***************************\n", numOfRows + 1); - int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - TAOS_FIELD* field = fields + i; + int showMore = 1; + do { + if (numOfRows < resShowMaxNum) { + printf("*************************** %d.row ***************************\n", numOfRows + 1); - int padding = (int)(maxColNameLen - strlen(field->name)); - printf("%*.s%s: ", padding, " ", field->name); + int32_t* length = taos_fetch_lengths(tres); - printField((const char*)row[i], field, 0, length[i], precision); - putchar('\n'); + for (int i = 0; i < num_fields; i++) { + TAOS_FIELD* field = fields + i; + + int padding = (int)(maxColNameLen - strlen(field->name)); + printf("%*.s%s: ", padding, " ", field->name); + + printField((const char*)row[i], field, 0, length[i], precision); + putchar('\n'); + } + } else if (showMore) { + printf("[100 Rows showed, and more rows are fetching but will not be showed. You can ctrl+c to stop or wait.]\n"); + printf("[You can add limit statement to get more or redirect results to specific file to get all.]\n"); + showMore = 0; } numOfRows++; @@ -780,16 +822,31 @@ static int horizontalPrintResult(TAOS_RES* tres) { printHeader(fields, width, num_fields); + uint64_t resShowMaxNum = UINT64_MAX; + + if (args.commands == NULL && args.file[0] == 0 && isSelectQuery(tres) && !tscIsQueryWithLimit(tres)) { + resShowMaxNum = DEFAULT_RES_SHOW_NUM; + } + int numOfRows = 0; + int showMore = 1; + do { int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - putchar(' '); - printField((const char*)row[i], fields + i, width[i], length[i], precision); - putchar(' '); - putchar('|'); + if (numOfRows < resShowMaxNum) { + for (int i = 0; i < num_fields; i++) { + putchar(' '); + printField((const char*)row[i], fields + i, width[i], length[i], precision); + putchar(' '); + putchar('|'); + } + putchar('\n'); + } else if (showMore) { + printf("[100 Rows showed, and more rows are fetching but will not be showed. 
You can ctrl+c to stop or wait.]\n"); + printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); + showMore = 0; } - putchar('\n'); + numOfRows++; row = taos_fetch_row(tres); } while(row != NULL); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index a8f457377dd0b150f8e0938e08a81f6d79d911d6..07b21531a7f1cf9fc6346a27373fe9de8895f2b2 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -172,7 +172,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } -void shellReadCommand(TAOS *con, char *command) { +int32_t shellReadCommand(TAOS *con, char *command) { unsigned hist_counter = history.hend; char utf8_array[10] = "\0"; Command cmd; @@ -186,6 +186,10 @@ void shellReadCommand(TAOS *con, char *command) { while (1) { c = (char)getchar(); // getchar() return an 'int' value + if (c == EOF) { + return c; + } + if (c < 0) { // For UTF-8 int count = countPrefixOnes(c); utf8_array[0] = c; @@ -225,7 +229,7 @@ void shellReadCommand(TAOS *con, char *command) { sprintf(command, "%s%s", cmd.buffer, cmd.command); tfree(cmd.buffer); tfree(cmd.command); - return; + return 0; } else { updateBuffer(&cmd); } @@ -316,6 +320,8 @@ void shellReadCommand(TAOS *con, char *command) { insertChar(&cmd, &c, 1); } } + + return 0; } void *shellLoopQuery(void *arg) { @@ -333,12 +339,17 @@ void *shellLoopQuery(void *arg) { uError("failed to malloc command"); return NULL; } + + int32_t err = 0; do { // Read command from shell. memset(command, 0, MAX_COMMAND_SIZE); set_terminal_mode(); - shellReadCommand(con, command); + err = shellReadCommand(con, command); + if (err) { + break; + } reset_terminal_mode(); } while (shellRunCommand(con, command) == 0); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 49de42796caf31f426c60945d7d252fd10372f28..4c7e550760cecb7c045cb8c94fc431cb5f91812b 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -110,7 +110,10 @@ int main(int argc, char* argv[]) { } if (args.netTestRole && args.netTestRole[0] != 0) { - taos_init(); + if (taos_init()) { + printf("Failed to init taos"); + exit(EXIT_FAILURE); + } taosNetTest(args.netTestRole, args.host, args.port, args.pktLen); exit(0); } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 56021d9c34a812fcf96f2ca902ee97e9f6dd88af..11c1ac34d8bc6058b33699c4d65d9d1224b88d31 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -221,7 +221,7 @@ void insertChar(Command *cmd, char c) { cmd->command[cmd->cursorOffset++] = c; } -void shellReadCommand(TAOS *con, char command[]) { +int32_t shellReadCommand(TAOS *con, char command[]) { Command cmd; memset(&cmd, 0, sizeof(cmd)); cmd.buffer = (char *)calloc(1, MAX_COMMAND_SIZE); @@ -241,7 +241,7 @@ void shellReadCommand(TAOS *con, char command[]) { cmd.buffer = NULL; free(cmd.command); cmd.command = NULL; - return; + return 0; } else { shellPrintContinuePrompt(); updateBuffer(&cmd); @@ -251,6 +251,8 @@ void shellReadCommand(TAOS *con, char command[]) { insertChar(&cmd, c); } } + + return 0; } void *shellLoopQuery(void *arg) { @@ -258,12 +260,17 @@ void *shellLoopQuery(void *arg) { char *command = malloc(MAX_COMMAND_SIZE); if (command == NULL) return NULL; + int32_t err = 0; + do { memset(command, 0, MAX_COMMAND_SIZE); shellPrintPrompt(); // Read command from shell. 
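One caveat on the EOF test added to shellLinux.c above: getchar() returns an int precisely so that EOF (-1) stays distinguishable from a legitimate 0xFF byte, so after the (char) cast the `c == EOF` comparison only behaves on platforms where char is signed (which this codebase already assumes for its UTF-8 handling). The conventional shape, for reference:

/* Sketch: keep the int until EOF has been ruled out, then narrow. */
int ch;
while ((ch = getchar()) != EOF) {
  char c = (char)ch;   /* safe to narrow once EOF is excluded */
  /* ... feed c into the command buffer ... */
}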
- shellReadCommand(con, command);
+ err = shellReadCommand(con, command);
+ if (err) {
+ break;
+ }
} while (shellRunCommand(con, command) == 0);
return NULL;
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index f74dbc2de4c045e209fe0510207d1f38a89d7a6c..a75157b94c0bcabfccfb037b3375b0f1a4136b0f 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -1,29 +1,35 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
-INCLUDE_DIRECTORIES(inc)
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson)
ENDIF ()
ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ SET_SOURCE_FILES_PROPERTIES(./taosdemo.c PROPERTIES COMPILE_FLAGS -w)
+ IF (TD_SOMODE_STATIC)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson)
+ ENDIF ()
ELSEIF (TD_DARWIN)
+ # missing a few dependencies, such as
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
-
+
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson)
ENDIF ()
ENDIF ()
+
diff --git a/src/kit/taosdemo/insert.json b/src/kit/taosdemo/insert.json
new file mode 100644
index 0000000000000000000000000000000000000000..5276b9fb61af9645578392a0035c8a95898c75e2
--- /dev/null
+++ b/src/kit/taosdemo/insert.json
@@ -0,0 +1,57 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "num_of_records_per_req": 100,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb_",
+ "auto_create_table": "no",
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100000,
+ "multi_thread_write_one_tbl": "no",
+ "number_of_tbl_in_one_sql": 0,
+ "rows_per_tbl": 100,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/src/kit/taosdemo/query.json b/src/kit/taosdemo/query.json
new file mode 100644
index 0000000000000000000000000000000000000000..33ac120bdae283f64c4419d2885a93d36a7c8093
--- /dev/null
+++ b/src/kit/taosdemo/query.json
@@ -0,0 +1,19 @@
+{
+ "filetype":"query",
+ "cfgdir": "/etc/taos",
+ "host": 
"127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "yes", + "databases": "dbx", + "specified_table_query": + {"query_interval":1, "concurrent":4, + "sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"}, + {"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}] + }, + "super_table_query": + {"stblname": "stb", "query_interval":1, "threads":4, + "sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}] + } +} diff --git a/src/kit/taosdemox/subscribe.json b/src/kit/taosdemo/subscribe.json similarity index 96% rename from src/kit/taosdemox/subscribe.json rename to src/kit/taosdemo/subscribe.json index f70b1213a884af7d593b3d7366268ce03de1d239..fd33a2e2e2515ac268764c0a7f3f8356e998becd 100644 --- a/src/kit/taosdemox/subscribe.json +++ b/src/kit/taosdemo/subscribe.json @@ -5,7 +5,7 @@ "port": 6030, "user": "root", "password": "taosdata", - "databases": "db", + "databases": "dbx", "specified_table_query": {"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes", "sqls": [{"sql": "select avg(col1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}] diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 4d8c64d8b319072c857ac800fdc5672c3b05214c..0353fcabd3dc5a20defa67db36b022715089260c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -13,7 +13,13 @@ * along with this program. If not, see . */ + +/* + when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. +*/ + #define _GNU_SOURCE +#define CURL_STATICLIB #ifdef LINUX #include "os.h" @@ -39,24 +45,133 @@ #include #include #include "os.h" + +#ifdef TD_WINDOWS + #pragma comment ( lib, "ws2_32.lib" ) + #pragma comment ( lib, "winmm.lib" ) + #pragma comment ( lib, "wldap32.lib" ) +#endif #endif +#include "cJSON.h" + #include "taos.h" #include "tutil.h" +#define REQ_EXTRA_BUF_LEN 1024 +#define RESP_BUF_LEN 4096 + extern char configDir[]; -#define BUFFER_SIZE 65536 -#define MAX_DB_NAME_SIZE 64 -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE 16000 -#define MAX_NUM_DATATYPE 10 -#define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 -#define MAX_PREPARED_RAND 1000000 +#define INSERT_JSON_NAME "insert.json" +#define QUERY_JSON_NAME "query.json" +#define SUBSCRIBE_JSON_NAME "subscribe.json" + +#define INSERT_MODE 0 +#define QUERY_MODE 1 +#define SUBSCRIBE_MODE 2 + +#define MAX_SQL_SIZE 65536 +#define BUFFER_SIZE (65536*2) +#define MAX_DB_NAME_SIZE 64 +#define MAX_TB_NAME_SIZE 64 +#define MAX_DATA_SIZE 16000 +#define MAX_NUM_DATATYPE 10 +#define OPT_ABORT 1 /* –abort */ +#define STRING_LEN 60000 +#define MAX_PREPARED_RAND 1000000 +#define MAX_FILE_NAME_LEN 256 + +#define MAX_SAMPLES_ONCE_FROM_FILE 10000 +#define MAX_NUM_DATATYPE 10 + +#define MAX_DB_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 8 +#define MAX_COLUMN_COUNT 1024 +#define MAX_TAG_COUNT 128 + +#define MAX_QUERY_SQL_COUNT 10 +#define MAX_QUERY_SQL_LENGTH 256 + +#define MAX_DATABASE_COUNT 256 + +typedef enum CREATE_SUB_TALBE_MOD_EN { + PRE_CREATE_SUBTBL, + AUTO_CREATE_SUBTBL, + NO_CREATE_SUBTBL +} CREATE_SUB_TALBE_MOD_EN; + +typedef enum TALBE_EXISTS_EN { + TBL_ALREADY_EXISTS, + TBL_NO_EXISTS, + TBL_EXISTS_BUTT +} TALBE_EXISTS_EN; + +enum MODE { + SYNC, + ASYNC, + MODE_BUT +}; + +enum QUERY_TYPE { + NO_INSERT_TYPE, + INSERT_TYPE, + QUERY_TYPE_BUT +} ; + +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + 
TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- +enum _show_stables_index { + TSDB_SHOW_STABLES_NAME_INDEX, + TSDB_SHOW_STABLES_CREATED_TIME_INDEX, + TSDB_SHOW_STABLES_COLUMNS_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_TID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_MAX_SHOW_STABLES +}; +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct { + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[128]; +} SColDes; /* Used by main to communicate with parse_opt. */ -typedef struct DemoArguments { +typedef struct SArguments_S { + char * metaFile; + int test_mode; char * host; uint16_t port; char * user; @@ -67,1057 +182,4524 @@ typedef struct DemoArguments { char * sqlFile; bool use_metric; bool insert_only; + bool answer_yes; + bool debug_print; char * output_file; int mode; char * datatype[MAX_NUM_DATATYPE + 1]; int len_of_binary; int num_of_CPR; int num_of_threads; + int insert_interval; int num_of_RPR; int num_of_tables; int num_of_DPT; int abort; - int order; - int rate; + int disorderRatio; + int disorderRange; int method_of_delete; char ** arg_list; -} SDemoArguments; - -#ifdef LINUX - /* The options we understand. */ - static struct argp_option options[] = { - {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0}, - {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1}, - {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2}, - #ifdef _TD_POWER_ - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3}, - #else - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3}, - #endif - {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, - {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3}, - {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3}, - {0, 's', "sql file", 0, "The select sql file.", 3}, - {0, 'M', 0, 0, "Use metric flag.", 13}, - {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14}, - {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6}, - {0, 'b', "type_of_cols", 0, "The data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.", 7}, - {0, 'w', "length_of_binary", 0, "The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8", 8}, - {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 3.", 8}, - {0, 'T', "num_of_threads", 0, "The number of threads. 
Default is 10.", 9}, - {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10}, - {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11}, - {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12}, - #ifdef _TD_POWER_ - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14}, - #else - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14}, - #endif - {0, 'x', 0, 0, "Insert only flag.", 13}, - {0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14}, - {0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14}, - {0, 'D', "delete table", 0, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database", 14}, - {0}}; - - /* Parse a single option. */ - static error_t parse_opt(int key, char *arg, struct argp_state *state) { - /* Get the input argument from argp_parse, which we - know is a pointer to our arguments structure. */ - SDemoArguments *arguments = state->input; - wordexp_t full_path; - char **sptr; - switch (key) { - case 'h': - arguments->host = arg; - break; - case 'p': - arguments->port = atoi(arg); - break; - case 'u': - arguments->user = arg; - break; - case 'P': - arguments->password = arg; - break; - case 'o': - arguments->output_file = arg; - break; - case 's': - arguments->sqlFile = arg; - break; - case 'q': - arguments->mode = atoi(arg); - break; - case 'T': - arguments->num_of_threads = atoi(arg); - break; - case 'r': - arguments->num_of_RPR = atoi(arg); - break; - case 't': - arguments->num_of_tables = atoi(arg); - break; - case 'n': - arguments->num_of_DPT = atoi(arg); - break; - case 'd': - arguments->database = arg; - break; - case 'l': - arguments->num_of_CPR = atoi(arg); - break; - case 'b': - sptr = arguments->datatype; - if (strstr(arg, ",") == NULL) { - if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 && - strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && - strcasecmp(arg, "SMALLINT") != 0 && - strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && - strcasecmp(arg, "BINARY") && strcasecmp(arg, "NCHAR")) { - argp_error(state, "Invalid data_type!"); - } - sptr[0] = arg; - } else { - int index = 0; - char *dupstr = strdup(arg); - char *running = dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") != 0 && - strcasecmp(token, "FLOAT") != 0 && - strcasecmp(token, "TINYINT") != 0 && - strcasecmp(token, "BOOL") != 0 && - strcasecmp(token, "SMALLINT") != 0 && - strcasecmp(token, "BIGINT") != 0 && - strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { - argp_error(state, "Invalid data_type!"); - } - sptr[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - } - break; - case 'w': - arguments->len_of_binary = atoi(arg); - break; - case 'm': - arguments->tb_prefix = arg; - break; - case 'M': - arguments->use_metric = true; - break; - case 'x': - arguments->insert_only = true; - break; - case 'c': - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - break; - case 'O': - arguments->order = atoi(arg); - if (arguments->order > 1 || arguments->order < 0) - { - arguments->order = 0; - } 
else if (arguments->order == 1) - { - arguments->rate = 10; - } - break; - case 'R': - arguments->rate = atoi(arg); - if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) - { - arguments->rate = 10; - } - break; - case 'a': - arguments->replica = atoi(arg); - if (arguments->replica > 3 || arguments->replica < 1) - { - arguments->replica = 1; - } - break; - case 'D': - arguments->method_of_delete = atoi(arg); - if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) - { - arguments->method_of_delete = 0; - } - break; - case OPT_ABORT: - arguments->abort = 1; - break; - case ARGP_KEY_ARG: - /*arguments->arg_list = &state->argv[state->next-1]; - state->next = state->argc;*/ - argp_usage(state); - break; - - default: - return ARGP_ERR_UNKNOWN; - } - return 0; - } - - static struct argp argp = {options, parse_opt, 0, 0}; - - void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - argp_parse(&argp, argc, argv, 0, 0, arguments); - if (arguments->abort) { - #ifndef _ALPINE - error(10, 0, "ABORTED"); - #else - abort(); - #endif - } - } - -#else - void printHelp() { - char indent[10] = " "; - printf("%s%s\n", indent, "-h"); - printf("%s%s%s\n", indent, indent, "host, The host to connect to TDengine. Default is localhost."); - printf("%s%s\n", indent, "-p"); - printf("%s%s%s\n", indent, indent, "port, The TCP/IP port number to use for the connection. Default is 0."); - printf("%s%s\n", indent, "-u"); - printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'."); - printf("%s%s\n", indent, "-p"); - #ifdef _TD_POWER_ - printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'."); - #else - printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'."); - #endif - printf("%s%s\n", indent, "-d"); - printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'."); - printf("%s%s\n", indent, "-a"); - printf("%s%s%s\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 3."); - printf("%s%s\n", indent, "-m"); - printf("%s%s%s\n", indent, indent, "table_prefix, Table prefix name. Default is 't'."); - printf("%s%s\n", indent, "-s"); - printf("%s%s%s\n", indent, indent, "sql file, The select sql file."); - printf("%s%s\n", indent, "-M"); - printf("%s%s%s\n", indent, indent, "meteric, Use metric flag."); - printf("%s%s\n", indent, "-o"); - printf("%s%s%s\n", indent, indent, "outputfile, Direct output to the named file. Default is './output.txt'."); - printf("%s%s\n", indent, "-q"); - printf("%s%s%s\n", indent, indent, "query_mode, Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); - printf("%s%s\n", indent, "-b"); - printf("%s%s%s\n", indent, indent, "type_of_cols, data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'."); - printf("%s%s\n", indent, "-w"); - printf("%s%s%s\n", indent, indent, "length_of_binary, The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8"); - printf("%s%s\n", indent, "-l"); - printf("%s%s%s\n", indent, indent, "num_of_cols_per_record, The number of columns per record. Default is 3."); - printf("%s%s\n", indent, "-T"); - printf("%s%s%s\n", indent, indent, "num_of_threads, The number of threads. 
Default is 10."); - printf("%s%s\n", indent, "-r"); - printf("%s%s%s\n", indent, indent, "num_of_records_per_req, The number of records per request. Default is 1000."); - printf("%s%s\n", indent, "-t"); - printf("%s%s%s\n", indent, indent, "num_of_tables, The number of tables. Default is 10000."); - printf("%s%s\n", indent, "-n"); - printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000."); - printf("%s%s\n", indent, "-c"); - #ifdef _TD_POWER_ - printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'."); - #else - printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'."); - #endif - printf("%s%s\n", indent, "-x"); - printf("%s%s%s\n", indent, indent, "flag, Insert only flag."); - printf("%s%s\n", indent, "-O"); - printf("%s%s%s\n", indent, indent, "order, Insert mode--0: In order, 1: Out of order. Default is in order."); - printf("%s%s\n", indent, "-R"); - printf("%s%s%s\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50."); - printf("%s%s\n", indent, "-D"); - printf("%s%s%s\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); - } - - void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - char **sptr; - for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-h") == 0) { - arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { - arguments->port = atoi(argv[++i]); - } else if (strcmp(argv[i], "-u") == 0) { - arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - arguments->password = argv[++i]; - } else if (strcmp(argv[i], "-o") == 0) { - arguments->output_file = argv[++i]; - } else if (strcmp(argv[i], "-s") == 0) { - arguments->sqlFile = argv[++i]; - } else if (strcmp(argv[i], "-q") == 0) { - arguments->mode = atoi(argv[++i]); - } else if (strcmp(argv[i], "-T") == 0) { - arguments->num_of_threads = atoi(argv[++i]); - } else if (strcmp(argv[i], "-r") == 0) { - arguments->num_of_RPR = atoi(argv[++i]); - } else if (strcmp(argv[i], "-t") == 0) { - arguments->num_of_tables = atoi(argv[++i]); - } else if (strcmp(argv[i], "-n") == 0) { - arguments->num_of_DPT = atoi(argv[++i]); - } else if (strcmp(argv[i], "-d") == 0) { - arguments->database = argv[++i]; - } else if (strcmp(argv[i], "-l") == 0) { - arguments->num_of_CPR = atoi(argv[++i]); - } else if (strcmp(argv[i], "-b") == 0) { - sptr = arguments->datatype; - ++i; - if (strstr(argv[i], ",") == NULL) { - if (strcasecmp(argv[i], "INT") != 0 && strcasecmp(argv[i], "FLOAT") != 0 && - strcasecmp(argv[i], "TINYINT") != 0 && strcasecmp(argv[i], "BOOL") != 0 && - strcasecmp(argv[i], "SMALLINT") != 0 && - strcasecmp(argv[i], "BIGINT") != 0 && strcasecmp(argv[i], "DOUBLE") != 0 && - strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { - fprintf(stderr, "Invalid data_type!\n"); - printHelp(); - exit(EXIT_FAILURE); - } - sptr[0] = argv[i]; - } else { - int index = 0; - char *dupstr = strdup(argv[i]); - char *running = dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") != 0 && - strcasecmp(token, "FLOAT") != 0 && - strcasecmp(token, "TINYINT") != 0 && - strcasecmp(token, "BOOL") != 0 && - strcasecmp(token, "SMALLINT") != 0 && - strcasecmp(token, "BIGINT") != 0 && - strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { - fprintf(stderr, 
"Invalid data_type!\n"); - printHelp(); - exit(EXIT_FAILURE); - } - sptr[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - } - } else if (strcmp(argv[i], "-w") == 0) { - arguments->len_of_binary = atoi(argv[++i]); - } else if (strcmp(argv[i], "-m") == 0) { - arguments->tb_prefix = argv[++i]; - } else if (strcmp(argv[i], "-M") == 0) { - arguments->use_metric = true; - } else if (strcmp(argv[i], "-x") == 0) { - arguments->insert_only = true; - } else if (strcmp(argv[i], "-c") == 0) { - strcpy(configDir, argv[++i]); - } else if (strcmp(argv[i], "-O") == 0) { - arguments->order = atoi(argv[++i]); - if (arguments->order > 1 || arguments->order < 0) { - arguments->order = 0; - } else if (arguments->order == 1) { - arguments->rate = 10; - } - } else if (strcmp(argv[i], "-R") == 0) { - arguments->rate = atoi(argv[++i]); - if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) { - arguments->rate = 10; - } - } else if (strcmp(argv[i], "-a") == 0) { - arguments->replica = atoi(argv[++i]); - if (arguments->rate > 3 || arguments->rate < 1) { - arguments->rate = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - arguments->method_of_delete = atoi(argv[++i]); - if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) { - arguments->method_of_delete = 0; - } - } else if (strcmp(argv[i], "--help") == 0) { - printHelp(); - exit(EXIT_FAILURE); - } else { - fprintf(stderr, "wrong options\n"); - printHelp(); - exit(EXIT_FAILURE); - } - } - } - -#endif +} SArguments; + +typedef struct SColumn_S { + char field[TSDB_COL_NAME_LEN + 1]; + char dataType[MAX_TB_NAME_SIZE]; + int dataLen; + char note[128]; +} StrColumn; + +typedef struct SSuperTable_S { + char sTblName[MAX_TB_NAME_SIZE+1]; + int childTblCount; + bool superTblExists; // 0: no, 1: yes + bool childTblExists; // 0: no, 1: yes + int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + char childTblPrefix[MAX_TB_NAME_SIZE]; + char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample + char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful + + int multiThreadWriteOneTbl; // 0: no, 1: yes + int numberOfTblInOneSql; // 0/1: one table, > 1: number of tbl + int rowsPerTbl; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms or us by database precision + int maxSqlLen; // + + int64_t insertRows; // 0: no limit + int timeStampStep; + char startTimestamp[MAX_TB_NAME_SIZE]; // + char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json + char sampleFile[MAX_FILE_NAME_LEN+1]; + char tagsFile[MAX_FILE_NAME_LEN+1]; + + int columnCount; + StrColumn columns[MAX_COLUMN_COUNT]; + int tagCount; + StrColumn tags[MAX_TAG_COUNT]; + + char* childTblName; + char* colsOfCreateChildTable; + int lenOfOneRow; + int lenOfTagOfOneRow; + + char* sampleDataBuf; + int sampleDataBufSize; + //int sampleRowCount; + //int sampleUsePos; + + int tagSource; // 0: rand, 1: tag sample + char* tagDataBuf; + int tagSampleCount; + int tagUsePos; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; +} SSuperTable; -/* ******************************* Structure - * definition******************************* */ -enum MODE { - SYNC, ASYNC -}; typedef struct { + char name[TSDB_DB_NAME_LEN + 1]; + char create_time[32]; + int32_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[32]; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + 
int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[8]; // time resolution + int8_t update; + char status[16]; +} SDbInfo; + +typedef struct SDbCfg_S { +// int maxtablesPerVnode; + int minRows; + int maxRows; + int comp; + int walLevel; + int cacheLast; + int fsync; + int replica; + int update; + int keep; + int days; + int cache; + int blocks; + int quorum; + char precision[MAX_TB_NAME_SIZE]; +} SDbCfg; + +typedef struct SDataBase_S { + char dbName[MAX_DB_NAME_SIZE]; + int drop; // 0: use exists, 1: if exists, drop then new create + SDbCfg dbCfg; + int superTblCount; + SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; +} SDataBase; + +typedef struct SDbs_S { + char cfgDir[MAX_FILE_NAME_LEN+1]; + char host[MAX_DB_NAME_SIZE]; + uint16_t port; + char user[MAX_DB_NAME_SIZE]; + char password[MAX_DB_NAME_SIZE]; + char resultFile[MAX_FILE_NAME_LEN+1]; + bool use_metric; + bool insert_only; + bool do_aggreFunc; + bool queryMode; + + int threadCount; + int threadCountByCreateTbl; + int dbCount; + SDataBase db[MAX_DB_COUNT]; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; + +} SDbs; + +typedef struct SuperQueryInfo_S { + int rate; // 0: unlimit > 0 loop/s + int concurrent; + int sqlCount; + int subscribeMode; // 0: sync, 1: async + int subscribeInterval; // ms + int subscribeRestart; + int subscribeKeepProgress; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; +} SuperQueryInfo; + +typedef struct SubQueryInfo_S { + char sTblName[MAX_TB_NAME_SIZE+1]; + int rate; // 0: unlimit > 0 loop/s + int threadCnt; + int subscribeMode; // 0: sync, 1: async + int subscribeInterval; // ms + int subscribeRestart; + int subscribeKeepProgress; + int childTblCount; + char childTblPrefix[MAX_TB_NAME_SIZE]; + int sqlCount; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + + char* childTblName; +} SubQueryInfo; + +typedef struct SQueryMetaInfo_S { + char cfgDir[MAX_FILE_NAME_LEN+1]; + char host[MAX_DB_NAME_SIZE]; + uint16_t port; + char user[MAX_DB_NAME_SIZE]; + char password[MAX_DB_NAME_SIZE]; + char dbName[MAX_DB_NAME_SIZE+1]; + char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful + + SuperQueryInfo superQueryInfo; + SubQueryInfo subQueryInfo; +} SQueryMetaInfo; + +typedef struct SThreadInfo_S { TAOS *taos; int threadID; - char db_name[MAX_DB_NAME_SIZE]; + char db_name[MAX_DB_NAME_SIZE+1]; char fp[4096]; - char **datatype; - int len_of_binary; char tb_prefix[MAX_TB_NAME_SIZE]; int start_table_id; int end_table_id; - int ncols_per_record; - int nrecords_per_table; - int nrecords_per_request; - int data_of_order; int data_of_rate; - int64_t start_time; - bool do_aggreFunc; - + uint64_t start_time; char* cols; - bool use_metric; + bool use_metric; + SSuperTable* superTblInfo; - tsem_t mutex_sem; - int notFinished; + // for async insert tsem_t lock_sem; - int counter; + int64_t counter; + uint64_t st; + uint64_t et; + int64_t lastTs; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; - // insert delay statitics + // insert delay statistics int64_t cntDelay; int64_t totalDelay; int64_t avgDelay; int64_t maxDelay; int64_t minDelay; -} info; - -typedef struct { - TAOS *taos; - - char tb_name[MAX_TB_NAME_SIZE]; - int64_t timestamp; - int target; - int counter; - int nrecords_per_request; - int ncols_per_record; - char 
**data_type; - int len_of_binary; - int data_of_order; - int data_of_rate; - - tsem_t *mutex_sem; - int *notFinished; - tsem_t *lock_sem; -} sTable; +} threadInfo; -/* ******************************* Global - * variables******************************* */ -char *aggreFunc[] = {"*", "count(*)", "avg(f1)", "sum(f1)", "max(f1)", "min(f1)", "first(f1)", "last(f1)"}; - - -void queryDB(TAOS *taos, char *command); - -void *readTable(void *sarg); +#ifdef WINDOWS +#include +// Some old MinGW/CYGWIN distributions don't define this: +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif -void *readMetric(void *sarg); +static HANDLE g_stdoutHandle; +static DWORD g_consoleMode; -void *syncWrite(void *sarg); +void setupForAnsiEscape(void) { + DWORD mode = 0; + g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); -void *deleteTable(); + if(g_stdoutHandle == INVALID_HANDLE_VALUE) { + exit(GetLastError()); + } -void *asyncWrite(void *sarg); + if(!GetConsoleMode(g_stdoutHandle, &mode)) { + exit(GetLastError()); + } -int generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary); + g_consoleMode = mode; -void rand_string(char *str, int size); + // Enable ANSI escape codes + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; -void init_rand_data(); + if(!SetConsoleMode(g_stdoutHandle, mode)) { + exit(GetLastError()); + } +} -double getCurrentTime(); +void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); -void callBack(void *param, TAOS_RES *res, int code); -void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass); -void querySqlFile(TAOS* taos, char* sqlFile); + // Reset console mode + if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { + exit(GetLastError()); + } +} +#else +void setupForAnsiEscape(void) {} -int main(int argc, char *argv[]) { - SDemoArguments arguments = { NULL, // host - 0, // port - "root", // user - #ifdef _TD_POWER_ - "powerdb", // password - #else - "taosdata", // password - #endif - "test", // database - 1, // replica - "t", // tb_prefix - NULL, - false, // use_metric - false, // insert_only - "./output.txt", // output_file - 0, // mode - { - "int", // datatype - "int", - "int", - "int", - "int", - "int", - "int", - "float" - }, - 8, // len_of_binary - 1, // num_of_CPR - 1, // num_of_connections/thread - 1, // num_of_RPR - 1, // num_of_tables - 50000, // num_of_DPT - 0, // abort - 0, // order - 0, // rate - 0, // method_of_delete - NULL // arg_list - }; - - /* Parse our arguments; every option seen by parse_opt will be - reflected in arguments. 
*/ - // For demo use, change default values for some parameters; - arguments.num_of_tables = 10000; - arguments.num_of_CPR = 3; - arguments.num_of_threads = 10; - arguments.num_of_DPT = 100000; - arguments.num_of_RPR = 1000; - arguments.use_metric = true; - arguments.insert_only = false; - // end change - - parse_args(argc, argv, &arguments); - - enum MODE query_mode = arguments.mode; - char *ip_addr = arguments.host; - uint16_t port = arguments.port; - char *user = arguments.user; - char *pass = arguments.password; - char *db_name = arguments.database; - char *tb_prefix = arguments.tb_prefix; - int len_of_binary = arguments.len_of_binary; - int ncols_per_record = arguments.num_of_CPR; - int order = arguments.order; - int rate = arguments.rate; - int method_of_delete = arguments.method_of_delete; - int ntables = arguments.num_of_tables; - int threads = arguments.num_of_threads; - int nrecords_per_table = arguments.num_of_DPT; - int nrecords_per_request = arguments.num_of_RPR; - bool use_metric = arguments.use_metric; - bool insert_only = arguments.insert_only; - char **data_type = arguments.datatype; - int count_data_type = 0; - char dataString[STRING_LEN]; - bool do_aggreFunc = true; - int replica = arguments.replica; +void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); +} +#endif - if (NULL != arguments.sqlFile) { - TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port); - querySqlFile(qtaos, arguments.sqlFile); - taos_close(qtaos); - return 0; - } - init_rand_data(); +static int createDatabases(); +static void createChildTables(); +static int queryDbExec(TAOS *taos, char *command, int type); - memset(dataString, 0, STRING_LEN); - int len = 0; +/* ************ Global variables ************ */ - if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) { - do_aggreFunc = false; - } - for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) { - if (data_type[count_data_type] == NULL) { - break; - } +int32_t randint[MAX_PREPARED_RAND]; +int64_t randbigint[MAX_PREPARED_RAND]; +float randfloat[MAX_PREPARED_RAND]; +double randdouble[MAX_PREPARED_RAND]; +char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", + "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; + +SArguments g_args = { + NULL, // metaFile + 0, // test_mode + "127.0.0.1", // host + 6030, // port + "root", // user + #ifdef _TD_POWER_ + "powerdb", // password + #else + "taosdata", // password + #endif + "test", // database + 1, // replica + "t", // tb_prefix + NULL, // sqlFile + false, // use_metric + false, // insert_only + false, // debug_print + false, // answer_yes; + "./output.txt", // output_file + 0, // mode : sync or async + { + "TINYINT", // datatype + "SMALLINT", + "INT", + "BIGINT", + "FLOAT", + "DOUBLE", + "BINARY", + "NCHAR", + "BOOL", + "TIMESTAMP" + }, + 16, // len_of_binary + 10, // num_of_CPR + 10, // num_of_connections/thread + 0, // insert_interval + 100, // num_of_RPR + 10000, // num_of_tables + 10000, // num_of_DPT + 0, // abort + 0, // disorderRatio + 1000, // disorderRange + 1, // method_of_delete + NULL // arg_list +}; - len += snprintf(dataString + len, STRING_LEN - len, "%s ", data_type[count_data_type]); - } - FILE *fp = fopen(arguments.output_file, "a"); - if (NULL == fp) { - fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file); - return 1; - }; - - time_t tTime = time(NULL); - struct tm tm = *localtime(&tTime); - 
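The new flow is meta-file driven: -f points at one of the JSON files above, and its "filetype" field ("insert", "query", "subscribe") selects the run mode. A minimal sketch of that dispatch (assumed glue code; the real parsing belongs to getInfoFromJsonFile, declared further below, and uses the cJSON API already linked in by the CMake changes):

/* Sketch: map the meta file's "filetype" onto the *_MODE constants above.
 * Assumes cJSON.h and strings.h are in scope, as they are in taosdemo.c. */
static int modeFromMetaFile(const char *jsonText) {
  cJSON *root = cJSON_Parse(jsonText);
  if (root == NULL) return -1;
  int mode = INSERT_MODE;
  cJSON *ft = cJSON_GetObjectItem(root, "filetype");
  if (ft != NULL && ft->type == cJSON_String) {
    if (0 == strcasecmp(ft->valuestring, "query"))     mode = QUERY_MODE;
    if (0 == strcasecmp(ft->valuestring, "subscribe")) mode = SUBSCRIBE_MODE;
  }
  cJSON_Delete(root);
  return mode;
}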
printf("###################################################################\n"); - printf("# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port); - printf("# User: %s\n", user); - printf("# Password: %s\n", pass); - printf("# Use metric: %s\n", use_metric ? "true" : "false"); - printf("# Datatype of Columns: %s\n", dataString); - printf("# Binary Length(If applicable): %d\n", - (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? len_of_binary : -1); - printf("# Number of Columns per record: %d\n", ncols_per_record); - printf("# Number of Threads: %d\n", threads); - printf("# Number of Tables: %d\n", ntables); - printf("# Number of Data per Table: %d\n", nrecords_per_table); - printf("# Records/Request: %d\n", nrecords_per_request); - printf("# Database name: %s\n", db_name); - printf("# Table prefix: %s\n", tb_prefix); - if (order == 1) - { - printf("# Data order: %d\n", order); - printf("# Data out of order rate: %d\n", rate); - - } - printf("# Delete method: %d\n", method_of_delete); - printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - printf("###################################################################\n\n"); - printf("Press enter key to continue"); - (void)getchar(); - - fprintf(fp, "###################################################################\n"); - fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port); - fprintf(fp, "# User: %s\n", user); - fprintf(fp, "# Password: %s\n", pass); - fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false"); - fprintf(fp, "# Datatype of Columns: %s\n", dataString); - fprintf(fp, "# Binary Length(If applicable): %d\n", - (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? len_of_binary : -1); - fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record); - fprintf(fp, "# Number of Threads: %d\n", threads); - fprintf(fp, "# Number of Tables: %d\n", ntables); - fprintf(fp, "# Number of Data per Table: %d\n", nrecords_per_table); - fprintf(fp, "# Records/Request: %d\n", nrecords_per_request); - fprintf(fp, "# Database name: %s\n", db_name); - fprintf(fp, "# Table prefix: %s\n", tb_prefix); - if (order == 1) - { - printf("# Data order: %d\n", order); - printf("# Data out of order rate: %d\n", rate); - - } - fprintf(fp, "# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - fprintf(fp, "###################################################################\n\n"); - fprintf(fp, "| WRecords | Records/Second | Requests/Second | WLatency(ms) |\n"); - - taos_init(); - TAOS *taos = taos_connect(ip_addr, user, pass, NULL, port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - return 1; - } - char command[BUFFER_SIZE] = "\0"; - sprintf(command, "drop database %s;", db_name); - TAOS_RES* res = taos_query(taos, command); - taos_free_result(res); +static SDbs g_Dbs; +static int g_totalChildTables = 0; +static SQueryMetaInfo g_queryInfo; +static FILE * g_fpOfInsertResult = NULL; - sprintf(command, "create database %s replica %d;", db_name, replica); - res = taos_query(taos, command); - taos_free_result(res); +#define debugPrint(fmt, ...) 
\ + do { if (g_args.debug_print) fprintf(stderr, fmt, __VA_ARGS__); } while(0) +/////////////////////////////////////////////////// - char cols[STRING_LEN] = "\0"; - int colIndex = 0; - len = 0; +void printHelp() { + char indent[10] = " "; + printf("%s%s%s%s\n", indent, "-f", indent, + "The meta file to the execution procedure. Default is './meta.json'."); + printf("%s%s%s%s\n", indent, "-u", indent, + "The TDengine user name to use when connecting to the server. Default is 'root'."); +#ifdef _TD_POWER_ + printf("%s%s%s%s\n", indent, "-P", indent, + "The password to use when connecting to the server. Default is 'powerdb'."); + printf("%s%s%s%s\n", indent, "-c", indent, + "Configuration directory. Default is '/etc/power/'."); +#else + printf("%s%s%s%s\n", indent, "-P", indent, + "The password to use when connecting to the server. Default is 'taosdata'."); + printf("%s%s%s%s\n", indent, "-c", indent, + "Configuration directory. Default is '/etc/taos/'."); +#endif + printf("%s%s%s%s\n", indent, "-h", indent, + "The host to connect to TDengine. Default is localhost."); + printf("%s%s%s%s\n", indent, "-p", indent, + "The TCP/IP port number to use for the connection. Default is 0."); + printf("%s%s%s%s\n", indent, "-d", indent, + "Destination database. Default is 'test'."); + printf("%s%s%s%s\n", indent, "-a", indent, + "Set the replica parameters of the database, Default 1, min: 1, max: 3."); + printf("%s%s%s%s\n", indent, "-m", indent, + "Table prefix name. Default is 't'."); + printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file."); + printf("%s%s%s%s\n", indent, "-M", indent, "Use metric flag."); + printf("%s%s%s%s\n", indent, "-o", indent, + "Direct output to the named file. Default is './output.txt'."); + printf("%s%s%s%s\n", indent, "-q", indent, + "Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); + printf("%s%s%s%s\n", indent, "-b", indent, + "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP."); + printf("%s%s%s%s\n", indent, "-w", indent, + "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); + printf("%s%s%s%s\n", indent, "-l", indent, + "The number of columns per record. Default is 10."); + printf("%s%s%s%s\n", indent, "-T", indent, + "The number of threads. Default is 10."); + printf("%s%s%s%s\n", indent, "-i", indent, + "The sleep time (ms) between insertion. Default is 0."); + printf("%s%s%s%s\n", indent, "-r", indent, + "The number of records per request. Default is 100."); + printf("%s%s%s%s\n", indent, "-t", indent, + "The number of tables. Default is 10000."); + printf("%s%s%s%s\n", indent, "-n", indent, + "The number of records per table. Default is 10000."); + printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag."); + printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt."); + printf("%s%s%s%s\n", indent, "-O", indent, + "Insert mode--0: In order, > 0: disorder ratio. Default is in order."); + printf("%s%s%s%s\n", indent, "-R", indent, + "Out of order data's range, ms, default is 1000."); + printf("%s%s%s%s\n", indent, "-g", indent, + "Print debug info."); +/* printf("%s%s%s%s\n", indent, "-D", indent, + "if elete database if exists. 
0: no, 1: yes, default is 1"); + */ +} - for (; colIndex < ncols_per_record - 1; colIndex++) { - if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); +void parse_args(int argc, char *argv[], SArguments *arguments) { + char **sptr; + wordexp_t full_path; + + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-f") == 0) { + arguments->metaFile = argv[++i]; + } else if (strcmp(argv[i], "-c") == 0) { + char *configPath = argv[++i]; + if (wordexp(configPath, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", configPath); + return; + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + } else if (strcmp(argv[i], "-h") == 0) { + arguments->host = argv[++i]; + } else if (strcmp(argv[i], "-p") == 0) { + arguments->port = atoi(argv[++i]); + } else if (strcmp(argv[i], "-u") == 0) { + arguments->user = argv[++i]; + } else if (strcmp(argv[i], "-P") == 0) { + arguments->password = argv[++i]; + } else if (strcmp(argv[i], "-o") == 0) { + arguments->output_file = argv[++i]; + } else if (strcmp(argv[i], "-s") == 0) { + arguments->sqlFile = argv[++i]; + } else if (strcmp(argv[i], "-q") == 0) { + arguments->mode = atoi(argv[++i]); + } else if (strcmp(argv[i], "-T") == 0) { + arguments->num_of_threads = atoi(argv[++i]); + } else if (strcmp(argv[i], "-i") == 0) { + arguments->insert_interval = atoi(argv[++i]); + } else if (strcmp(argv[i], "-r") == 0) { + arguments->num_of_RPR = atoi(argv[++i]); + } else if (strcmp(argv[i], "-t") == 0) { + arguments->num_of_tables = atoi(argv[++i]); + } else if (strcmp(argv[i], "-n") == 0) { + arguments->num_of_DPT = atoi(argv[++i]); + } else if (strcmp(argv[i], "-d") == 0) { + arguments->database = argv[++i]; + } else if (strcmp(argv[i], "-l") == 0) { + arguments->num_of_CPR = atoi(argv[++i]); + } else if (strcmp(argv[i], "-b") == 0) { + sptr = arguments->datatype; + ++i; + if (strstr(argv[i], ",") == NULL) { + // only one col + if (strcasecmp(argv[i], "INT") + && strcasecmp(argv[i], "FLOAT") + && strcasecmp(argv[i], "TINYINT") + && strcasecmp(argv[i], "BOOL") + && strcasecmp(argv[i], "SMALLINT") + && strcasecmp(argv[i], "BIGINT") + && strcasecmp(argv[i], "DOUBLE") + && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "NCHAR")) { + fprintf(stderr, "Invalid data_type!\n"); + printHelp(); + exit(EXIT_FAILURE); + } + sptr[0] = argv[i]; + } else { + // more than one col + int index = 0; + char *dupstr = strdup(argv[i]); + char *running = dupstr; + char *token = strsep(&running, ","); + while (token != NULL) { + if (strcasecmp(token, "INT") + && strcasecmp(token, "FLOAT") + && strcasecmp(token, "TINYINT") + && strcasecmp(token, "BOOL") + && strcasecmp(token, "SMALLINT") + && strcasecmp(token, "BIGINT") + && strcasecmp(token, "DOUBLE") + && strcasecmp(token, "BINARY") + && strcasecmp(token, "NCHAR")) { + fprintf(stderr, "Invalid data_type!\n"); + printHelp(); + exit(EXIT_FAILURE); + } + sptr[index++] = token; + token = strsep(&running, ","); + if (index >= MAX_NUM_DATATYPE) break; + } + sptr[index] = NULL; + } + } else if (strcmp(argv[i], "-w") == 0) { + arguments->len_of_binary = atoi(argv[++i]); + } else if (strcmp(argv[i], "-m") == 0) { + arguments->tb_prefix = argv[++i]; + } else if (strcmp(argv[i], "-M") == 0) { + arguments->use_metric = true; + } else if (strcmp(argv[i], "-x") == 0) { + arguments->insert_only = true; + } 
else if (strcmp(argv[i], "-y") == 0) { + arguments->answer_yes = true; + } else if (strcmp(argv[i], "-g") == 0) { + arguments->debug_print = true; + } else if (strcmp(argv[i], "-c") == 0) { + strcpy(configDir, argv[++i]); + } else if (strcmp(argv[i], "-O") == 0) { + arguments->disorderRatio = atoi(argv[++i]); + if (arguments->disorderRatio > 1 + || arguments->disorderRatio < 0) { + arguments->disorderRatio = 0; + } else if (arguments->disorderRatio == 1) { + arguments->disorderRange = 10; + } + } else if (strcmp(argv[i], "-R") == 0) { + arguments->disorderRange = atoi(argv[++i]); + if (arguments->disorderRange == 1 + && (arguments->disorderRange > 50 + || arguments->disorderRange <= 0)) { + arguments->disorderRange = 10; + } + } else if (strcmp(argv[i], "-a") == 0) { + arguments->replica = atoi(argv[++i]); + if (arguments->replica > 3 || arguments->replica < 1) { + arguments->replica = 1; + } + } else if (strcmp(argv[i], "-D") == 0) { + arguments->method_of_delete = atoi(argv[++i]); + if (arguments->method_of_delete < 0 + || arguments->method_of_delete > 3) { + arguments->method_of_delete = 0; + } + } else if (strcmp(argv[i], "--help") == 0) { + printHelp(); + exit(0); } else { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + fprintf(stderr, "wrong options\n"); + printHelp(); + exit(EXIT_FAILURE); } } - if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0){ - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); - } else { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + if (arguments->debug_print) { + printf("###################################################################\n"); + printf("# meta file: %s\n", arguments->metaFile); + printf("# Server IP: %s:%hu\n", + arguments->host == NULL ? "localhost" : arguments->host, + arguments->port ); + printf("# User: %s\n", arguments->user); + printf("# Password: %s\n", arguments->password); + printf("# Use metric: %s\n", arguments->use_metric ? 
"true" : "false"); + if (*(arguments->datatype)) { + printf("# Specified data type: "); + for (int i = 0; i < MAX_NUM_DATATYPE; i++) + if (arguments->datatype[i]) + printf("%s,", arguments->datatype[i]); + else + break; + printf("\n"); + } + printf("# Insertion interval: %d\n", arguments->insert_interval); + printf("# Number of Columns per record: %d\n", arguments->num_of_RPR); + printf("# Number of Threads: %d\n", arguments->num_of_threads); + printf("# Number of Tables: %d\n", arguments->num_of_tables); + printf("# Number of Data per Table: %d\n", arguments->num_of_DPT); + printf("# Database name: %s\n", arguments->database); + printf("# Table prefix: %s\n", arguments->tb_prefix); + if (arguments->disorderRatio) { + printf("# Data order: %d\n", arguments->disorderRatio); + printf("# Data out of order rate: %d\n", arguments->disorderRange); + + } + printf("# Delete method: %d\n", arguments->method_of_delete); + printf("# Answer yes when prompt: %d\n", arguments->answer_yes); + printf("# Print debug info: %d\n", arguments->debug_print); + printf("###################################################################\n"); + if (!arguments->answer_yes) { + printf("Press enter key to continue\n\n"); + (void) getchar(); + } } +} - if (use_metric) { - /* Create metric table */ - printf("Creating meters super table...\n"); - snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s) tags (areaid int, loc binary(10))", db_name, cols); - queryDB(taos, command); - printf("meters created!\n"); +static bool getInfoFromJsonFile(char* file); +//static int generateOneRowDataForStb(SSuperTable* stbInfo); +//static int getDataIntoMemForStb(SSuperTable* stbInfo); +static void init_rand_data(); +void tmfclose(FILE *fp) { + if (NULL != fp) { + fclose(fp); } - taos_close(taos); - - /* Wait for table to create */ - multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass); - - /* Insert data */ - double ts = getCurrentTime(); - printf("Inserting data......\n"); - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - info *infos = malloc(threads * sizeof(info)); - - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(info)); +} - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; +void tmfree(char *buf) { + if (NULL != buf) { + free(buf); } +} - int b = 0; - if (threads != 0) - b = ntables % threads; - int last = 0; - for (int i = 0; i < threads; i++) { - info *t_info = infos + i; - t_info->threadID = i; - tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); - tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE); - t_info->datatype = data_type; - t_info->ncols_per_record = ncols_per_record; - t_info->nrecords_per_table = nrecords_per_table; - t_info->start_time = 1500000000000; - t_info->taos = taos_connect(ip_addr, user, pass, db_name, port); - t_info->len_of_binary = len_of_binary; - t_info->nrecords_per_request = nrecords_per_request; - t_info->start_table_id = last; - t_info->data_of_order = order; - t_info->data_of_rate = rate; - t_info->end_table_id = i < b ? 
last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->counter = 0; - t_info->minDelay = INT16_MAX; - - tsem_init(&(t_info->mutex_sem), 0, 1); - t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; - tsem_init(&(t_info->lock_sem), 0, 0); +static int queryDbExec(TAOS *taos, char *command, int type) { + int i; + TAOS_RES *res = NULL; + int32_t code = -1; - if (query_mode == SYNC) { - pthread_create(pids + i, NULL, syncWrite, t_info); - } else { - pthread_create(pids + i, NULL, asyncWrite, t_info); + for (i = 0; i < 5; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; } - } - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - double t = getCurrentTime() - ts; - if (query_mode == SYNC) { - printf("SYNC Insert with %d connections:\n", threads); - } else { - printf("ASYNC Insert with %d connections:\n", threads); + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } } - fprintf(fp, "|%"PRIu64" | %10.2f | %10.2f | %10.4f |\n\n", - (int64_t)ntables * nrecords_per_table, ntables * nrecords_per_table / t, - ((int64_t)ntables * nrecords_per_table) / (t * nrecords_per_request), - t * 1000); - - printf("Spent %.4f seconds to insert %"PRIu64" records with %d record(s) per request: %.2f records/second\n", - t, (int64_t)ntables * nrecords_per_table, nrecords_per_request, - (int64_t)ntables * nrecords_per_table / t); - - int64_t totalDelay = 0; - int64_t maxDelay = 0; - int64_t minDelay = INT16_MAX; - int64_t cntDelay = 0; - double avgDelay = 0; - for (int i = 0; i < threads; i++) { - info *t_info = infos + i; - taos_close(t_info->taos); - tsem_destroy(&(t_info->mutex_sem)); - tsem_destroy(&(t_info->lock_sem)); - - totalDelay += t_info->totalDelay; - cntDelay += t_info->cntDelay; - if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay; - if (t_info->minDelay < minDelay) minDelay = t_info->minDelay; + if (code != 0) { + debugPrint("DEBUG %s() LN%d - command: %s\n", __func__, __LINE__, command); + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); + taos_free_result(res); + //taos_close(taos); + return -1; } - avgDelay = (double)totalDelay / cntDelay; - - fprintf(fp, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); - - printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); - free(pids); - free(infos); - fclose(fp); - - if (method_of_delete != 0) - { - TAOS *dtaos = taos_connect(ip_addr, user, pass, db_name, port); - double dts = getCurrentTime(); - printf("Deleteing %d table(s)......\n", ntables); + if (INSERT_TYPE == type) { + int affectedRows = taos_affected_rows(res); + taos_free_result(res); + return affectedRows; + } + + taos_free_result(res); + return 0; +} - switch (method_of_delete) - { - case 1: - // delete by table - /* Create all the tables; */ - for (int i = 0; i < ntables; i++) { - sprintf(command, "drop table %s.%s%d;", db_name, tb_prefix, i); - queryDB(dtaos, command); - } - break; - case 2: - // delete by stable - if (!use_metric) { - break; - } - else - { - sprintf(command, "drop table %s.meters;", db_name); - queryDB(dtaos, command); - } - break; - case 3: - // delete by database - sprintf(command, "drop database %s;", db_name); - queryDB(dtaos, command); - break; - default: - break; +static void getResult(TAOS_RES *res, char* resultFileName) { + TAOS_ROW row = NULL; + int 
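queryDbExec above retries taos_query up to five times, frees the previous result before each retry, and returns the affected-row count only for INSERT-type statements (otherwise 0 on success, -1 on failure). A hypothetical call site, assuming a live TAOS* from taos_connect and the INSERT_TYPE/NO_INSERT_TYPE constants defined earlier in this file:

```c
/* Hypothetical usage sketch; `taos` is assumed to come from taos_connect(). */
static void demo(TAOS *taos) {
  if (queryDbExec(taos, "create database if not exists test", NO_INSERT_TYPE) != 0) {
    fprintf(stderr, "DDL failed\n");
    return;
  }
  int affected = queryDbExec(taos, "insert into test.t0 values (now, 1)", INSERT_TYPE);
  if (affected < 0)
    fprintf(stderr, "insert failed\n");
  else
    printf("%d row(s) written\n", affected);
}
```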
num_rows = 0; + int num_fields = taos_field_count(res); + TAOS_FIELD *fields = taos_fetch_fields(res); + + FILE *fp = NULL; + if (resultFileName[0] != 0) { + fp = fopen(resultFileName, "at"); + if (fp == NULL) { + fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName); } - - printf("Table(s) droped!\n"); - taos_close(dtaos); - - double dt = getCurrentTime() - dts; - printf("Spent %.4f seconds to drop %d tables\n", dt, ntables); - - FILE *fp = fopen(arguments.output_file, "a"); - fprintf(fp, "Spent %.4f seconds to drop %d tables\n", dt, ntables); - fclose(fp); - } + char* databuf = (char*) calloc(1, 100*1024*1024); + if (databuf == NULL) { + fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n"); + fclose(fp); + return ; + } - if (false == insert_only) { - // query data - pthread_t read_id; - info *rInfo = malloc(sizeof(info)); - rInfo->start_time = 1500000000000; - rInfo->start_table_id = 0; - rInfo->end_table_id = ntables - 1; - rInfo->do_aggreFunc = do_aggreFunc; - rInfo->nrecords_per_table = nrecords_per_table; - rInfo->taos = taos_connect(ip_addr, user, pass, db_name, port); - strcpy(rInfo->tb_prefix, tb_prefix); - strcpy(rInfo->fp, arguments.output_file); + int totalLen = 0; + char temp[16000]; - if (!use_metric) { - pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); + // fetch the records row by row + while ((row = taos_fetch_row(res))) { + if (totalLen >= 100*1024*1024 - 32000) { + if (fp) fprintf(fp, "%s", databuf); + totalLen = 0; + memset(databuf, 0, 100*1024*1024); } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - free(rInfo); + num_rows++; + int len = taos_print_row(temp, row, fields, num_fields); + len += sprintf(temp + len, "\n"); + //printf("query result:%s\n", temp); + memcpy(databuf + totalLen, temp, len); + totalLen += len; } - taos_cleanup(); - return 0; + if (fp) fprintf(fp, "%s", databuf); + tmfclose(fp); + free(databuf); } -#define MAX_SQL_SIZE 65536 -void selectSql(TAOS* taos, char* sqlcmd) -{ - TAOS_RES *pSql = taos_query(taos, sqlcmd); - int32_t code = taos_errno(pSql); - - if (code != 0) { - printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql)); - taos_free_result(pSql); - exit(1); +static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { + TAOS_RES *res = taos_query(taos, command); + if (res == NULL || taos_errno(res) != 0) { + printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); + taos_free_result(res); + return; } + getResult(res, resultFileName); + taos_free_result(res); +} + +double getCurrentTime() { + struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { + perror("Failed to get current time in ms"); + return 0.0; + } + + return tv.tv_sec + tv.tv_usec / 1E6; +} + +static int32_t rand_bool(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 2; +} + +static int32_t rand_tinyint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 128; +} + +static int32_t rand_smallint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 32767; +} + +static int32_t rand_int(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor]; +} + +static int64_t rand_bigint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randbigint[cursor]; + +} + +static float 
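All the rand_* helpers here share one trick: init_rand_data fills fixed tables once up front, and each helper then walks its table with a static cursor, avoiding a rand() call per generated field in the insertion hot path. Condensed to a standalone example:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_PREPARED_RAND 1024
static int randint[MAX_PREPARED_RAND];

/* Fill the table once; all later "random" values just cycle through it. */
static void init_rand_data(void) {
  for (int i = 0; i < MAX_PREPARED_RAND; i++)
    randint[i] = (int)(rand() % 65535);
}

static int rand_int_prepared(void) {
  static int cursor;
  cursor = (cursor + 1) % MAX_PREPARED_RAND;  /* cheap: no rand() call here */
  return randint[cursor];
}

int main(void) {
  init_rand_data();
  for (int i = 0; i < 5; i++)
    printf("%d\n", rand_int_prepared());
  return 0;
}
```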
rand_float(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randfloat[cursor]; +} + +static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; +void rand_string(char *str, int size) { + str[0] = 0; + if (size > 0) { + //--size; + int n; + for (n = 0; n < size; n++) { + int key = rand_tinyint() % (int)(sizeof(charset) - 1); + str[n] = charset[key]; + } + str[n] = 0; + } +} + +static double rand_double() { + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randdouble[cursor]; + +} + +static void init_rand_data() { + for (int i = 0; i < MAX_PREPARED_RAND; i++){ + randint[i] = (int)(rand() % 65535); + randbigint[i] = (int64_t)(rand() % 2147483648); + randfloat[i] = (float)(rand() / 1000.0); + randdouble[i] = (double)(rand() / 1000000.0); + } +} + +#define SHOW_PARSE_RESULT_START() \ + do { if (g_args.metaFile) \ + printf("\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_END() \ + do { if (g_args.metaFile) \ + printf("\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \ + do { if (g_args.metaFile) \ + fprintf(fp, "\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \ + do { if (g_args.metaFile) \ + fprintf(fp, "\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ + g_args.metaFile); } while(0) + +static int printfInsertMeta() { + SHOW_PARSE_RESULT_START(); + + printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); + printf("user: \033[33m%s\033[0m\n", g_Dbs.user); + printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); + printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); + printf("insert interval: \033[33m%d\033[0m\n", g_args.insert_interval); + printf("number of records per req: \033[33m%d\033[0m\n", g_args.num_of_RPR); + + printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { + printf("database[\033[33m%d\033[0m]:\n", i); + printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + printf(" drop: \033[33mno\033[0m\n"); + }else { + printf(" drop: \033[33myes\033[0m\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { 
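The \033[...m runs that dominate the printf strings here are ANSI SGR escapes: \033[1m\033[40;32m is bold green on black for the banners, \033[33m is yellow for values, and \033[0m resets. A tiny macro (hypothetical, not in the patch) shows the idea and would keep the format strings readable:

```c
#include <stdio.h>

/* Hypothetical helper: wraps a format fragment in the same yellow/reset
 * pair used throughout printfInsertMeta. */
#define HIGHLIGHT(s) "\033[33m" s "\033[0m"

int main(void) {
  printf("host: " HIGHLIGHT("%s:%u") "\n", "localhost", 6030u);
  return 0;
}
```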
+ printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); + } else { + printf("\033[1m\033[40;31m precision error: %s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); + return -1; + } + } + + printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%d\033[0m]:\n", j); + + printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); + } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" childTblExists: \033[33m%s\033[0m\n", "error"); + } + + printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); + printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); + printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource); + printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); + + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + }else { + printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + } + printf(" numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); + printf(" rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); + printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange); + printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); + printf(" maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + + printf(" timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); + printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp); + printf(" sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat); + printf(" sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile); + printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); + + printf(" columnCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == 
strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { + printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + printf("column[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + printf("\n"); + + printf(" tagCount: \033[33m%d\033[0m\n ", + g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { + printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + printf("tag[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + printf("\n"); + } + printf("\n"); + } + + SHOW_PARSE_RESULT_END(); + + return 0; +} + +static void printfInsertMetaToFile(FILE* fp) { + SHOW_PARSE_RESULT_START_TO_FILE(fp); + + fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); + fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "password: %s\n", g_Dbs.password); + fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); + fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); + fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); + + fprintf(fp, "database count: %d\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { + fprintf(fp, "database[%d]:\n", i); + fprintf(fp, " database name: %s\n", g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + fprintf(fp, " drop: no\n"); + }else { + fprintf(fp, " drop: yes\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); + } else { + fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision); + } + } + + fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); + for (int j 
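printfInsertMetaToFile, which begins above, repeats every field of printfInsertMeta with the color escapes stripped. A possible follow-up refactor (purely illustrative, not part of this change) would funnel both through one routine parameterized by stream and color:

```c
#include <stdio.h>
#include <stdbool.h>

/* Sketch: one printer for both the colored console path and the plain
 * file path, instead of two hand-maintained copies. */
static void metaPrintf(FILE *out, bool color, const char *label, const char *val) {
  if (color)
    fprintf(out, "%s \033[33m%s\033[0m\n", label, val);
  else
    fprintf(out, "%s %s\n", label, val);
}

int main(void) {
  metaPrintf(stdout, true,  "database name:", "test");  /* console, colored */
  metaPrintf(stdout, false, "database name:", "test");  /* plain, as for files */
  return 0;
}
```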
= 0; j < g_Dbs.db[i].superTblCount; j++) { + fprintf(fp, " super table[%d]:\n", j); + + fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "no"); + } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "yes"); + } else { + fprintf(fp, " autoCreateTable: %s\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "yes"); + } else { + fprintf(fp, " childTblExists: %s\n", "error"); + } + + fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount); + fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); + fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource); + fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode); + fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); + + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + fprintf(fp, " multiThreadWriteOneTbl: no\n"); + }else { + fprintf(fp, " multiThreadWriteOneTbl: yes\n"); + } + fprintf(fp, " numberOfTblInOneSql: %d\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); + fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); + fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); + fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + + fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); + fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); + + fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { + fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + fprintf(fp, "\n"); + + fprintf(fp, " tagCount: %d\n ", g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { + fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + fprintf(fp, "\n"); + } + fprintf(fp, "\n"); + } + SHOW_PARSE_RESULT_END_TO_FILE(fp); +} + +static void 
printfQueryMeta() { + SHOW_PARSE_RESULT_START(); + printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port); + printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); + printf("password: \033[33m%s\033[0m\n", g_queryInfo.password); + printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); + + printf("\n"); + printf("specified table query info: \n"); + printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); + printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + + if (SUBSCRIBE_MODE == g_args.test_mode) { + printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); + printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); + } + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); + } + printf("\n"); + printf("super table query info: \n"); + printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); + printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName); + + if (SUBSCRIBE_MODE == g_args.test_mode) { + printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode); + printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress); + } + + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount); + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]); + } + printf("\n"); + + SHOW_PARSE_RESULT_END(); +} + + +static char* xFormatTimestamp(char* buf, int64_t val, int precision) { + time_t tt; + if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); + } else { + tt = (time_t)(val / 1000); + } + +/* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + + struct tm* ptm = localtime(&tt); + size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + } else { + sprintf(buf + pos, ".%03d", (int)(val % 1000)); + } + + return buf; +} + +static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { + if (val == NULL) { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + return; + } + + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
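xFormatTimestamp above splits the raw value into whole seconds for localtime()/strftime() and a fractional remainder whose width follows the precision (.%06d for microseconds, .%03d for milliseconds). A self-contained check of the arithmetic, using gmtime so the output is timezone-independent:

```c
#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void) {
  int64_t val = 1500000000123LL;               /* a millisecond-precision value */
  time_t tt = (time_t)(val / 1000);            /* whole seconds */
  char buf[64];
  struct tm *ptm = gmtime(&tt);                /* UTC, unlike the localtime above */
  size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
  sprintf(buf + pos, ".%03d", (int)(val % 1000));
  printf("%s\n", buf);                         /* 2017-07-14 02:40:00.123 */
  return 0;
}
```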
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", *((int8_t *)val)); + break; + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", *((int16_t *)val)); + break; + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int32_t *)val)); + break; + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%" PRId64, *((int64_t *)val)); + break; + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(buf, val, length); + buf[length] = 0; + fprintf(fp, "\'%s\'", buf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + xFormatTimestamp(buf, *(int64_t*)val, precision); + fprintf(fp, "'%s'", buf); + break; + default: + break; + } +} + +static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { + TAOS_ROW row = taos_fetch_row(tres); + if (row == NULL) { + return 0; + } + + FILE* fp = fopen(fname, "at"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to open file: %s\n", fname); + return -1; + } + + int num_fields = taos_num_fields(tres); + TAOS_FIELD *fields = taos_fetch_fields(tres); + int precision = taos_result_precision(tres); + + for (int col = 0; col < num_fields; col++) { + if (col > 0) { + fprintf(fp, ","); + } + fprintf(fp, "%s", fields[col].name); + } + fputc('\n', fp); + + int numOfRows = 0; + do { + int32_t* length = taos_fetch_lengths(tres); + for (int i = 0; i < num_fields; i++) { + if (i > 0) { + fputc(',', fp); + } + xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); + } + fputc('\n', fp); + + numOfRows++; + row = taos_fetch_row(tres); + } while( row != NULL); + + fclose(fp); + + return numOfRows; +} + +static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { + TAOS_RES * res; + TAOS_ROW row = NULL; int count = 0; - while (taos_fetch_row(pSql) != NULL) { + + res = taos_query(taos, "show databases;"); + int32_t code = taos_errno(res); + + if (code != 0) { + fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + + while ((row = taos_fetch_row(res)) != NULL) { + // sys database name : 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); + return -1; + } + + tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); + dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = 
*((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+    dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+    dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+    dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+    tstrncpy(dbInfos[count]->precision,
+            (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+            fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+    dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+    tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+    count++;
+    if (count >= MAX_DATABASE_COUNT) {
+      fprintf(stderr, "Database count reached the limit of %d; ignoring the rest\n", MAX_DATABASE_COUNT);
+      break;
+    }
+  }
+
+  return count;
+}
+
+static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
+  if (filename[0] == 0)
+      return;
+
+  FILE *fp = fopen(filename, "at");
+  if (fp == NULL) {
+    fprintf(stderr, "failed to open file: %s\n", filename);
+    return;
+  }
+
+  fprintf(fp, "================ database[%d] ================\n", index);
+  fprintf(fp, "name: %s\n", dbInfos->name);
+  fprintf(fp, "created_time: %s\n", dbInfos->create_time);
+  fprintf(fp, "ntables: %d\n", dbInfos->ntables);
+  fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
+  fprintf(fp, "replica: %d\n", dbInfos->replica);
+  fprintf(fp, "quorum: %d\n", dbInfos->quorum);
+  fprintf(fp, "days: %d\n", dbInfos->days);
+  fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
+  fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
+  fprintf(fp, "blocks: %d\n", dbInfos->blocks);
+  fprintf(fp, "minrows: %d\n", dbInfos->minrows);
+  fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
+  fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
+  fprintf(fp, "fsync: %d\n", dbInfos->fsync);
+  fprintf(fp, "comp: %d\n", dbInfos->comp);
+  fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
+  fprintf(fp, "precision: %s\n", dbInfos->precision);
+  fprintf(fp, "update: %d\n", dbInfos->update);
+  fprintf(fp, "status: %s\n", dbInfos->status);
+  fprintf(fp, "\n");
+
+  fclose(fp);
+}
+
+static void printfQuerySystemInfo(TAOS * taos) {
+  char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
+  char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+  TAOS_RES* res;
+
+  time_t t;
+  struct tm* lt;
+  time(&t);
+  lt = localtime(&t);
+  snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon+1, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
+
+  // show variables
+  res = taos_query(taos, "show variables;");
+  //getResult(res, filename);
+  xDumpResultToFile(filename, res);
+
+  // show dnodes
+  res = taos_query(taos, "show dnodes;");
+  xDumpResultToFile(filename, res);
+  //getResult(res, filename);
+
+  // show databases
+  res = taos_query(taos, "show databases;");
+  SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
+  if (dbInfos == NULL) {
+    fprintf(stderr, "failed to allocate memory\n");
+    return;
+  }
+  int dbCount = getDbFromServer(taos, dbInfos);
+  if (dbCount <= 0) {
+    free(dbInfos);
+    return;
+  }
+
+  for (int i = 0; i < dbCount; i++) {
+    // printf database info
+    printfDbInfoForQueryToFile(filename, dbInfos[i], i);
+
+    // show db.vgroups
+    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+    res = taos_query(taos, buffer);
+    xDumpResultToFile(filename, res);
+
+    // show db.stables
+    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+    res = taos_query(taos, buffer);
+    xDumpResultToFile(filename, res);
+
+
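The REST path below (postProceSql) hard-codes the header "Authorization: Basic cm9vdDp0YW9zZGF0YQ==", which is simply base64("root:taosdata"), and connects to port + TSDB_PORT_HTTP, i.e. the RESTful service port (presumably 6041 next to the default 6030). Supporting non-default credentials would only take a small encoder; a standalone sketch:

```c
#include <stdio.h>
#include <string.h>

/* Minimal base64 encoder, enough for a "user:password" credential pair. */
static void base64(const unsigned char *in, int len, char *out) {
  static const char tbl[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  int i, j = 0;
  for (i = 0; i + 2 < len; i += 3) {
    out[j++] = tbl[in[i] >> 2];
    out[j++] = tbl[((in[i] & 3) << 4) | (in[i + 1] >> 4)];
    out[j++] = tbl[((in[i + 1] & 15) << 2) | (in[i + 2] >> 6)];
    out[j++] = tbl[in[i + 2] & 63];
  }
  if (i < len) {                       /* 1 or 2 trailing bytes */
    out[j++] = tbl[in[i] >> 2];
    if (i + 1 < len) {
      out[j++] = tbl[((in[i] & 3) << 4) | (in[i + 1] >> 4)];
      out[j++] = tbl[(in[i + 1] & 15) << 2];
    } else {
      out[j++] = tbl[(in[i] & 3) << 4];
      out[j++] = '=';
    }
    out[j++] = '=';
  }
  out[j] = 0;
}

int main(void) {
  const char *cred = "root:taosdata";
  char out[64];
  base64((const unsigned char *)cred, (int)strlen(cred), out);
  printf("Authorization: Basic %s\n", out);  /* cm9vdDp0YW9zZGF0YQ== */
  return 0;
}
```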
free(dbInfos[i]); + } + + free(dbInfos); + +} + +void ERROR_EXIT(const char *msg) { perror(msg); exit(0); } + +int postProceSql(char* host, uint16_t port, char* sqlstr) +{ + char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\n%s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; + + char *url = "/rest/sql"; + char *auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ=="; + + struct hostent *server; + struct sockaddr_in serv_addr; + int sockfd, bytes, sent, received, req_str_len, resp_len; + char *request_buf; + char response_buf[RESP_BUF_LEN]; + uint16_t rest_port = port + TSDB_PORT_HTTP; + + int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; + + request_buf = malloc(req_buf_len); + if (NULL == request_buf) + ERROR_EXIT("ERROR, cannot allocate memory."); + + int r = snprintf(request_buf, + req_buf_len, + req_fmt, url, host, rest_port, + auth, strlen(sqlstr), sqlstr); + if (r >= req_buf_len) { + free(request_buf); + ERROR_EXIT("ERROR too long request"); + } + printf("Request:\n%s\n", request_buf); + + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) { + free(request_buf); + ERROR_EXIT("ERROR opening socket"); + } + + server = gethostbyname(host); + if (server == NULL) { + free(request_buf); + ERROR_EXIT("ERROR, no such host"); + } + + memset(&serv_addr, 0, sizeof(serv_addr)); + serv_addr.sin_family = AF_INET; + serv_addr.sin_port = htons(rest_port); + memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length); + + if (connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr)) < 0) { + free(request_buf); + ERROR_EXIT("ERROR connecting"); + } + + req_str_len = strlen(request_buf); + sent = 0; + do { + bytes = write(sockfd, request_buf + sent, req_str_len - sent); + if (bytes < 0) + ERROR_EXIT("ERROR writing message to socket"); + if (bytes == 0) + break; + sent+=bytes; + } while (sent < req_str_len); + + memset(response_buf, 0, RESP_BUF_LEN); + resp_len = sizeof(response_buf) - 1; + received = 0; + do { + bytes = read(sockfd, response_buf + received, resp_len - received); + if (bytes < 0) { + free(request_buf); + ERROR_EXIT("ERROR reading response from socket"); + } + if (bytes == 0) + break; + received += bytes; + } while (received < resp_len); + + if (received == resp_len) { + free(request_buf); + ERROR_EXIT("ERROR storing complete response from socket"); + } + + printf("Response:\n%s\n", response_buf); + + free(request_buf); + close(sockfd); + + return 0; +} + + +char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); + + return dataBuf; +} + +char* generateTagVaulesForStb(SSuperTable* stbInfo) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! 
size:%d\n", TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); + for (int i = 0; i < stbInfo->tagCount; i++) { + if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) { + if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); + tmfree(dataBuf); + return NULL; + } + + char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1); + if (NULL == buf) { + printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen); + tmfree(dataBuf); + return NULL; + } + rand_string(buf, stbInfo->tags[i].dataLen); + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); + } else { + printf("No support data type: %s\n", stbInfo->tags[i].dataType); + tmfree(dataBuf); + return NULL; + } + } + dataLen -= 2; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); + return dataBuf; +} + +static int calcRowLen(SSuperTable* superTbls) { + int colIndex; + int lenOfOneRow = 0; + + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + lenOfOneRow += 21; + } else { + printf("get error data type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 
20; // timestamp + + int tagIndex; + int lenOfTagOfOneRow = 0; + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char* dataType = superTbls->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; + } else { + printf("get error tag type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + + return 0; +} + + +static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) { + char command[BUFFER_SIZE] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + + char* childTblName = *childTblNameOfSuperTbl; + + //get all child table name use cmd: select tbname from superTblName; + snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + taos_close(taos); + exit(-1); + } + + int childTblCount = 10000; + int count = 0; + childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + char* pTblName = childTblName; + while ((row = taos_fetch_row(res)) != NULL) { + int32_t* len = taos_fetch_lengths(res); + tstrncpy(pTblName, (char *)row[0], len[0]+1); + //printf("==== sub table name: %s\n", pTblName); + count++; + if (count >= childTblCount - 1) { + char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); + if (tmp != NULL) { + childTblName = tmp; + childTblCount = (int)(childTblCount*1.5); + memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName); + tmfree(childTblName); + taos_free_result(res); + taos_close(taos); + exit(-1); + } + } + pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; + } + + *childTblCountOfSuperTbl = count; + *childTblNameOfSuperTbl = childTblName; + + taos_free_result(res); + return 0; +} + +static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { + char command[BUFFER_SIZE] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + //get schema use cmd: describe superTblName; + snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + return -1; + } + + int 
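The per-type constants in calcRowLen above budget the maximum printed width of one value plus its separator: 11 for INT ("-2147483648"), 21 for BIGINT and TIMESTAMP ("-9223372036854775808" is 20 characters), deliberately generous 22/42 for FLOAT/DOUBLE, plus 20 for the leading timestamp column. The two exact ones can be verified directly:

```c
#include <stdio.h>
#include <limits.h>

int main(void) {
  /* snprintf(NULL, 0, ...) returns the formatted length without writing */
  printf("INT_MIN   -> %d chars\n", snprintf(NULL, 0, "%d", INT_MIN));      /* 11 */
  printf("LLONG_MIN -> %d chars\n", snprintf(NULL, 0, "%lld", LLONG_MIN));  /* 20 */
  return 0;
}
```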
tagIndex = 0; + int columnIndex = 0; + TAOS_FIELD *fields = taos_fetch_fields(res); + while ((row = taos_fetch_row(res)) != NULL) { + if (0 == count) { + count++; + continue; + } + + if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { + tstrncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + tagIndex++; + } else { + tstrncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + columnIndex++; + } + count++; + } + + superTbls->columnCount = columnIndex; + superTbls->tagCount = tagIndex; + taos_free_result(res); + + calcRowLen(superTbls); + + if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { + //get all child table name use cmd: select tbname from superTblName; + getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount); + } + return 0; +} + +static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) { + char command[BUFFER_SIZE] = "\0"; + + char cols[STRING_LEN] = "\0"; + int colIndex; + int len = 0; + + int lenOfOneRow = 0; + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(cols + len, STRING_LEN - len, + ", col%d %s(%d)", colIndex, "BINARY", + superTbls->columns[colIndex].dataLen); + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(cols + len, STRING_LEN - len, + ", col%d %s(%d)", colIndex, "NCHAR", + superTbls->columns[colIndex].dataLen); + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT"); + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { 
+ len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE"); + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP"); + lenOfOneRow += 21; + } else { + taos_close(taos); + printf("config error data type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp + //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow); + + // save for creating child table + superTbls->colsOfCreateChildTable = (char*)calloc(len+20, 1); + if (NULL == superTbls->colsOfCreateChildTable) { + printf("Failed when calloc, size:%d", len+1); + taos_close(taos); + exit(-1); + } + snprintf(superTbls->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); + debugPrint("DEBUG - %s() LN%d: %s\n", __func__, __LINE__, superTbls->colsOfCreateChildTable); + + if (use_metric) { + char tags[STRING_LEN] = "\0"; + int tagIndex; + len = 0; + + int lenOfTagOfOneRow = 0; + len += snprintf(tags + len, STRING_LEN - len, "("); + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char* dataType = superTbls->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; + } else { + taos_close(taos); + printf("config error tag type : %s\n", dataType); + exit(-1); + } + } + len -= 2; + len += snprintf(tags + len, STRING_LEN - len, ")"); + + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + + snprintf(command, BUFFER_SIZE, + "create table if not exists %s.%s (ts timestamp%s) tags %s", + dbName, superTbls->sTblName, cols, tags); + debugPrint("DEBUG - %s() LN%d: %s\n", __func__, __LINE__, command); + + if (0 
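Once the cols and tags buffers above are assembled, the statement handed to queryDbExec has the shape below; a toy reproduction of the same snprintf chaining (sample names and types, not taken from a real run):

```c
#include <stdio.h>

int main(void) {
  char command[256];
  const char *cols = ", col0 INT, col1 BINARY(16)";
  const char *tags = "(t0 INT, t1 NCHAR(8))";
  snprintf(command, sizeof(command),
           "create table if not exists %s.%s (ts timestamp%s) tags %s",
           "test", "meters", cols, tags);
  puts(command);
  return 0;
}
```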
!= queryDbExec(taos, command, NO_INSERT_TYPE)) { + fprintf(stderr, "create supertable %s failed!\n\n", superTbls->sTblName); + return -1; + } + debugPrint("DEBUG - create supertable %s success!\n\n", superTbls->sTblName); + } + return 0; +} + + +static int createDatabases() { + TAOS * taos = NULL; + int ret = 0; + taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + return -1; + } + char command[BUFFER_SIZE] = "\0"; + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].drop) { + sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); + debugPrint("DEBUG %s() %d command: %s\n", __func__, __LINE__, command); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + taos_close(taos); + return -1; + } + } + + int dataLen = 0; + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName); + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update); + } + //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { + // dataLen += snprintf(command + dataLen, + // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); + //} + if (g_Dbs.db[i].dbCfg.minRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.cacheLast > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "cachelast %d ", g_Dbs.db[i].dbCfg.cacheLast); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync); + } + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + "precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + } + + debugPrint("DEBUG %s() %d command: %s\n", __func__, __LINE__, command); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + taos_close(taos); + printf("\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + return -1; + } + printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); + + debugPrint("DEBUG 
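createDatabases above appends an option to the CREATE DATABASE statement only when its configured value is positive, so anything left at 0 silently falls back to the server default. The assembly pattern, reduced to a standalone sketch:

```c
#include <stdio.h>

int main(void) {
  char command[256];
  int blocks = 8, cache = 0, replica = 3;     /* 0 means "not configured" */
  int len = snprintf(command, sizeof(command),
                     "create database if not exists %s ", "test");
  if (blocks > 0)
    len += snprintf(command + len, sizeof(command) - len, "blocks %d ", blocks);
  if (cache > 0)
    len += snprintf(command + len, sizeof(command) - len, "cache %d ", cache);
  if (replica > 0)
    len += snprintf(command + len, sizeof(command) - len, "replica %d ", replica);
  puts(command);  /* cache is omitted, so the server default applies */
  return 0;
}
```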
%s() %d count:%d\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // describe super table, if exists + sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + debugPrint("DEBUG %s() %d command: %s\n", __func__, __LINE__, command); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS; + ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); + } else { + g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS; + ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]); + } + + if (0 != ret) { + printf("\ncreate super table %d failed!\n\n", j); + taos_close(taos); + return -1; + } + } + } + + taos_close(taos); + return 0; +} + +static void* createTable(void *sarg) +{ + threadInfo *winfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = winfo->superTblInfo; + + int64_t lastPrintTime = taosGetTimestampMs(); + + int buff_len; + if (superTblInfo) + buff_len = superTblInfo->maxSqlLen; + else + buff_len = BUFFER_SIZE; + + char *buffer = calloc(buff_len, 1); + if (buffer == NULL) { + fprintf(stderr, "Memory allocated failed!"); + exit(-1); + } + + int len = 0; + int batchNum = 0; + //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); + for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(buffer, buff_len, + "create table if not exists %s.%s%d %s;", + winfo->db_name, + g_args.tb_prefix, i, + winfo->cols); + } else { + if (0 == len) { + batchNum = 0; + memset(buffer, 0, buff_len); + len += snprintf(buffer + len, + buff_len - len, "create table "); + } + + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(buffer); + return NULL; + } + + len += snprintf(buffer + len, + superTblInfo->maxSqlLen - len, + "if not exists %s.%s%d using %s.%s tags %s ", + winfo->db_name, superTblInfo->childTblPrefix, + i, winfo->db_name, + superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + + if ((batchNum < superTblInfo->batchCreateTableNum) + && ((superTblInfo->maxSqlLen - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } + + len = 0; + debugPrint("DEBUG %s() LN%d %s\n", __func__, __LINE__, buffer); + if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ + free(buffer); + return NULL; + } + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %d - %d tables\n", + winfo->threadID, winfo->start_table_id, i); + lastPrintTime = currentPrintTime; + } + } + + if (0 != len) { + debugPrint("DEBUG %s() %d buffer: %s\n", __func__, __LINE__, buffer); + (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE); + } + + free(buffer); + return NULL; +} + +int startMultiThreadCreateChildTable( + char* cols, int threads, int ntables, + char* db_name, SSuperTable* superTblInfo) { + pthread_t *pids = malloc(threads * sizeof(pthread_t)); + threadInfo *infos = malloc(threads * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed\n"); + exit(-1); + } + + if (threads < 1) { + threads = 1; + } + + int a = ntables / threads; + if (a < 1) { + threads = 
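+ /* How the table range is split across threads below: with
+  * a = ntables / threads and b = ntables % threads, the first b threads
+  * each take a + 1 consecutive tables and the rest take a, so all ntables
+  * are covered exactly once. A worked example (values illustrative):
+  *
+  *   ntables = 10, threads = 3  ->  a = 3, b = 1
+  *   thread 0: tables 0..3   (a + 1 = 4 tables, since 0 < b)
+  *   thread 1: tables 4..6   (3 tables)
+  *   thread 2: tables 7..9   (3 tables)
+  *
+  * matching t_info->end_table_id = i < b ? last + a : last + a - 1.
+  */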
ntables; + a = 1; + } + + int b = 0; + b = ntables % threads; + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); + t_info->superTblInfo = superTblInfo; + t_info->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); + if (t_info->taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + return -1; + } + t_info->start_table_id = last; + t_info->end_table_id = i < b ? last + a : last + a - 1; + last = t_info->end_table_id + 1; + t_info->use_metric = 1; + t_info->cols = cols; + t_info->minDelay = INT16_MAX; + pthread_create(pids + i, NULL, createTable, t_info); + } + + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infos + i; + taos_close(t_info->taos); + } + + free(pids); + free(infos); + + return 0; +} + + +static void createChildTables() { + char tblColsBuf[MAX_SQL_SIZE]; + int len; + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].superTblCount > 0) { + // with super table + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) + || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + + debugPrint("DEBUG - %s() LN%d: %s\n", __func__, __LINE__, + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + startMultiThreadCreateChildTable( + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, + g_Dbs.threadCountByCreateTbl, + g_Dbs.db[i].superTbls[j].childTblCount, + g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; + } + } else { + // normal table + len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + for (int i = 0; i < MAX_COLUMN_COUNT; i++) { + if (g_args.datatype[i]) { + if ((strncasecmp(g_args.datatype[i], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[i], "NCHAR", strlen("NCHAR")) == 0)) { + len = snprintf(tblColsBuf + len, MAX_SQL_SIZE, ", COL%d %s(60)", i, g_args.datatype[i]); + } else { + len = snprintf(tblColsBuf + len, MAX_SQL_SIZE, ", COL%d %s", i, g_args.datatype[i]); + } + len = strlen(tblColsBuf); + } else { + len = snprintf(tblColsBuf + len, MAX_SQL_SIZE, ")"); + break; + } + } + + debugPrint("DEBUG - %s() LN%d: %s\n", __func__, __LINE__, + tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + g_args.num_of_DPT, + g_Dbs.db[i].dbName, + NULL); + } + } +} + +/* +static int taosGetLineNum(const char *fileName) +{ + int lineNum = 0; + char cmd[1024] = { 0 }; + char buf[1024] = { 0 }; + sprintf(cmd, "wc -l %s", fileName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + return lineNum; + } + + if (fgets(buf, sizeof(buf), fp)) { + int index = strchr((const char*)buf, ' ') - buf; + buf[index] = '\0'; + lineNum = atoi(buf); + } + pclose(fp); + return lineNum; +} +*/ + +/* + Read 10000 lines at most. 
If more than 10000 lines, continue to read after using +*/ +int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + + FILE *fp = fopen(superTblInfo->tagsFile, "r"); + if (fp == NULL) { + printf("Failed to open tags file: %s, reason:%s\n", + superTblInfo->tagsFile, strerror(errno)); + return -1; + } + + if (superTblInfo->tagDataBuf) { + free(superTblInfo->tagDataBuf); + superTblInfo->tagDataBuf = NULL; + } + + int tagCount = 10000; + int count = 0; + char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + if (tagDataBuf == NULL) { + printf("Failed to calloc, reason:%s\n", strerror(errno)); + fclose(fp); + return -1; + } + + while ((readLen = tgetline(&line, &n, fp)) != -1) { + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + count++; + + if (count >= tagCount - 1) { + char *tmp = realloc(tagDataBuf, + (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + if (tmp != NULL) { + tagDataBuf = tmp; + tagCount = (int)(tagCount*1.5); + memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + tmfree(tagDataBuf); + free(line); + fclose(fp); + return -1; + } + } + } + + superTblInfo->tagDataBuf = tagDataBuf; + superTblInfo->tagSampleCount = count; + + free(line); + fclose(fp); + return 0; +} + +int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { + // TODO + return 0; +} + + +/* + Read 10000 lines at most. If more than 10000 lines, continue to read after using +*/ +int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + int getRows = 0; + + memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow); + while (1) { + readLen = tgetline(&line, &n, fp); + if (-1 == readLen) { + if(0 != fseek(fp, 0, SEEK_SET)) { + printf("Failed to fseek file: %s, reason:%s\n", + superTblInfo->sampleFile, strerror(errno)); + return -1; + } + continue; + } + + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + if (readLen > superTblInfo->lenOfOneRow) { + printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", + (int32_t)readLen, superTblInfo->lenOfOneRow); + continue; + } + + memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen); + getRows++; + + if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { + break; + } + } + + tmfree(line); + return 0; +} + +/* +void readSampleFromFileToMem(SSuperTable * supterTblInfo) { + int ret; + if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) { + ret = readSampleFromCsvFileToMem(supterTblInfo); + } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) { + ret = readSampleFromJsonFileToMem(supterTblInfo); + } + + if (0 != ret) { + exit(-1); + } +} +*/ +static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) { + bool ret = false; + + // columns + cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); + if (columns && columns->type != cJSON_Array) { + printf("failed to read json, columns not found\n"); + goto PARSE_OVER; + } else if (NULL == columns) { + 
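+ /* A minimal sketch of the grow-by-1.5x reallocation used in
+  * readTagFromCsvFileToMem() above: when the buffer is nearly full, try to
+  * enlarge it and zero the new tail; on failure, free everything and bail
+  * out instead of writing past the end. Names here are illustrative.
+  *
+  *   char *tmp = realloc(buf, (size_t)(cap * 1.5) * rowLen);
+  *   if (tmp == NULL) { free(buf); return -1; }   // give up cleanly
+  *   buf = tmp;
+  *   cap = (int)(cap * 1.5);
+  *   memset(buf + used * rowLen, 0, (size_t)(cap - used) * rowLen);
+  */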
superTbls->columnCount = 0; + superTbls->tagCount = 0; + return true; + } + + int columnSize = cJSON_GetArraySize(columns); + if (columnSize > MAX_COLUMN_COUNT) { + printf("failed to read json, column size overflow, max column size is %d\n", + MAX_COLUMN_COUNT); + goto PARSE_OVER; + } + + int count = 1; + int index = 0; + StrColumn columnCase; + + //superTbls->columnCount = columnSize; + for (int k = 0; k < columnSize; ++k) { + cJSON* column = cJSON_GetArrayItem(columns, k); + if (column == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(column, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + printf("failed to read json, column count not found"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(column, "type"); + if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { + printf("failed to read json, column type not found"); + goto PARSE_OVER; + } + //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + + cJSON* dataLen = cJSON_GetObjectItem(column, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + printf("failed to read json, column len not found"); + goto PARSE_OVER; + } else { + columnCase.dataLen = 8; + } + + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + superTbls->columns[index].dataLen = columnCase.dataLen; + index++; + } + } + superTbls->columnCount = index; + + count = 1; + index = 0; + // tags + cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); + if (!tags || tags->type != cJSON_Array) { + printf("failed to read json, tags not found"); + goto PARSE_OVER; + } + + int tagSize = cJSON_GetArraySize(tags); + if (tagSize > MAX_TAG_COUNT) { + printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT); + goto PARSE_OVER; + } + + //superTbls->tagCount = tagSize; + for (int k = 0; k < tagSize; ++k) { + cJSON* tag = cJSON_GetArrayItem(tags, k); + if (tag == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(tag, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + printf("failed to read json, column count not found"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(tag, "type"); + if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { + printf("failed to read json, tag type not found"); + goto PARSE_OVER; + } + tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + + cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + printf("failed to read json, column len not found"); + goto PARSE_OVER; + } else { + columnCase.dataLen = 0; + } + + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + superTbls->tags[index].dataLen = columnCase.dataLen; + index++; 
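+ /* The "count" field expands one JSON entry into several identical columns
+  * or tags, as the copy loops above do. A hedged example (values
+  * illustrative): the columns array
+  *
+  *   [ {"type": "INT"}, {"type": "BINARY", "len": 16, "count": 2} ]
+  *
+  * yields three columns in the created table: col0 INT, col1 BINARY(16),
+  * col2 BINARY(16).
+  */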
+ } + } + superTbls->tagCount = index; + + ret = true; + +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} + +static bool getMetaFromInsertJsonFile(cJSON* root) { + bool ret = false; + + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE); + } else if (!host) { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, host not found\n"); + goto PARSE_OVER; + } + + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_Dbs.port = port->valueint; + } else if (!port) { + g_Dbs.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE); + } else if (!user) { + tstrncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE); + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE); + } else if (!password) { + tstrncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE); + } + + cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); + if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { + tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); + } else if (!resultfile) { + tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); + } + + cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); + if (threads && threads->type == cJSON_Number) { + g_Dbs.threadCount = threads->valueint; + } else if (!threads) { + g_Dbs.threadCount = 1; + } else { + printf("failed to read json, threads not found"); + goto PARSE_OVER; + } + + cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); + if (threads2 && threads2->type == cJSON_Number) { + g_Dbs.threadCountByCreateTbl = threads2->valueint; + } else if (!threads2) { + g_Dbs.threadCountByCreateTbl = 1; + } else { + printf("failed to read json, threads2 not found"); + goto PARSE_OVER; + } + + cJSON* insertInterval = cJSON_GetObjectItem(root, "insert_interval"); + if (insertInterval && insertInterval->type == cJSON_Number) { + g_args.insert_interval = insertInterval->valueint; + } else if (!insertInterval) { + g_args.insert_interval = 0; + } else { + printf("failed to read json, insert_interval not found"); + goto PARSE_OVER; + } + + cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); + if (numRecPerReq && numRecPerReq->type == cJSON_Number) { + g_args.num_of_RPR = numRecPerReq->valueint; + } else if (!numRecPerReq) { + g_args.num_of_RPR = 100; + } else { + printf("failed to read json, num_of_records_per_req not found"); + goto PARSE_OVER; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt + && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { 
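+ /* Every option in this parser follows the same three-way shape: present
+  * and well-typed -> use it, absent -> fall back to a default, present but
+  * mistyped -> report and abort. A minimal sketch (helper name
+  * jsonIntOrDefault is illustrative, not part of taosdemo):
+  *
+  *   static int64_t jsonIntOrDefault(cJSON *obj, const char *key,
+  *                                   int64_t defv, bool *err) {
+  *     cJSON *item = cJSON_GetObjectItem(obj, key);
+  *     if (item == NULL) return defv;            // absent: use default
+  *     if (item->type == cJSON_Number) return item->valueint;
+  *     *err = true;                              // mistyped: flag error
+  *     return defv;
+  *   }
+  */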
+ g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto PARSE_OVER; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (!dbs || dbs->type != cJSON_Array) { + printf("failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + int dbSize = cJSON_GetArraySize(dbs); + if (dbSize > MAX_DB_COUNT) { + printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); + goto PARSE_OVER; + } + + g_Dbs.dbCount = dbSize; + for (int i = 0; i < dbSize; ++i) { + cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); + if (dbinfos == NULL) continue; + + // dbinfo + cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); + if (!dbinfo || dbinfo->type != cJSON_Object) { + printf("failed to read json, dbinfo not found"); + goto PARSE_OVER; + } + + cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); + if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { + printf("failed to read json, db name not found"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); + + cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); + if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { + if (0 == strncasecmp(drop->valuestring, "yes", 3)) { + g_Dbs.db[i].drop = 1; + } else { + g_Dbs.db[i].drop = 0; + } + } else if (!drop) { + g_Dbs.db[i].drop = 0; + } else { + printf("failed to read json, drop not found"); + goto PARSE_OVER; + } + + cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); + if (precision && precision->type == cJSON_String && precision->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE); + } else if (!precision) { + //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); + memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, precision not found"); + goto PARSE_OVER; + } + + cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); + if (update && update->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.update = update->valueint; + } else if (!update) { + g_Dbs.db[i].dbCfg.update = -1; + } else { + printf("failed to read json, update not found"); + goto PARSE_OVER; + } + + cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); + if (replica && replica->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.replica = replica->valueint; + } else if (!replica) { + g_Dbs.db[i].dbCfg.replica = -1; + } else { + printf("failed to read json, replica not found"); + goto PARSE_OVER; + } + + cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); + if (keep && keep->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.keep = keep->valueint; + } else if (!keep) { + g_Dbs.db[i].dbCfg.keep = -1; + } else { + printf("failed to read json, keep not found"); + goto PARSE_OVER; + } + + cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); + if (days && days->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.days = days->valueint; + } else if (!days) { + g_Dbs.db[i].dbCfg.days = -1; + } else { + printf("failed to read json, days not found"); + goto PARSE_OVER; + } + + cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); + if (cache && cache->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cache = cache->valueint; + } else if (!cache) { + g_Dbs.db[i].dbCfg.cache = -1; + } else { + printf("failed to read json, cache not found"); + goto PARSE_OVER; + } + + cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); + if (blocks 
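+ /* Unset numeric options are stored as -1 here, and createDatabases() only
+  * emits a clause when the value is > 0, so absent keys fall through to the
+  * server defaults. A hedged example of a dbinfo object (values
+  * illustrative):
+  *
+  *   "dbinfo": { "name": "db", "drop": "yes", "precision": "ms",
+  *               "replica": 1, "keep": 365, "days": 10,
+  *               "cache": 16, "blocks": 8 }
+  */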
&& blocks->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.blocks = blocks->valueint; + } else if (!blocks) { + g_Dbs.db[i].dbCfg.blocks = -1; + } else { + printf("failed to read json, blocks not found"); + goto PARSE_OVER; + } + + //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); + //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; + //} else if (!maxtablesPerVnode) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; + //} else { + // printf("failed to read json, maxtablesPerVnode not found"); + // goto PARSE_OVER; + //} + + cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); + if (minRows && minRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.minRows = minRows->valueint; + } else if (!minRows) { + g_Dbs.db[i].dbCfg.minRows = -1; + } else { + printf("failed to read json, minRows not found"); + goto PARSE_OVER; + } + + cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); + if (maxRows && maxRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; + } else if (!maxRows) { + g_Dbs.db[i].dbCfg.maxRows = -1; + } else { + printf("failed to read json, maxRows not found"); + goto PARSE_OVER; + } + + cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); + if (comp && comp->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.comp = comp->valueint; + } else if (!comp) { + g_Dbs.db[i].dbCfg.comp = -1; + } else { + printf("failed to read json, comp not found"); + goto PARSE_OVER; + } + + cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); + if (walLevel && walLevel->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint; + } else if (!walLevel) { + g_Dbs.db[i].dbCfg.walLevel = -1; + } else { + printf("failed to read json, walLevel not found"); + goto PARSE_OVER; + } + + cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast"); + if (cacheLast && cacheLast->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint; + } else if (!cacheLast) { + g_Dbs.db[i].dbCfg.cacheLast = -1; + } else { + printf("failed to read json, cacheLast not found"); + goto PARSE_OVER; + } + + cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); + if (quorum && quorum->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.quorum = quorum->valueint; + } else if (!quorum) { + g_Dbs.db[i].dbCfg.quorum = -1; + } else { + printf("failed to read json, quorum not found"); + goto PARSE_OVER; + } + + cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); + if (fsync && fsync->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.fsync = fsync->valueint; + } else if (!fsync) { + g_Dbs.db[i].dbCfg.fsync = -1; + } else { + printf("failed to read json, fsync not found"); + goto PARSE_OVER; + } + + // super_tables + cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); + if (!stables || stables->type != cJSON_Array) { + printf("failed to read json, super_tables not found"); + goto PARSE_OVER; + } + + int stbSize = cJSON_GetArraySize(stables); + if (stbSize > MAX_SUPER_TABLE_COUNT) { + printf("failed to read json, super_tables size overflow, max is %d\n", MAX_SUPER_TABLE_COUNT); + goto PARSE_OVER; + } + + g_Dbs.db[i].superTblCount = stbSize; + for (int j = 0; j < stbSize; ++j) { + cJSON* stbInfo = cJSON_GetArrayItem(stables, j); + if (stbInfo == NULL) continue; + + // stb name + cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); + if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { + printf("failed to read json, stb name not 
found"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE); + + cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); + if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { + printf("failed to read json, childtable_prefix not found"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE); + + cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null + if (autoCreateTbl + && autoCreateTbl->type == cJSON_String + && autoCreateTbl->valuestring != NULL) { + if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; + } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + } else if (!autoCreateTbl) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + printf("failed to read json, auto_create_table not found"); + goto PARSE_OVER; + } + + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; + } else if (!batchCreateTbl) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000; + } else { + printf("failed to read json, batch_create_tbl_num not found"); + goto PARSE_OVER; + } + + cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no + if (childTblExists + && childTblExists->type == cJSON_String + && childTblExists->valuestring != NULL) { + if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; + } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } + } else if (!childTblExists) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + printf("failed to read json, child_table_exists not found"); + goto PARSE_OVER; + } + + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); + if (!count || count->type != cJSON_Number || 0 >= count->valueint) { + printf("failed to read json, childtable_count not found"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + + cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); + if (dataSource && dataSource->type == cJSON_String + && dataSource->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, + dataSource->valuestring, MAX_DB_NAME_SIZE); + } else if (!dataSource) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, data_source not found"); + goto PARSE_OVER; + } + + cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful + if (insertMode && insertMode->type == cJSON_String + && insertMode->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, + insertMode->valuestring, MAX_DB_NAME_SIZE); + } else if (!insertMode) { + tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, insert_mode not found"); + goto PARSE_OVER; + } + + cJSON *ts = cJSON_GetObjectItem(stbInfo, 
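+ /* A hedged example of one super_tables entry covering the options parsed
+  * in this block (values illustrative):
+  *
+  *   { "name": "stb0", "childtable_prefix": "tb", "childtable_count": 1000,
+  *     "auto_create_table": "no", "child_table_exists": "no",
+  *     "batch_create_tbl_num": 1000, "data_source": "rand",
+  *     "insert_mode": "taosc" }
+  */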
"start_timestamp"); + if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE); + } else if (!ts) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, start_timestamp not found"); + goto PARSE_OVER; + } + + cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); + if (timestampStep && timestampStep->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; + } else if (!timestampStep) { + g_Dbs.db[i].superTbls[j].timeStampStep = 1000; + } else { + printf("failed to read json, timestamp_step not found"); + goto PARSE_OVER; + } + + cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size"); + if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint; + if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; + } + } else if (!sampleDataBufSize) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; + } else { + printf("failed to read json, sample_buf_size not found"); + goto PARSE_OVER; + } + + cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); + if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, + sampleFormat->valuestring, MAX_DB_NAME_SIZE); + } else if (!sampleFormat) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, sample_format not found"); + goto PARSE_OVER; + } + + cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); + if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, + sampleFile->valuestring, MAX_FILE_NAME_LEN); + } else if (!sampleFile) { + memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, sample_file not found"); + goto PARSE_OVER; + } + + cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); + if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, + tagsFile->valuestring, MAX_FILE_NAME_LEN); + if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + g_Dbs.db[i].superTbls[j].tagSource = 1; + } + } else if (!tagsFile) { + memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + printf("failed to read json, tags_file not found"); + goto PARSE_OVER; + } + + cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); + if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + int32_t len = maxSqlLen->valueint; + if (len > TSDB_MAX_ALLOWED_SQL_LEN) { + len = TSDB_MAX_ALLOWED_SQL_LEN; + } else if (len < TSDB_MAX_SQL_LEN) { + len = TSDB_MAX_SQL_LEN; + } + g_Dbs.db[i].superTbls[j].maxSqlLen = len; + } else if (!maxSqlLen) { + g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; + } else { + printf("failed to read json, maxSqlLen not found"); + goto PARSE_OVER; + } + + cJSON *multiThreadWriteOneTbl = + cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes + if (multiThreadWriteOneTbl + && multiThreadWriteOneTbl->type == cJSON_String + && 
multiThreadWriteOneTbl->valuestring != NULL) { + if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; + } else { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; + } + } else if (!multiThreadWriteOneTbl) { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; + } else { + printf("failed to read json, multiThreadWriteOneTbl not found"); + goto PARSE_OVER; + } + + cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql"); + if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint; + } else if (!numberOfTblInOneSql) { + g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0; + } else { + printf("failed to read json, numberOfTblInOneSql not found"); + goto PARSE_OVER; + } + + cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl"); + if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint; + } else if (!rowsPerTbl) { + g_Dbs.db[i].superTbls[j].rowsPerTbl = 1; + } else { + printf("failed to read json, rowsPerTbl not found"); + goto PARSE_OVER; + } + + cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); + if (disorderRatio && disorderRatio->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; + } else if (!disorderRatio) { + g_Dbs.db[i].superTbls[j].disorderRatio = 0; + } else { + printf("failed to read json, disorderRatio not found"); + goto PARSE_OVER; + } + + cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); + if (disorderRange && disorderRange->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; + } else if (!disorderRange) { + g_Dbs.db[i].superTbls[j].disorderRange = 1000; + } else { + printf("failed to read json, disorderRange not found"); + goto PARSE_OVER; + } + + + cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); + if (insertRows && insertRows->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; + //if (0 == g_Dbs.db[i].superTbls[j].insertRows) { + // g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; + //} + } else if (!insertRows) { + g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; + } else { + printf("failed to read json, insert_rows not found"); + goto PARSE_OVER; + } + + if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable + || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + + int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]); + if (false == retVal) { + goto PARSE_OVER; + } + } + } + + ret = true; + +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} + +static bool getMetaFromQueryJsonFile(cJSON* root) { + bool ret = false; + + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE); + } else if (!host) { + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, host not found\n"); + goto PARSE_OVER; + } + + cJSON* port = 
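+ /* disorder_ratio and disorder_range (parsed above) drive out-of-order
+  * timestamps in the insert paths further down: roughly ratio percent of
+  * rows get a timestamp pulled back by up to range. A minimal sketch
+  * mirroring that logic (function name is illustrative):
+  *
+  *   static int64_t nextTs(int64_t ts, int ratio, int range) {
+  *     if (ratio != 0 && (rand() % 100) < ratio)
+  *       return ts - rand() % range;   // disordered row
+  *     return ts;                      // in-order row
+  *   }
+  */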
cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_queryInfo.port = port->valueint; + } else if (!port) { + g_queryInfo.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE); + } else if (!user) { + tstrncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ; + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE); + } else if (!password) { + tstrncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto PARSE_OVER; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { + tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); + } else if (!dbs) { + printf("failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); + if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { + tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); + } else if (!queryMode) { + tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); + } else { + printf("failed to read json, query_mode not found\n"); + goto PARSE_OVER; + } + + // super_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.concurrent = 0; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + printf("failed to read json, super_table_query not found"); + goto PARSE_OVER; + } else { + cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (rate && rate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.rate = rate->valueint; + } else if (!rate) { + g_queryInfo.superQueryInfo.rate = 0; + } + + cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + g_queryInfo.superQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + g_queryInfo.superQueryInfo.concurrent = 1; + } + + cJSON* mode = cJSON_GetObjectItem(superQuery, "mode"); + if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { + if (0 == strcmp("sync", mode->valuestring)) { + g_queryInfo.superQueryInfo.subscribeMode = 0; + } else if (0 == strcmp("async", mode->valuestring)) { + g_queryInfo.superQueryInfo.subscribeMode = 1; + } else { + printf("failed to read json, subscribe mod error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeMode = 0; + } + + cJSON* interval = cJSON_GetObjectItem(superQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.superQueryInfo.subscribeInterval = 
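+ /* A hedged example of the specified_table_query section being parsed here
+  * (values illustrative):
+  *
+  *   "specified_table_query": {
+  *     "query_interval": 1, "concurrent": 4, "mode": "sync",
+  *     "interval": 10000, "restart": "yes", "keepProgress": "no",
+  *     "sqls": [ { "sql": "select count(*) from stb0",
+  *                 "result": "./query_res0.txt" } ]
+  *   }
+  */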
interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(superQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = 1; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = 0; + } else { + printf("failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = 1; + } + + cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (keepProgress + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } else { + printf("failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + printf("failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + // sub_table_query + cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!subQuery) { + g_queryInfo.subQueryInfo.threadCnt = 0; + g_queryInfo.subQueryInfo.sqlCount = 0; + } else if (subQuery->type != cJSON_Object) { + printf("failed to read json, sub_table_query not found"); + ret = true; + goto PARSE_OVER; + } else { + cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.subQueryInfo.rate = subrate->valueint; + } else if (!subrate) { + g_queryInfo.subQueryInfo.rate = 0; + } + + cJSON* threads = cJSON_GetObjectItem(subQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + g_queryInfo.subQueryInfo.threadCnt = threads->valueint; + } else if (!threads) { + g_queryInfo.subQueryInfo.threadCnt = 1; + } + + //cJSON* subTblCnt = 
cJSON_GetObjectItem(subQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.subQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname"); + if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + } else { + printf("failed to read json, super table name not found\n"); + goto PARSE_OVER; + } + + cJSON* submode = cJSON_GetObjectItem(subQuery, "mode"); + if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (0 == strcmp("sync", submode->valuestring)) { + g_queryInfo.subQueryInfo.subscribeMode = 0; + } else if (0 == strcmp("async", submode->valuestring)) { + g_queryInfo.subQueryInfo.subscribeMode = 1; + } else { + printf("failed to read json, subscribe mod error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeMode = 0; + } + + cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval"); + if (subinterval && subinterval->type == cJSON_Number) { + g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint; + } else if (!subinterval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.subQueryInfo.subscribeInterval = 10000; + } + + cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { + g_queryInfo.subQueryInfo.subscribeRestart = 1; + } else if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.subQueryInfo.subscribeRestart = 0; + } else { + printf("failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeRestart = 1; + } + + cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress"); + if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", subkeepProgress->valuestring)) { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", subkeepProgress->valuestring)) { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; + } else { + printf("failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls"); + if (!subsqls) { + g_queryInfo.subQueryInfo.sqlCount = 0; + } else if (subsqls->type != cJSON_Array) { + printf("failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(subsqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.subQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(subsqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, 
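+ /* "mode", "restart" and "keepProgress" above are plain string flags mapped
+  * to 0/1; restated as a helper (name parseOnOff is illustrative):
+  *
+  *   static int parseOnOff(const char *s, const char *on, const char *off) {
+  *     if (strcmp(s, on) == 0)  return 1;
+  *     if (strcmp(s, off) == 0) return 0;
+  *     return -1;  // caller treats this as a config error
+  *   }
+  *   // e.g. subscribeMode = parseOnOff(submode->valuestring, "async", "sync");
+  */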
"result"); + if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ + tstrncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, sub query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + ret = true; + +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} + +static bool getInfoFromJsonFile(char* file) { + debugPrint("DEBUG - %s %d %s\n", __func__, __LINE__, file); + + FILE *fp = fopen(file, "r"); + if (!fp) { + printf("failed to read %s, reason:%s\n", file, strerror(errno)); + return false; + } + + bool ret = false; + int maxLen = 64000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + printf("failed to read %s, content is null", file); + return false; + } + + content[len] = 0; + cJSON* root = cJSON_Parse(content); + if (root == NULL) { + printf("failed to cjson parse %s, invalid json format", file); + goto PARSE_OVER; + } + + cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); + if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_args.test_mode = INSERT_MODE; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_args.test_mode = QUERY_MODE; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_args.test_mode = SUBSCRIBE_MODE; + } else { + printf("failed to read json, filetype not support\n"); + goto PARSE_OVER; + } + } else if (!filetype) { + g_args.test_mode = INSERT_MODE; + } else { + printf("failed to read json, filetype not found\n"); + goto PARSE_OVER; + } + + if (INSERT_MODE == g_args.test_mode) { + ret = getMetaFromInsertJsonFile(root); + } else if (QUERY_MODE == g_args.test_mode) { + ret = getMetaFromQueryJsonFile(root); + } else if (SUBSCRIBE_MODE == g_args.test_mode) { + ret = getMetaFromQueryJsonFile(root); + } else { + printf("input json file type error! 
please input correct file type: insert or query or subscribe\n"); + goto PARSE_OVER; + } + +PARSE_OVER: + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; +} + +void prePareSampleData() { + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + //if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].dataSource, "sample", 6)) { + // readSampleFromFileToMem(&g_Dbs.db[i].superTbls[j]); + //} + + if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { + (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); + } + } + } +} + +void postFreeResource() { + tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { + free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { + free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { + free(g_Dbs.db[i].superTbls[j].tagDataBuf); + g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].childTblName) { + free(g_Dbs.db[i].superTbls[j].childTblName); + g_Dbs.db[i].superTbls[j].childTblName = NULL; + } + } + } +} + +int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) { + if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf); + if (0 != ret) { + return -1; + } + *sampleUsePos = 0; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + + (*sampleUsePos)++; + + return dataLen; +} + +int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + for (int i = 0; i < stbInfo->columnCount; i++) { + if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { + if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); + return (-1); + } + + char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); + if (NULL == buf) { + printf("calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); + return (-1); + } + rand_string(buf, stbInfo->columns[i].dataLen); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_int()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_float()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_double()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + } else { + printf("No support data type: %s\n", stbInfo->columns[i].dataType); + return (-1); + } + } + dataLen -= 2; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + + return dataLen; +} + +static void syncWriteForNumberOfTblInOneSql( + threadInfo *winfo, FILE *fp, char* sampleDataBuf) { + SSuperTable* superTblInfo = winfo->superTblInfo; + + int samplePos = 0; + + //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); + int64_t totalRowsInserted = 0; + int64_t totalAffectedRows = 0; + int64_t lastPrintTime = taosGetTimestampMs(); + + char* buffer = calloc(superTblInfo->maxSqlLen+1, 1); + if (NULL == buffer) { + printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen); + return; + } + + int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql; + int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1; + if (numberOfTblInOneSql > tbls) { + numberOfTblInOneSql = tbls; + } + + uint64_t time_counter = winfo->start_time; + int64_t tmp_time; + int sampleUsePos; + + int64_t st = 0; + int64_t et = 0; + for (int i = 0; i < superTblInfo->insertRows;) { + int32_t tbl_id = 0; + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) { + int inserted = i; + + for (int k = 0; k < g_args.num_of_RPR;) { + int len = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + char *pstr = buffer; + + int32_t end_tbl_id = tID + numberOfTblInOneSql; + if (end_tbl_id > winfo->end_table_id) { + end_tbl_id = winfo->end_table_id+1; + } + + for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) { + sampleUsePos = samplePos; + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, tbl_id % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + goto free_and_statistics; + } + + if (0 == len) { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + 
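+ /* generateRowData() above emits one parenthesized row literal per call:
+  * the timestamp, then one value per configured column, with the trailing
+  * ", " trimmed before the closing brace. A hedged example (values
+  * illustrative; strings come from rand_string() and are single-quoted):
+  *
+  *   (1500000000000, 42, 3.141500, 'xkwfbk')
+  */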
"insert into %s.%s%d using %s.%s tags %s values ", + winfo->db_name, + superTblInfo->childTblPrefix, + tbl_id, + winfo->db_name, + superTblInfo->sTblName, + tagsValBuf); + } else { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + " %s.%s%d using %s.%s tags %s values ", + winfo->db_name, + superTblInfo->childTblPrefix, + tbl_id, + winfo->db_name, + superTblInfo->sTblName, + tagsValBuf); + } + tmfree(tagsValBuf); + } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + if (0 == len) { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + "insert into %s.%s values ", + winfo->db_name, + superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); + } else { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + " %s.%s values ", + winfo->db_name, + superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); + } + } else { // pre-create child table + if (0 == len) { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + "insert into %s.%s%d values ", + winfo->db_name, + superTblInfo->childTblPrefix, + tbl_id); + } else { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + " %s.%s%d values ", + winfo->db_name, + superTblInfo->childTblPrefix, + tbl_id); + } + } + + tmp_time = time_counter; + for (k = 0; k < superTblInfo->rowsPerTbl;) { + int retLen = 0; + if (0 == strncasecmp(superTblInfo->dataSource, + "sample", strlen("sample"))) { + retLen = getRowDataFromSample(pstr + len, + superTblInfo->maxSqlLen - len, + tmp_time += superTblInfo->timeStampStep, + superTblInfo, + &sampleUsePos, + fp, + sampleDataBuf); + if (retLen < 0) { + goto free_and_statistics; + } + } else if (0 == strncasecmp( + superTblInfo->dataSource, "rand", strlen("rand"))) { + int rand_num = rand_tinyint() % 100; + if (0 != superTblInfo->disorderRatio + && rand_num < superTblInfo->disorderRatio) { + int64_t d = tmp_time - rand() % superTblInfo->disorderRange; + retLen = generateRowData(pstr + len, + superTblInfo->maxSqlLen - len, + d, + superTblInfo); + } else { + retLen = generateRowData(pstr + len, + superTblInfo->maxSqlLen - len, + tmp_time += superTblInfo->timeStampStep, + superTblInfo); + } + if (retLen < 0) { + goto free_and_statistics; + } + } + len += retLen; + //inserted++; + k++; + totalRowsInserted++; + + if (inserted >= superTblInfo->insertRows || + (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) { + tID = tbl_id + 1; + printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", + superTblInfo->lenOfOneRow); + goto send_to_server; + } + } + } + + tID = tbl_id; + inserted += superTblInfo->rowsPerTbl; + +send_to_server: + if (g_args.insert_interval && (g_args.insert_interval > (et - st))) { + int sleep_time = g_args.insert_interval - (et -st); + debugPrint("DEBUG sleep: %d ms\n", sleep_time); + taosMsleep(sleep_time); // ms + } + + if (g_args.insert_interval) { + st = taosGetTimestampMs(); + } + + if (0 == strncasecmp(superTblInfo->insertMode, + "taosc", + strlen("taosc"))) { + //printf("multi table===== sql: %s \n\n", buffer); + //int64_t t1 = taosGetTimestampMs(); + int64_t startTs; + int64_t endTs; + startTs = taosGetTimestampUs(); + + debugPrint("DEBUG %s() LN%d buff: %s\n", __func__, __LINE__, buffer); + int affectedRows = queryDbExec( + winfo->taos, buffer, INSERT_TYPE); + + if (0 > affectedRows) { + goto free_and_statistics; + } else { + endTs = taosGetTimestampUs(); + int64_t delay = endTs - startTs; + if (delay > winfo->maxDelay) winfo->maxDelay = delay; + if 
(delay < winfo->minDelay) winfo->minDelay = delay; + winfo->cntDelay++; + winfo->totalDelay += delay; + //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; + } + totalAffectedRows += affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + winfo->threadID, + totalRowsInserted, + totalAffectedRows); + lastPrintTime = currentPrintTime; + } + //int64_t t2 = taosGetTimestampMs(); + //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); + } else { + //int64_t t1 = taosGetTimestampMs(); + int retCode = postProceSql(g_Dbs.host, g_Dbs.port, buffer); + //int64_t t2 = taosGetTimestampMs(); + //printf("http insert sql return, Spent %ld ms \n", t2 - t1); + + if (0 != retCode) { + printf("========restful return fail, threadID[%d]\n", winfo->threadID); + goto free_and_statistics; + } + } + if (g_args.insert_interval) { + et = taosGetTimestampMs(); + } + + break; + } + + if (tID > winfo->end_table_id) { + if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) { + samplePos = sampleUsePos; + } + i = inserted; + time_counter = tmp_time; + } + } + + //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); + } + +free_and_statistics: + tmfree(buffer); + winfo->totalRowsInserted = totalRowsInserted; + winfo->totalAffectedRows = totalAffectedRows; + printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); + return; +} + +int32_t generateData(char *res, char **data_type, + int num_of_cols, int64_t timestamp, int len_of_binary) { + memset(res, 0, MAX_DATA_SIZE); + char *pstr = res; + pstr += sprintf(pstr, "(%" PRId64, timestamp); + int c = 0; + + for (; c < MAX_NUM_DATATYPE; c++) { + if (data_type[c] == NULL) { + break; + } + } + + if (0 == c) { + perror("data type error!"); + exit(-1); + } + + for (int i = 0; i < num_of_cols; i++) { + if (strcasecmp(data_type[i % c], "tinyint") == 0) { + pstr += sprintf(pstr, ", %d", rand_tinyint() ); + } else if (strcasecmp(data_type[i % c], "smallint") == 0) { + pstr += sprintf(pstr, ", %d", rand_smallint()); + } else if (strcasecmp(data_type[i % c], "int") == 0) { + pstr += sprintf(pstr, ", %d", rand_int()); + } else if (strcasecmp(data_type[i % c], "bigint") == 0) { + pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); + } else if (strcasecmp(data_type[i % c], "float") == 0) { + pstr += sprintf(pstr, ", %10.4f", rand_float()); + } else if (strcasecmp(data_type[i % c], "double") == 0) { + double t = rand_double(); + pstr += sprintf(pstr, ", %20.8f", t); + } else if (strcasecmp(data_type[i % c], "bool") == 0) { + bool b = rand() & 1; + pstr += sprintf(pstr, ", %s", b ? 
"true" : "false"); + } else if (strcasecmp(data_type[i % c], "binary") == 0) { + char *s = malloc(len_of_binary); + rand_string(s, len_of_binary); + pstr += sprintf(pstr, ", \"%s\"", s); + free(s); + }else if (strcasecmp(data_type[i % c], "nchar") == 0) { + char *s = malloc(len_of_binary); + rand_string(s, len_of_binary); + pstr += sprintf(pstr, ", \"%s\"", s); + free(s); + } + + if (pstr - res > MAX_DATA_SIZE) { + perror("column length too long, abort"); + exit(-1); + } } - - taos_free_result(pSql); - return; + + pstr += sprintf(pstr, ")"); + + return (int32_t)(pstr - res); } +// sync insertion +/* + 1 thread: 100 tables * 2000 rows/s + 1 thread: 10 tables * 20000 rows/s + 6 thread: 300 tables * 2000 rows/s -/* Function to do regular expression check */ -static int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; + 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s +*/ +static void* syncWrite(void *sarg) { - /* Compile regular expression */ - if (regcomp(®ex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); - } + threadInfo *winfo = (threadInfo *)sarg; - /* Execute regular expression */ - int reti = regexec(®ex, s, 0, NULL, 0); - if (!reti) { - regfree(®ex); - return 1; - } else if (reti == REG_NOMATCH) { - regfree(®ex); - return 0; - } else { - regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); - regfree(®ex); - exit(-1); + char buffer[BUFFER_SIZE] = "\0"; + char data[MAX_DATA_SIZE]; + char **data_type = g_args.datatype; + int len_of_binary = g_args.len_of_binary; + + int ncols_per_record = 1; // count first col ts + for (int i = 0; i < MAX_COLUMN_COUNT; i ++) { + if (NULL == g_args.datatype[i]) + break; + else + ncols_per_record ++; } - return 0; -} + srand((uint32_t)time(NULL)); + int64_t time_counter = winfo->start_time; -static int isCommentLine(char *line) { - if (line == NULL) return 1; + uint64_t st = 0; + uint64_t et = 0; - return regexMatch(line, "^\\s*#.*", REG_EXTENDED); + for (int i = 0; i < g_args.num_of_DPT;) { + + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { + int inserted = i; + int64_t tmp_time = time_counter; + + char *pstr = buffer; + pstr += sprintf(pstr, + "insert into %s.%s%d values", + winfo->db_name, g_args.tb_prefix, tID); + int k; + for (k = 0; k < g_args.num_of_RPR;) { + int rand_num = rand() % 100; + int len = -1; + + if ((g_args.disorderRatio != 0) + && (rand_num < g_args.disorderRange)) { + + int64_t d = tmp_time - rand() % 1000000 + rand_num; + len = generateData(data, data_type, + ncols_per_record, d, len_of_binary); + } else { + len = generateData(data, data_type, + ncols_per_record, tmp_time += 1000, len_of_binary); + } + + //assert(len + pstr - buffer < BUFFER_SIZE); + if (len + pstr - buffer >= BUFFER_SIZE) { // too long + break; + } + + pstr += sprintf(pstr, " %s", data); + inserted++; + k++; + + if (inserted >= g_args.num_of_DPT) + break; + } + + /* puts(buffer); */ + int64_t startTs; + int64_t endTs; + startTs = taosGetTimestampUs(); + //queryDB(winfo->taos, buffer); + if (i > 0 && g_args.insert_interval + && (g_args.insert_interval > (et - st) )) { + int sleep_time = g_args.insert_interval - (et -st); + debugPrint("DEBUG sleep: %d ms\n", sleep_time); + taosMsleep(sleep_time); // ms + } + + if (g_args.insert_interval) { + st = taosGetTimestampMs(); + } + debugPrint("DEBUG - %s() LN%d %s\n", __func__, __LINE__, buffer); + int affectedRows = queryDbExec(winfo->taos, buffer, 1); + + if (0 <= affectedRows){ + 
endTs = taosGetTimestampUs(); + int64_t delay = endTs - startTs; + if (delay > winfo->maxDelay) + winfo->maxDelay = delay; + if (delay < winfo->minDelay) + winfo->minDelay = delay; + winfo->cntDelay++; + winfo->totalDelay += delay; + //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; + } + + if (g_args.insert_interval) { + et = taosGetTimestampMs(); + } + + if (tID == winfo->end_table_id) { + i = inserted; + time_counter = tmp_time; + } + } + + } + return NULL; } -void querySqlFile(TAOS* taos, char* sqlFile) -{ - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - exit(-1); + +static void* syncWriteWithStb(void *sarg) { + uint64_t totalRowsInserted = 0; + uint64_t totalAffectedRows = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + + threadInfo *winfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = winfo->superTblInfo; + + FILE *fp = NULL; + char* sampleDataBuf = NULL; + int samplePos = 0; + + // each thread read sample data from csv file + if (0 == strncasecmp(superTblInfo->dataSource, + "sample", + strlen("sample"))) { + sampleDataBuf = calloc( + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + if (sampleDataBuf == NULL) { + printf("Failed to calloc %d Bytes, reason:%s\n", + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + strerror(errno)); + return NULL; + } + + fp = fopen(superTblInfo->sampleFile, "r"); + if (fp == NULL) { + printf("Failed to open sample file: %s, reason:%s\n", + superTblInfo->sampleFile, strerror(errno)); + tmfree(sampleDataBuf); + return NULL; + } + int ret = readSampleFromCsvFileToMem(fp, + superTblInfo, sampleDataBuf); + if (0 != ret) { + tmfree(sampleDataBuf); + tmfclose(fp); + return NULL; + } } - - int read_len = 0; - char * cmd = calloc(1, MAX_SQL_SIZE); - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - double t = getCurrentTime(); + if (superTblInfo->numberOfTblInOneSql > 0) { + syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf); + tmfree(sampleDataBuf); + tmfclose(fp); + return NULL; + } + + char* buffer = calloc(superTblInfo->maxSqlLen, 1); + if (NULL == buffer) { + printf("Failed to calloc %d Bytes, reason:%s\n", + superTblInfo->maxSqlLen, + strerror(errno)); + tmfree(sampleDataBuf); + tmfclose(fp); + return NULL; + } + + int64_t time_counter = winfo->start_time; + uint64_t st = 0; + uint64_t et = 0; + + debugPrint("DEBUG - %s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, superTblInfo->insertRows); + + for (int i = 0; i < superTblInfo->insertRows;) { + + for (uint32_t tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { + uint64_t inserted = i; + uint64_t tmp_time = time_counter; + + int sampleUsePos = samplePos; + int k = 0; + debugPrint("DEBUG - %s() LN%d num_of_RPR=%d\n", __func__, __LINE__, g_args.num_of_RPR); + for (k = 0; k < g_args.num_of_RPR;) { + int len = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + char *pstr = buffer; + + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + tID % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + goto free_and_statistics_2; + } + + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + "insert into %s.%s%d using %s.%s tags %s values", + winfo->db_name, + superTblInfo->childTblPrefix, + tID, + winfo->db_name, + 
superTblInfo->sTblName, + tagsValBuf); + tmfree(tagsValBuf); + } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + "insert into %s.%s values", + winfo->db_name, + superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN); + } else { + len += snprintf(pstr + len, + superTblInfo->maxSqlLen - len, + "insert into %s.%s%d values", + winfo->db_name, + superTblInfo->childTblPrefix, + tID); + } + + int retLen = 0; + if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) { + retLen = getRowDataFromSample( + pstr + len, + superTblInfo->maxSqlLen - len, + tmp_time += superTblInfo->timeStampStep, + superTblInfo, + &sampleUsePos, + fp, + sampleDataBuf); + if (retLen < 0) { + goto free_and_statistics_2; + } + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + int rand_num = rand_tinyint() % 100; + if (0 != superTblInfo->disorderRatio + && rand_num < superTblInfo->disorderRatio) { + int64_t d = tmp_time - rand() % superTblInfo->disorderRange; + retLen = generateRowData( + pstr + len, + superTblInfo->maxSqlLen - len, d, + superTblInfo); + //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d); + } else { + retLen = generateRowData( + pstr + len, + superTblInfo->maxSqlLen - len, + tmp_time += superTblInfo->timeStampStep, + superTblInfo); + } + if (retLen < 0) { + goto free_and_statistics_2; + } + } +/* len += retLen; +*/ + inserted++; + k++; + totalRowsInserted++; - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= MAX_SQL_SIZE) continue; - line[--read_len] = '\0'; + if (inserted > superTblInfo->insertRows) + break; +/* if (inserted >= superTblInfo->insertRows + || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) + break; +*/ + if (i > 0 && g_args.insert_interval + && (g_args.insert_interval > (et - st) )) { + int sleep_time = g_args.insert_interval - (et -st); + debugPrint("DEBUG sleep: %d ms\n", sleep_time); + taosMsleep(sleep_time); // ms + } - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } + if (g_args.insert_interval) { + st = taosGetTimestampMs(); + } - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; + if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { + //printf("===== sql: %s \n\n", buffer); + //int64_t t1 = taosGetTimestampMs(); + int64_t startTs; + int64_t endTs; + startTs = taosGetTimestampUs(); + + debugPrint("DEBUG %s() LN%d %s\n", __func__, __LINE__, buffer); + int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); + + if (0 > affectedRows){ + goto free_and_statistics_2; + } else { + endTs = taosGetTimestampUs(); + int64_t delay = endTs - startTs; + if (delay > winfo->maxDelay) winfo->maxDelay = delay; + if (delay < winfo->minDelay) winfo->minDelay = delay; + winfo->cntDelay++; + winfo->totalDelay += delay; + //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; + } + totalAffectedRows += affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + winfo->threadID, + totalRowsInserted, + totalAffectedRows); + lastPrintTime = currentPrintTime; + } + //int64_t t2 = taosGetTimestampMs(); + //printf("taosc insert sql return, Spent %.4f seconds \n", 
(double)(t2 - t1)/1000.0); + } else { + //int64_t t1 = taosGetTimestampMs(); + int retCode = postProceSql(g_Dbs.host, g_Dbs.port, buffer); + //int64_t t2 = taosGetTimestampMs(); + //printf("http insert sql return, Spent %ld ms \n", t2 - t1); + + if (0 != retCode) { + printf("========restful return fail, threadID[%d]\n", winfo->threadID); + goto free_and_statistics_2; + } + } + if (g_args.insert_interval) { + et = taosGetTimestampMs(); + } +/* + if (loop_cnt) { + loop_cnt--; + if ((1 == loop_cnt) && (0 != nrecords_last_req)) { + nrecords_cur_req = nrecords_last_req; + } else if (0 == loop_cnt){ + nrecords_cur_req = nrecords_no_last_req; + loop_cnt = loop_cnt_orig; + break; + } + } else { + break; + } + */ + } + + if (tID == winfo->end_table_id) { + if (0 == strncasecmp( + superTblInfo->dataSource, "sample", strlen("sample"))) { + samplePos = sampleUsePos; + } + + i = inserted; + time_counter = tmp_time; + } } - memcpy(cmd + cmd_len, line, read_len); - selectSql(taos, cmd); - memset(cmd, 0, MAX_SQL_SIZE); - cmd_len = 0; + //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); } - t = getCurrentTime() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t); +free_and_statistics_2: + tmfree(buffer); + tmfree(sampleDataBuf); + tmfclose(fp); - free(cmd); - if (line) free(line); - fclose(fp); - return; + winfo->totalRowsInserted = totalRowsInserted; + winfo->totalAffectedRows = totalAffectedRows; + + printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", + winfo->threadID, + totalRowsInserted, + totalAffectedRows); + return NULL; } -void * createTable(void *sarg) -{ - char command[BUFFER_SIZE] = "\0"; - - info *winfo = (info *)sarg; +void callBack(void *param, TAOS_RES *res, int code) { + threadInfo* winfo = (threadInfo*)param; - if (!winfo->use_metric) { - /* Create all the tables; */ - printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); - for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s);", winfo->db_name, winfo->tb_prefix, i, winfo->cols); - queryDB(winfo->taos, command); + if (g_args.insert_interval) { + winfo->et = taosGetTimestampMs(); + if (winfo->et - winfo->st < 1000) { + taosMsleep(1000 - (winfo->et - winfo->st)); // ms } - } else { - /* Create all the tables; */ - printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); - for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { - int j; - if (i % 10 == 0) { - j = 10; - } else { - j = i % 10; - } - if (j % 2 == 0) { - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai"); + } + + char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); + char *data = calloc(1, MAX_DATA_SIZE); + char *pstr = buffer; + pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id); + if (winfo->counter >= winfo->superTblInfo->insertRows) { + winfo->start_table_id++; + winfo->counter = 0; + } + if (winfo->start_table_id > winfo->end_table_id) { + tsem_post(&winfo->lock_sem); + free(buffer); + free(data); + taos_free_result(res); + return; + } + + for (int i = 0; i < g_args.num_of_RPR; i++) { + int rand_num = rand() % 100; + if (0 != winfo->superTblInfo->disorderRatio && 
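// disorder injection: with probability disorderRatio percent the next
+        // row is back-dated so the stream contains out-of-order timestamps
+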
rand_num < winfo->superTblInfo->disorderRatio) + { + int64_t d = winfo->lastTs - rand() % 1000000 + rand_num; + //generateData(data, datatype, ncols_per_record, d, len_of_binary); + (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo); } else { - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing"); + //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); + (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo); } - queryDB(winfo->taos, command); + pstr += sprintf(pstr, "%s", data); + winfo->counter++; + + if (winfo->counter >= winfo->superTblInfo->insertRows) { + break; } } + + if (g_args.insert_interval) { + winfo->st = taosGetTimestampMs(); + } + taos_query_a(winfo->taos, buffer, callBack, winfo); + free(buffer); + free(data); + + taos_free_result(res); +} + +void *asyncWrite(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + + winfo->st = 0; + winfo->et = 0; + winfo->lastTs = winfo->start_time; + + if (g_args.insert_interval) { + winfo->st = taosGetTimestampMs(); + } + taos_query_a(winfo->taos, "show databases", callBack, winfo); + + tsem_wait(&(winfo->lock_sem)); return NULL; } -void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) { - double ts = getCurrentTime(); - printf("create table......\n"); - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - info *infos = malloc(threads * sizeof(info)); +void startMultiThreadInsertData(int threads, char* db_name, char* precision, + SSuperTable* superTblInfo) { - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + pthread_t *pids = malloc(threads * sizeof(pthread_t)); + threadInfo *infos = malloc(threads * sizeof(threadInfo)); + memset(pids, 0, threads * sizeof(pthread_t)); + memset(infos, 0, threads * sizeof(threadInfo)); + + int ntables = 0; + if (superTblInfo) + ntables = superTblInfo->childTblCount; + else + ntables = g_args.num_of_tables; + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + + //TAOS* taos; + //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { + // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); + // if (NULL == taos) { + // printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); + // exit(-1); + // } + //} + + int32_t timePrec = TSDB_TIME_PRECISION_MILLI; + if (0 != precision[0]) { + if (0 == strncasecmp(precision, "ms", 2)) { + timePrec = TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(precision, "us", 2)) { + timePrec = TSDB_TIME_PRECISION_MICRO; + } else { + printf("No support precision: %s\n", precision); + exit(-1); + } } - int b = 0; - if (threads != 0) - b = ntables % threads; + int64_t start_time; + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + start_time = taosGetTimestamp(timePrec); + } else { + taosParseTime( + superTblInfo->startTimestamp, + &start_time, + strlen(superTblInfo->startTimestamp), + timePrec, 0); + } + } else { + start_time = 1500000000000; + } + + double start = getCurrentTime(); + int last = 0; for (int i = 0; i < threads; i++) { - info *t_info = infos + i; + threadInfo *t_info = infos + i; t_info->threadID = i; tstrncpy(t_info->db_name, db_name, 
MAX_DB_NAME_SIZE); - tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE); - t_info->taos = taos_connect(ip_addr, user, pass, db_name, port); - t_info->start_table_id = last; - t_info->end_table_id = i < b ? last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->use_metric = use_metric; - t_info->cols = cols; - pthread_create(pids + i, NULL, createTable, t_info); + t_info->superTblInfo = superTblInfo; + + t_info->start_time = start_time; + t_info->minDelay = INT16_MAX; + + if ((NULL == superTblInfo) || + (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { + //t_info->taos = taos; + t_info->taos = taos_connect( + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); + if (NULL == t_info->taos) { + printf("connect to server fail from insert sub thread, reason: %s\n", + taos_errstr(NULL)); + exit(-1); + } + } else { + t_info->taos = NULL; + } + + if ((NULL == superTblInfo) + || (0 == superTblInfo->multiThreadWriteOneTbl)) { + t_info->start_table_id = last; + t_info->end_table_id = i < b ? last + a : last + a - 1; + last = t_info->end_table_id + 1; + } else { + t_info->start_table_id = 0; + t_info->end_table_id = superTblInfo->childTblCount - 1; + t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); + } + + tsem_init(&(t_info->lock_sem), 0, 0); + if (SYNC == g_Dbs.queryMode) { + if (superTblInfo) { + pthread_create(pids + i, NULL, syncWriteWithStb, t_info); + } else { + pthread_create(pids + i, NULL, syncWrite, t_info); + } + } else { + pthread_create(pids + i, NULL, asyncWrite, t_info); + } } for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } - double t = getCurrentTime() - ts; - printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads); + int64_t totalDelay = 0; + int64_t maxDelay = 0; + int64_t minDelay = INT16_MAX; + int64_t cntDelay = 1; + double avgDelay = 0; for (int i = 0; i < threads; i++) { - info *t_info = infos + i; - tsem_destroy(&(t_info->mutex_sem)); + threadInfo *t_info = infos + i; + tsem_destroy(&(t_info->lock_sem)); + taos_close(t_info->taos); + + if (superTblInfo) { + superTblInfo->totalAffectedRows += t_info->totalAffectedRows; + superTblInfo->totalRowsInserted += t_info->totalRowsInserted; + + totalDelay += t_info->totalDelay; + cntDelay += t_info->cntDelay; + if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay; + if (t_info->minDelay < minDelay) minDelay = t_info->minDelay; + } + } + cntDelay -= 1; + + if (cntDelay == 0) cntDelay = 1; + avgDelay = (double)totalDelay / cntDelay; + + double end = getCurrentTime(); + double t = end - start; + + if (superTblInfo) { + printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", + t, superTblInfo->totalRowsInserted, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + superTblInfo->totalRowsInserted / t); + fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. 
%2.f records/second\n\n", + t, superTblInfo->totalRowsInserted, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + superTblInfo->totalRowsInserted / t); } + printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n", + avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); + fprintf(g_fpOfInsertResult, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n", + avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); + + //taos_close(taos); + free(pids); free(infos); - - return ; } + void *readTable(void *sarg) { - info *rinfo = (info *)sarg; +#if 1 + threadInfo *rinfo = (threadInfo *)sarg; TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; - int64_t sTime = rinfo->start_time; + uint64_t sTime = rinfo->start_time; char *tb_prefix = rinfo->tb_prefix; FILE *fp = fopen(rinfo->fp, "a"); if (NULL == fp) { printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); return NULL; } - - int num_of_DPT = rinfo->nrecords_per_table; + + int num_of_DPT; + if (rinfo->superTblInfo) { + num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table; + } else { + num_of_DPT = g_args.num_of_DPT; + } + int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = rinfo->do_aggreFunc; + bool do_aggreFunc = g_Dbs.do_aggreFunc; int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; if (!do_aggreFunc) { @@ -1140,7 +4722,8 @@ void *readTable(void *sarg) { fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); - exit(EXIT_FAILURE); + fclose(fp); + return NULL; } while (taos_fetch_row(pSql) != NULL) { @@ -1159,13 +4742,14 @@ void *readTable(void *sarg) { printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT); } fprintf(fp, "\n"); - fclose(fp); +#endif return NULL; } void *readMetric(void *sarg) { - info *rinfo = (info *)sarg; +#if 1 + threadInfo *rinfo = (threadInfo *)sarg; TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; FILE *fp = fopen(rinfo->fp, "a"); @@ -1173,11 +4757,11 @@ void *readMetric(void *sarg) { printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); return NULL; } - - int num_of_DPT = rinfo->nrecords_per_table; + + int num_of_DPT = rinfo->superTblInfo->insertRows; int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = rinfo->do_aggreFunc; + bool do_aggreFunc = g_Dbs.do_aggreFunc; int n = do_aggreFunc ? 
(sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; if (!do_aggreFunc) { @@ -1194,9 +4778,9 @@ void *readMetric(void *sarg) { for (int i = 1; i <= m; i++) { if (i == 1) { - sprintf(tempS, "areaid = %d", i); + sprintf(tempS, "t1 = %d", i); } else { - sprintf(tempS, " or areaid = %d ", i); + sprintf(tempS, " or t1 = %d ", i); } strcat(condition, tempS); @@ -1214,7 +4798,8 @@ void *readMetric(void *sarg) { fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); - exit(1); + fclose(fp); + return NULL; } int count = 0; while (taos_fetch_row(pSql) != NULL) { @@ -1229,333 +4814,918 @@ void *readMetric(void *sarg) { } fprintf(fp, "\n"); } - fclose(fp); +#endif return NULL; } -static int queryDbExec(TAOS *taos, char *command, int type) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; - for (i = 0; i < 5; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; +int insertTestProcess() { + + setupForAnsiEscape(); + int ret = printfInsertMeta(); + resetAfterAnsiEscape(); + + if (ret == -1) + exit(EXIT_FAILURE); + + debugPrint("DEBUG - %d result file: %s\n", __LINE__, g_Dbs.resultFile); + g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); + if (NULL == g_fpOfInsertResult) { + fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile); + return -1; + } { + printfInsertMetaToFile(g_fpOfInsertResult); + } + + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + + init_rand_data(); + + // create database and super tables + if(createDatabases() != 0) { + fclose(g_fpOfInsertResult); + return -1; + } + + // pretreatement + prePareSampleData(); + + double start; + double end; + + // create child tables + start = getCurrentTime(); + createChildTables(); + end = getCurrentTime(); + + if (g_totalChildTables > 0) { + printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", + end - start, g_totalChildTables, g_Dbs.threadCount); + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", + end - start, g_totalChildTables, g_Dbs.threadCount); + } + + taosMsleep(1000); + // create sub threads for inserting data + //start = getCurrentTime(); + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].superTblCount > 0) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + if (0 == g_Dbs.db[i].superTbls[j].insertRows) { + continue; + } + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + superTblInfo); + } + } else { + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + NULL); + } + } + //end = getCurrentTime(); + + //int64_t totalRowsInserted = 0; + //int64_t totalAffectedRows = 0; + //for (int i = 0; i < g_Dbs.dbCount; i++) { + // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted; + // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; + //} + //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount); + postFreeResource(); + + return 0; +} + +void *superQueryProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + + //char sqlStr[MAX_TB_NAME_SIZE*2]; + //sprintf(sqlStr, "use %s", g_queryInfo.dbName); + //queryDB(winfo->taos, sqlStr); + + int64_t st = 0; + int64_t et = 0; + while 
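// repeat the configured SQLs indefinitely; a non-zero 'rate' throttles
+  // successive rounds to start at least rate seconds apart
+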
(1) { + if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } + + st = taosGetTimestampMs(); + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + int64_t t1 = taosGetTimestampUs(); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } else { + int64_t t1 = taosGetTimestampUs(); + int retCode = postProceSql(g_queryInfo.host, + g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i]); + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", winfo->threadID); + return NULL; + } + } + } + et = taosGetTimestampMs(); + printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", + taosGetSelfPthreadId(), (double)(et - st)/1000.0); + } + return NULL; +} + +void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { + char sourceString[32] = "xxxx"; + char subTblName[MAX_TB_NAME_SIZE*3]; + sprintf(subTblName, "%s.%s", + g_queryInfo.dbName, + g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); + + //printf("inSql: %s\n", inSql); + + char* pos = strstr(inSql, sourceString); + if (0 == pos) { + return; + } + + tstrncpy(outSql, inSql, pos - inSql + 1); + //printf("1: %s\n", outSql); + strcat(outSql, subTblName); + //printf("2: %s\n", outSql); + strcat(outSql, pos+strlen(sourceString)); + //printf("3: %s\n", outSql); +} + +void *subQueryProcess(void *sarg) { + char sqlstr[1024]; + threadInfo *winfo = (threadInfo *)sarg; + int64_t st = 0; + int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000; + while (1) { + if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } + + st = taosGetTimestampMs(); + for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { + for (int j = 0; j < g_queryInfo.subQueryInfo.sqlCount; j++) { + memset(sqlstr,0,sizeof(sqlstr)); + replaceSubTblName(g_queryInfo.subQueryInfo.sql[j], sqlstr, i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.subQueryInfo.result[i], + winfo->threadID); + } + selectAndGetResult(winfo->taos, sqlstr, tmpFile); + } + } + et = taosGetTimestampMs(); + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", + taosGetSelfPthreadId(), + winfo->start_table_id, + winfo->end_table_id, + (double)(et - st)/1000.0); + } + return NULL; +} + +static int queryTestProcess() { + TAOS * taos = NULL; + 
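// one connection is opened here and reused by the query threads created below
+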
taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + exit(-1); + } + + if (0 != g_queryInfo.subQueryInfo.sqlCount) { + (void)getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.subQueryInfo.sTblName, + &g_queryInfo.subQueryInfo.childTblName, + &g_queryInfo.subQueryInfo.childTblCount); + } + + printfQueryMeta(); + + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + + printfQuerySystemInfo(taos); + + pthread_t *pids = NULL; + threadInfo *infos = NULL; + //==== create sub threads for query from specify table + if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { + + pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); } - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; - } + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + t_info->taos = taos; + + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + debugPrint("DEBUG %s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE); + } else { + t_info->taos = NULL; + } + + pthread_create(pids + i, NULL, superQueryProcess, t_info); + } + }else { + g_queryInfo.superQueryInfo.concurrent = 0; + } + + pthread_t *pidsOfSub = NULL; + threadInfo *infosOfSub = NULL; + //==== create sub threads for query from all sub table of the super table + if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); + infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); + } + + int ntables = g_queryInfo.subQueryInfo.childTblCount; + int threads = g_queryInfo.subQueryInfo.threadCnt; + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infosOfSub + i; + t_info->threadID = i; + + t_info->start_table_id = last; + t_info->end_table_id = i < b ? 
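// split the child tables across threads; the first b threads take one extra table
+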
last + a : last + a - 1; + last = t_info->end_table_id + 1; + t_info->taos = taos; + pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); + } + + g_queryInfo.subQueryInfo.threadCnt = threads; + }else { + g_queryInfo.subQueryInfo.threadCnt = 0; + } + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + pthread_join(pids[i], NULL); } - if (code != 0) { - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); - taos_free_result(res); - //taos_close(taos); - return -1; + tmfree((char*)pids); + tmfree((char*)infos); + + for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); } - if (1 == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); + + taos_close(taos); + return 0; +} + +static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + if (res == NULL || taos_errno(res) != 0) { + printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res)); + return; } + getResult(res, (char*)param); taos_free_result(res); - return 0; } -void queryDB(TAOS *taos, char *command) { - int i; - TAOS_RES *pSql = NULL; - int32_t code = -1; +static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { + TAOS_SUB* tsub = NULL; - for (i = 0; i < 5; i++) { - if (NULL != pSql) { - taos_free_result(pSql); - pSql = NULL; + if (g_queryInfo.superQueryInfo.subscribeMode) { + tsub = taos_subscribe(taos, + g_queryInfo.superQueryInfo.subscribeRestart, + topic, sql, subscribe_callback, (void*)resultFileName, + g_queryInfo.superQueryInfo.subscribeInterval); + } else { + tsub = taos_subscribe(taos, + g_queryInfo.superQueryInfo.subscribeRestart, + topic, sql, NULL, NULL, 0); + } + + if (tsub == NULL) { + printf("failed to create subscription. 
topic:%s, sql:%s\n", topic, sql); + return NULL; + } + + return tsub; +} + +void *subSubscribeProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + char subSqlstr[1024]; + + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + debugPrint("DEBUG %s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ + return NULL; + } + + //int64_t st = 0; + //int64_t et = 0; + do { + //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { + // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + //} + + //st = taosGetTimestampMs(); + char topic[32] = {0}; + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + sprintf(topic, "taosdemo-subscribe-%d", i); + memset(subSqlstr,0,sizeof(subSqlstr)); + replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { + return NULL; + } + } + //et = taosGetTimestampMs(); + //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + } while (0); + + // start loop to consume result + TAOS_RES* res = NULL; + while (1) { + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + if (1 == g_queryInfo.subQueryInfo.subscribeMode) { + continue; + } + + res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); + if (res) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.subQueryInfo.result[i], + winfo->threadID); + } + getResult(res, tmpFile); + } + } + } + taos_free_result(res); + + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], + g_queryInfo.subQueryInfo.subscribeKeepProgress); + } + return NULL; +} + +void *superSubscribeProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + debugPrint("DEBUG %s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) { + return NULL; + } + + //int64_t st = 0; + //int64_t et = 0; + do { + //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { + // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + //} + + //st = taosGetTimestampMs(); + char topic[32] = {0}; + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + sprintf(topic, "taosdemo-subscribe-%d", i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + g_queryInfo.superQueryInfo.tsub[i] = + subscribeImpl(winfo->taos, + g_queryInfo.superQueryInfo.sql[i], + topic, tmpFile); + if 
(NULL == g_queryInfo.superQueryInfo.tsub[i]) { + return NULL; + } + } + //et = taosGetTimestampMs(); + //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + } while (0); + + // start loop to consume result + TAOS_RES* res = NULL; + while (1) { + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + if (1 == g_queryInfo.superQueryInfo.subscribeMode) { + continue; + } + + res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]); + if (res) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + getResult(res, tmpFile); + } + } + } + taos_free_result(res); + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], + g_queryInfo.superQueryInfo.subscribeKeepProgress); + } + return NULL; +} + +static int subscribeTestProcess() { + printfQueryMeta(); + + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + exit(-1); + } + + if (0 != g_queryInfo.subQueryInfo.sqlCount) { + (void)getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.subQueryInfo.sTblName, + &g_queryInfo.subQueryInfo.childTblName, + &g_queryInfo.subQueryInfo.childTblCount); + } + + + pthread_t *pids = NULL; + threadInfo *infos = NULL; + //==== create sub threads for query from super table + if (g_queryInfo.superQueryInfo.sqlCount > 0 + && g_queryInfo.superQueryInfo.concurrent > 0) { + pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); } - pSql = taos_query(taos, command); - code = taos_errno(pSql); - if (0 == code) { - break; - } + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + t_info->taos = taos; + pthread_create(pids + i, NULL, superSubscribeProcess, t_info); + } + } + + //==== create sub threads for query from sub table + pthread_t *pidsOfSub = NULL; + threadInfo *infosOfSub = NULL; + if ((g_queryInfo.subQueryInfo.sqlCount > 0) + && (g_queryInfo.subQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * + sizeof(pthread_t)); + infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * + sizeof(threadInfo)); + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); + } + + int ntables = g_queryInfo.subQueryInfo.childTblCount; + int threads = g_queryInfo.subQueryInfo.threadCnt; + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infosOfSub + i; + t_info->threadID = i; + + t_info->start_table_id = last; + t_info->end_table_id = i < b ? 
last + a : last + a - 1; + t_info->taos = taos; + pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); + } + g_queryInfo.subQueryInfo.threadCnt = threads; } + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + pthread_join(pids[i], NULL); + } - if (code != 0) { - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - exit(EXIT_FAILURE); + tmfree((char*)pids); + tmfree((char*)infos); + + for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); } - taos_free_result(pSql); + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); + taos_close(taos); + return 0; } -// sync insertion -void *syncWrite(void *sarg) { - info *winfo = (info *)sarg; - char buffer[BUFFER_SIZE] = "\0"; - char data[MAX_DATA_SIZE]; - char **data_type = winfo->datatype; - int len_of_binary = winfo->len_of_binary; - int ncols_per_record = winfo->ncols_per_record; - srand((uint32_t)time(NULL)); - int64_t time_counter = winfo->start_time; - for (int i = 0; i < winfo->nrecords_per_table;) { - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { - int inserted = i; - int64_t tmp_time = time_counter; +void initOfInsertMeta() { + memset(&g_Dbs, 0, sizeof(SDbs)); + + // set default values + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + g_Dbs.port = 6030; + tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); + g_Dbs.threadCount = 2; + g_Dbs.use_metric = true; +} + +void initOfQueryMeta() { + memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); + + // set default values + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + g_queryInfo.port = 6030; + tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); +} + +void setParaFromArg(){ + if (g_args.host) { + strcpy(g_Dbs.host, g_args.host); + } else { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } + + if (g_args.user) { + strcpy(g_Dbs.user, g_args.user); + } + + if (g_args.password) { + strcpy(g_Dbs.password, g_args.password); + } + + if (g_args.port) { + g_Dbs.port = g_args.port; + } + + g_Dbs.threadCount = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; + + g_Dbs.dbCount = 1; + g_Dbs.db[0].drop = 1; - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, tID); - int k; - for (k = 0; k < winfo->nrecords_per_request;) { - int rand_num = rand() % 100; - int len = -1; - if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) { - int64_t d = tmp_time - rand() % 1000000 + rand_num; - len = generateData(data, data_type, ncols_per_record, d, len_of_binary); - } else { - len = generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary); - } + tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); + g_Dbs.db[0].dbCfg.replica = g_args.replica; + tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - //assert(len + pstr - buffer < BUFFER_SIZE); - if (len + pstr - buffer >= BUFFER_SIZE) { // too long - break; - } + tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - pstr += sprintf(pstr, " %s", data); - inserted++; - k++; + g_Dbs.use_metric = g_args.use_metric; + g_Dbs.insert_only = g_args.insert_only; - if (inserted >= winfo->nrecords_per_table) break; - } + g_Dbs.do_aggreFunc = true; - /* puts(buffer); */ 
- int64_t startTs; - int64_t endTs; - startTs = taosGetTimestampUs(); - //queryDB(winfo->taos, buffer); - int affectedRows = queryDbExec(winfo->taos, buffer, 1); - - if (0 <= affectedRows){ - endTs = taosGetTimestampUs(); - int64_t delay = endTs - startTs; - if (delay > winfo->maxDelay) winfo->maxDelay = delay; - if (delay < winfo->minDelay) winfo->minDelay = delay; - winfo->cntDelay++; - winfo->totalDelay += delay; - //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; - } + char dataString[STRING_LEN]; + char **data_type = g_args.datatype; + + memset(dataString, 0, STRING_LEN); - if (tID == winfo->end_table_id) { - i = inserted; - time_counter = tmp_time; + if (strcasecmp(data_type[0], "BINARY") == 0 + || strcasecmp(data_type[0], "BOOL") == 0 + || strcasecmp(data_type[0], "NCHAR") == 0 ) { + g_Dbs.do_aggreFunc = false; + } + + if (g_args.use_metric) { + g_Dbs.db[0].superTblCount = 1; + tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; + g_Dbs.threadCount = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = 1; + g_Dbs.queryMode = g_args.mode; + + g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; + g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS; + g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; + g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; + g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; + tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, + g_args.tb_prefix, MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, + "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].timeStampStep = 10; + + g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; + g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE; + + g_Dbs.db[0].superTbls[0].columnCount = 0; + for (int i = 0; i < MAX_NUM_DATATYPE; i++) { + if (data_type[i] == NULL) { + break; + } + + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + data_type[i], MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].columnCount++; + } + + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { + g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; + } else { + for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; + g_Dbs.db[0].superTbls[0].columnCount++; } } + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].tagCount = 2; + } else { + g_Dbs.threadCountByCreateTbl = 1; + g_Dbs.db[0].superTbls[0].tagCount = 0; } - return NULL; + } -void *asyncWrite(void *sarg) { - info *winfo = (info *)sarg; - taos_query_a(winfo->taos, "show databases", callBack, winfo); +/* Function to do regular expression check */ +static int regexMatch(const char *s, const char *reg, int cflags) { + regex_t regex; + char msgbuf[100] = {0}; - tsem_wait(&(winfo->lock_sem)); + /* Compile regular expression */ + if (regcomp(®ex, reg, cflags) != 
0) { + printf("Fail to compile regex\n"); + exit(-1); + } - return NULL; + /* Execute regular expression */ + int reti = regexec(®ex, s, 0, NULL, 0); + if (!reti) { + regfree(®ex); + return 1; + } else if (reti == REG_NOMATCH) { + regfree(®ex); + return 0; + } else { + regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); + printf("Regex match failed: %s\n", msgbuf); + regfree(®ex); + exit(-1); + } + + return 0; } -void callBack(void *param, TAOS_RES *res, int code) { - info* winfo = (info*)param; - char **datatype = winfo->datatype; - int ncols_per_record = winfo->ncols_per_record; - int len_of_binary = winfo->len_of_binary; +static int isCommentLine(char *line) { + if (line == NULL) return 1; - int64_t tmp_time = winfo->start_time; - char *buffer = calloc(1, BUFFER_SIZE); - char *data = calloc(1, MAX_DATA_SIZE); - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id); - if (winfo->counter >= winfo->nrecords_per_table) { - winfo->start_table_id++; - winfo->counter = 0; - } - if (winfo->start_table_id > winfo->end_table_id) { - tsem_post(&winfo->lock_sem); - free(buffer); - free(data); - taos_free_result(res); + return regexMatch(line, "^\\s*#.*", REG_EXTENDED); +} + +void querySqlFile(TAOS* taos, char* sqlFile) +{ + FILE *fp = fopen(sqlFile, "r"); + if (fp == NULL) { + printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); return; } - - for (int i = 0; i < winfo->nrecords_per_request; i++) { - int rand_num = rand() % 100; - if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) - { - int64_t d = tmp_time - rand() % 1000000 + rand_num; - generateData(data, datatype, ncols_per_record, d, len_of_binary); - } else - { - generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); - } - pstr += sprintf(pstr, "%s", data); - winfo->counter++; - if (winfo->counter >= winfo->nrecords_per_table) { - break; + int read_len = 0; + char * cmd = calloc(1, MAX_SQL_SIZE); + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + double t = getCurrentTime(); + + while ((read_len = tgetline(&line, &line_len, fp)) != -1) { + if (read_len >= MAX_SQL_SIZE) continue; + line[--read_len] = '\0'; + + if (read_len == 0 || isCommentLine(line)) { // line starts with # + continue; } - } - taos_query_a(winfo->taos, buffer, callBack, winfo); - free(buffer); - free(data); - taos_free_result(res); -} + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } -double getCurrentTime() { - struct timeval tv; - if (gettimeofday(&tv, NULL) != 0) { - perror("Failed to get current time in ms"); - exit(EXIT_FAILURE); + memcpy(cmd + cmd_len, line, read_len); + debugPrint("DEBUG %s() LN%d cmd: %s\n", __func__, __LINE__, cmd); + queryDbExec(taos, cmd, NO_INSERT_TYPE); + memset(cmd, 0, MAX_SQL_SIZE); + cmd_len = 0; } - return tv.tv_sec + tv.tv_usec / 1E6; + t = getCurrentTime() - t; + printf("run %s took %.6f second(s)\n\n", sqlFile, t); + + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; } -int32_t randint[MAX_PREPARED_RAND]; -int64_t randbigint[MAX_PREPARED_RAND]; -float randfloat[MAX_PREPARED_RAND]; -double randdouble[MAX_PREPARED_RAND]; +static void testMetaFile() { + if (INSERT_MODE == g_args.test_mode) { + if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); + insertTestProcess(); + } else if (QUERY_MODE == g_args.test_mode) { + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, 
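// apply the config directory given in the meta file before running the query test
+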
g_queryInfo.cfgDir); + queryTestProcess(); + } else if (SUBSCRIBE_MODE == g_args.test_mode) { + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + subscribeTestProcess(); + } else { + ; + } +} -int32_t rand_tinyint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 128; +static void testCmdLine() { -} + g_args.test_mode = INSERT_MODE; + insertTestProcess(); -int32_t rand_smallint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 32767; -} + if (g_Dbs.insert_only) + return; -int32_t rand_int(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor]; -} + // select + if (false == g_Dbs.insert_only) { + // query data -int64_t rand_bigint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randbigint[cursor]; - -} + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_id = 0; -float rand_float(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randfloat[cursor]; - -} + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + if (g_args.use_metric) { + rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + strcpy(rInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix); + } else { + rInfo->end_table_id = g_args.num_of_tables -1; + strcpy(rInfo->tb_prefix, g_args.tb_prefix); + } -double rand_double() { - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randdouble[cursor]; + rInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (rInfo->taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + free(rInfo); + exit(-1); + } -} + strcpy(rInfo->fp, g_Dbs.resultFile); -void init_rand_data(){ - for (int i = 0; i < MAX_PREPARED_RAND; i++){ - randint[i] = (int)(rand() % 10); - randbigint[i] = (int64_t)(rand() % 2147483648); - randfloat[i] = (float)(rand() / 1000.0); - randdouble[i] = (double)(rand() / 1000000.0); - } + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + free(rInfo); + } } -int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) { - memset(res, 0, MAX_DATA_SIZE); - char *pstr = res; - pstr += sprintf(pstr, "(%" PRId64, timestamp); - int c = 0; +int main(int argc, char *argv[]) { + parse_args(argc, argv, &g_args); - for (; c < MAX_NUM_DATATYPE; c++) { - if (data_type[c] == NULL) { - break; - } - } + debugPrint("DEBUG - meta file: %s\n", g_args.metaFile); - if (0 == c) { - perror("data type error!"); - exit(-1); - } + if (g_args.metaFile) { + initOfInsertMeta(); + initOfQueryMeta(); - for (int i = 0; i < num_of_cols; i++) { - if (strcasecmp(data_type[i % c], "tinyint") == 0) { - pstr += sprintf(pstr, ", %d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % c], "smallint") == 0) { - pstr += sprintf(pstr, ", %d", rand_smallint()); - } else if (strcasecmp(data_type[i % c], "int") == 0) { - pstr += sprintf(pstr, ", %d", rand_int()); - } else if (strcasecmp(data_type[i % c], "bigint") == 0) { - pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); - } else if 
(strcasecmp(data_type[i % c], "float") == 0) { - pstr += sprintf(pstr, ", %10.4f", rand_float()); - } else if (strcasecmp(data_type[i % c], "double") == 0) { - double t = rand_double(); - pstr += sprintf(pstr, ", %20.8f", t); - } else if (strcasecmp(data_type[i % c], "bool") == 0) { - bool b = rand() & 1; - pstr += sprintf(pstr, ", %s", b ? "true" : "false"); - } else if (strcasecmp(data_type[i % c], "binary") == 0) { - char *s = malloc(len_of_binary); - rand_string(s, len_of_binary); - pstr += sprintf(pstr, ", \"%s\"", s); - free(s); - }else if (strcasecmp(data_type[i % c], "nchar") == 0) { - char *s = malloc(len_of_binary); - rand_string(s, len_of_binary); - pstr += sprintf(pstr, ", \"%s\"", s); - free(s); + if (false == getInfoFromJsonFile(g_args.metaFile)) { + printf("Failed to read %s\n", g_args.metaFile); + return 1; } - if (pstr - res > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); + testMetaFile(); + } else { + memset(&g_Dbs, 0, sizeof(SDbs)); + setParaFromArg(); + + if (NULL != g_args.sqlFile) { + TAOS* qtaos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + querySqlFile(qtaos, g_args.sqlFile); + taos_close(qtaos); + + } else { + testCmdLine(); } } - pstr += sprintf(pstr, ")"); - - return (int32_t)(pstr - res); + return 0; } -static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890"; -void rand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - --size; - int n; - for (n = 0; n < size; n++) { - int key = rand() % (int)(sizeof charset - 1); - str[n] = charset[key]; - } - str[n] = 0; - } -} diff --git a/src/kit/taosdemox/CMakeLists.txt b/src/kit/taosdemox/CMakeLists.txt deleted file mode 100644 index 3993cb0feb749d4bb2d762f203baeb920f8db495..0000000000000000000000000000000000000000 --- a/src/kit/taosdemox/CMakeLists.txt +++ /dev/null @@ -1,47 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) -PROJECT(TDengine) - -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include) - -IF (TD_LINUX) - AUX_SOURCE_DIRECTORY(. SRC) - ADD_EXECUTABLE(taosdemox ${SRC}) - - #find_program(HAVE_CURL NAMES curl) - IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) - ADD_DEFINITIONS(-DTD_LOWA_CURL) - LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) - ADD_LIBRARY(curl STATIC IMPORTED) - SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) - TARGET_LINK_LIBRARIES(taosdemox curl) - ENDIF () - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemox taos_static cJson) - ELSE () - TARGET_LINK_LIBRARIES(taosdemox taos cJson) - ENDIF () -ENDIF () - -IF (TD_DARWIN) - # missing a few dependencies, such as - # AUX_SOURCE_DIRECTORY(. 
SRC) - # ADD_EXECUTABLE(taosdemox ${SRC}) - # - # #find_program(HAVE_CURL NAMES curl) - # IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) - # ADD_DEFINITIONS(-DTD_LOWA_CURL) - # LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) - # ADD_LIBRARY(curl STATIC IMPORTED) - # SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) - # TARGET_LINK_LIBRARIES(taosdemox curl) - # ENDIF () - # - # IF (TD_SOMODE_STATIC) - # TARGET_LINK_LIBRARIES(taosdemox taos_static cJson) - # ELSE () - # TARGET_LINK_LIBRARIES(taosdemox taos cJson) - # ENDIF () -ENDIF () - diff --git a/src/kit/taosdemox/query.json b/src/kit/taosdemox/query.json deleted file mode 100644 index b7b08edfc912bdccc12bc6b6672d62a8ee4ad417..0000000000000000000000000000000000000000 --- a/src/kit/taosdemox/query.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "filetype":"query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "db01", - "specified_table_query": - {"query_interval":1, "concurrent":1, - "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}] - }, - "super_table_query": - {"stblname": "stb01", "query_interval":1, "threads":1, - "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] - } -} diff --git a/src/kit/taosdemox/taosdemox.c b/src/kit/taosdemox/taosdemox.c deleted file mode 100644 index 40ca2323275725f0a496a702f9e0a14e221d0f74..0000000000000000000000000000000000000000 --- a/src/kit/taosdemox/taosdemox.c +++ /dev/null @@ -1,5069 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - - -/* - when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. 
-*/ - -#define _GNU_SOURCE -#define CURL_STATICLIB - -#ifdef TD_LOWA_CURL -#include "curl/curl.h" -#endif - -#ifdef LINUX - #include "os.h" - #include "cJSON.h" - #include - #include - #include - #ifndef _ALPINE - #include - #endif - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include -#else - #include - #include - #include - #include "os.h" - - #pragma comment ( lib, "libcurl.lib" ) - #pragma comment ( lib, "ws2_32.lib" ) - #pragma comment ( lib, "winmm.lib" ) - #pragma comment ( lib, "wldap32.lib" ) -#endif - -#include "taos.h" -#include "tutil.h" - -extern char configDir[]; - -#define INSERT_JSON_NAME "insert.json" -#define QUERY_JSON_NAME "query.json" -#define SUBSCRIBE_JSON_NAME "subscribe.json" - -#define INSERT_MODE 0 -#define QUERY_MODE 1 -#define SUBSCRIBE_MODE 2 - -#define MAX_SQL_SIZE 65536 -#define BUFFER_SIZE (65536*2) -#define MAX_DB_NAME_SIZE 64 -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE 16000 -#define MAX_NUM_DATATYPE 10 -#define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 -#define MAX_PREPARED_RAND 1000000 -//#define MAX_SQL_SIZE 65536 -#define MAX_FILE_NAME_LEN 256 - -#define MAX_SAMPLES_ONCE_FROM_FILE 10000 -#define MAX_NUM_DATATYPE 10 - -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 8 -#define MAX_COLUMN_COUNT 1024 -#define MAX_TAG_COUNT 128 - -#define MAX_QUERY_SQL_COUNT 10 -#define MAX_QUERY_SQL_LENGTH 256 - -#define MAX_DATABASE_COUNT 256 - -typedef enum CREATE_SUB_TALBE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL -} CREATE_SUB_TALBE_MOD_EN; - -typedef enum TALBE_EXISTS_EN { - TBL_ALREADY_EXISTS, - TBL_NO_EXISTS, - TBL_EXISTS_BUTT -} TALBE_EXISTS_EN; - -enum MODE { - SYNC, - ASYNC, - MODE_BUT -}; - -enum QUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT -} ; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- -enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES -}; -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - char type[16]; - int length; - char note[128]; -} SColDes; - -/* Used by main to communicate with parse_opt. 
*/ -typedef struct SArguments_S { - char * metaFile; - char * host; - uint16_t port; - char * user; - char * password; - char * database; - int replica; - char * tb_prefix; - char * sqlFile; - bool use_metric; - bool insert_only; - char * output_file; - int mode; - char * datatype[MAX_NUM_DATATYPE + 1]; - int len_of_binary; - int num_of_CPR; - int num_of_threads; - int num_of_RPR; - int num_of_tables; - int num_of_DPT; - int abort; - int disorderRatio; - int disorderRange; - int method_of_delete; - char ** arg_list; -} SArguments; - -typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN + 1]; - char dataType[MAX_TB_NAME_SIZE]; - int dataLen; - char note[128]; -} StrColumn; - -typedef struct SSuperTable_S { - char sTblName[MAX_TB_NAME_SIZE]; - int childTblCount; - bool superTblExists; // 0: no, 1: yes - bool childTblExists; // 0: no, 1: yes - int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql - int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - char childTblPrefix[MAX_TB_NAME_SIZE]; - char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample - char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful - int insertRate; // 0: unlimit > 0 rows/s - - int multiThreadWriteOneTbl; // 0: no, 1: yes - int numberOfTblInOneSql; // 0/1: one table, > 1: number of tbl - int rowsPerTbl; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms or us by database precision - int maxSqlLen; // - - int64_t insertRows; // 0: no limit - int timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; // - char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN]; - char tagsFile[MAX_FILE_NAME_LEN]; - - int columnCount; - StrColumn columns[MAX_COLUMN_COUNT]; - int tagCount; - StrColumn tags[MAX_TAG_COUNT]; - - char* childTblName; - char* colsOfCreatChildTable; - int lenOfOneRow; - int lenOfTagOfOneRow; - - char* sampleDataBuf; - int sampleDataBufSize; - //int sampleRowCount; - //int sampleUsePos; - - int tagSource; // 0: rand, 1: tag sample - char* tagDataBuf; - int tagSampleCount; - int tagUsePos; - - // statistics - int64_t totalRowsInserted; - int64_t totalAffectedRows; -} SSuperTable; - -typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; - char create_time[32]; - int32_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[32]; - int32_t cache; //MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[8]; // time resolution - int8_t update; - char status[16]; -} SDbInfo; - -typedef struct SDbCfg_S { -// int maxtablesPerVnode; - int minRows; - int maxRows; - int comp; - int walLevel; - int fsync; - int replica; - int update; - int keep; - int days; - int cache; - int blocks; - int quorum; - char precision[MAX_TB_NAME_SIZE]; -} SDbCfg; - -typedef struct SDataBase_S { - char dbName[MAX_DB_NAME_SIZE]; - int drop; // 0: use exists, 1: if exists, drop then new create - SDbCfg dbCfg; - int superTblCount; - SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; -} SDataBase; - -typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_DB_NAME_SIZE]; - uint16_t port; - char user[MAX_DB_NAME_SIZE]; - char password[MAX_DB_NAME_SIZE]; - char resultFile[MAX_FILE_NAME_LEN]; - bool use_metric; - bool insert_only; - bool do_aggreFunc; - bool queryMode; - - int threadCount; - int threadCountByCreateTbl; - int dbCount; - SDataBase db[MAX_DB_COUNT]; - - // statistics - int64_t totalRowsInserted; - 
int64_t totalAffectedRows; -} SDbs; - -typedef struct SuperQueryInfo_S { - int rate; // 0: unlimit > 0 loop/s - int concurrent; - int sqlCount; - int subscribeMode; // 0: sync, 1: async - int subscribeInterval; // ms - int subscribeRestart; - int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; -} SuperQueryInfo; - -typedef struct SubQueryInfo_S { - char sTblName[MAX_TB_NAME_SIZE]; - int rate; // 0: unlimit > 0 loop/s - int threadCnt; - int subscribeMode; // 0: sync, 1: async - int subscribeInterval; // ms - int subscribeRestart; - int subscribeKeepProgress; - int childTblCount; - char childTblPrefix[MAX_TB_NAME_SIZE]; - int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - - char* childTblName; -} SubQueryInfo; - -typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_DB_NAME_SIZE]; - uint16_t port; - char user[MAX_DB_NAME_SIZE]; - char password[MAX_DB_NAME_SIZE]; - char dbName[MAX_DB_NAME_SIZE]; - char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful - - SuperQueryInfo superQueryInfo; - SubQueryInfo subQueryInfo; -} SQueryMetaInfo; - -typedef struct SThreadInfo_S { - TAOS *taos; - #ifdef TD_LOWA_CURL - CURL *curl_handle; - #endif - int threadID; - char db_name[MAX_DB_NAME_SIZE]; - char fp[4096]; - char tb_prefix[MAX_TB_NAME_SIZE]; - int start_table_id; - int end_table_id; - int data_of_rate; - int64_t start_time; - char* cols; - bool use_metric; - SSuperTable* superTblInfo; - - // for async insert - tsem_t lock_sem; - int64_t counter; - int64_t st; - int64_t et; - int64_t lastTs; - int nrecords_per_request; - - // statistics - int64_t totalRowsInserted; - int64_t totalAffectedRows; -} threadInfo; - -typedef struct curlMemInfo_S { - char *buf; - size_t sizeleft; - } curlMemInfo; - - - -#ifdef LINUX - /* The options we understand. */ - static struct argp_option options[] = { - {0, 'f', "meta file", 0, "The meta data to the execution procedure, if use -f, all others options invalid. Default is NULL.", 0}, - #ifdef _TD_POWER_ - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 1}, - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 2}, - #else - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 1}, - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 2}, - #endif - {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 2}, - {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 2}, - {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2}, - {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, - {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 4}, - {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 4}, - {0, 's', "sql file", 0, "The select sql file.", 6}, - {0, 'M', 0, 0, "Use metric flag.", 4}, - {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 6}, - {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. 
Default is SYNC.", 4}, - {0, 'b', "type_of_cols", 0, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.", 4}, - {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4}, - {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4}, - {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4}, - // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4}, - {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4}, - {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4}, - {0, 'x', 0, 0, "Not insert only flag.", 4}, - {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4}, - {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4}, - //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5}, - {0}}; - -/* Parse a single option. */ -static error_t parse_opt(int key, char *arg, struct argp_state *state) { - // Get the input argument from argp_parse, which we know is a pointer to our arguments structure. - SArguments *arguments = state->input; - wordexp_t full_path; - char **sptr; - switch (key) { - case 'f': - arguments->metaFile = arg; - break; - case 'h': - arguments->host = arg; - break; - case 'p': - arguments->port = atoi(arg); - break; - case 'u': - arguments->user = arg; - break; - case 'P': - arguments->password = arg; - break; - case 'o': - arguments->output_file = arg; - break; - case 's': - arguments->sqlFile = arg; - break; - case 'q': - arguments->mode = atoi(arg); - break; - case 'T': - arguments->num_of_threads = atoi(arg); - break; - //case 'r': - // arguments->num_of_RPR = atoi(arg); - // break; - case 't': - arguments->num_of_tables = atoi(arg); - break; - case 'n': - arguments->num_of_DPT = atoi(arg); - break; - case 'd': - arguments->database = arg; - break; - case 'l': - arguments->num_of_CPR = atoi(arg); - break; - case 'b': - sptr = arguments->datatype; - if (strstr(arg, ",") == NULL) { - if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 && - strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && - strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "TIMESTAMP") != 0 && - strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && - strcasecmp(arg, "BINARY") != 0 && strcasecmp(arg, "NCHAR") != 0) { - argp_error(state, "Invalid data_type!"); - } - sptr[0] = arg; - } else { - int index = 0; - char *dupstr = strdup(arg); - char *running = dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") != 0 && strcasecmp(token, "FLOAT") != 0 && - strcasecmp(token, "TINYINT") != 0 && strcasecmp(token, "BOOL") != 0 && - strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "TIMESTAMP") != 0 && - strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "DOUBLE") != 0 && - strcasecmp(token, "BINARY") != 0 && strcasecmp(token, "NCHAR") != 0) { - argp_error(state, "Invalid data_type!"); - } - sptr[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - } - break; - case 'w': - arguments->len_of_binary = atoi(arg); - break; - case 'm': - arguments->tb_prefix = arg; - break; - case 'M': - arguments->use_metric = true; - break; - case 'x': - arguments->insert_only = false; - break; - case 'c': - if (wordexp(arg, 
&full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - break; - case 'O': - arguments->disorderRatio = atoi(arg); - if (arguments->disorderRatio < 0 || arguments->disorderRatio > 100) - { - argp_error(state, "Invalid disorder ratio, should 1 ~ 100!"); - } - break; - case 'R': - arguments->disorderRange = atoi(arg); - break; - case 'a': - arguments->replica = atoi(arg); - if (arguments->replica > 3 || arguments->replica < 1) - { - arguments->replica = 1; - } - break; - //case 'D': - // arguments->method_of_delete = atoi(arg); - // break; - case OPT_ABORT: - arguments->abort = 1; - break; - case ARGP_KEY_ARG: - /*arguments->arg_list = &state->argv[state->next-1]; - state->next = state->argc;*/ - argp_usage(state); - break; - - default: - return ARGP_ERR_UNKNOWN; - } - return 0; -} - -static struct argp argp = {options, parse_opt, 0, 0}; - -void parse_args(int argc, char *argv[], SArguments *arguments) { - argp_parse(&argp, argc, argv, 0, 0, arguments); - if (arguments->abort) { - #ifndef _ALPINE - error(10, 0, "ABORTED"); - #else - abort(); - #endif - } -} - -#else - void printHelp() { - char indent[10] = " "; - printf("%s%s\n", indent, "-f"); - printf("%s%s%s\n", indent, indent, "The meta file to the execution procedure. Default is './meta.json'."); - printf("%s%s\n", indent, "-c"); - printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'."); - } - - void parse_args(int argc, char *argv[], SArguments *arguments) { - for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-f") == 0) { - arguments->metaFile = argv[++i]; - } else if (strcmp(argv[i], "-c") == 0) { - strcpy(configDir, argv[++i]); - } else if (strcmp(argv[i], "--help") == 0) { - printHelp(); - exit(EXIT_FAILURE); - } else { - fprintf(stderr, "wrong options\n"); - printHelp(); - exit(EXIT_FAILURE); - } - } - } -#endif - -static bool getInfoFromJsonFile(char* file); -//static int generateOneRowDataForStb(SSuperTable* stbInfo); -//static int getDataIntoMemForStb(SSuperTable* stbInfo); -static void init_rand_data(); -static int createDatabases(); -static void createChildTables(); -static int queryDbExec(TAOS *taos, char *command, int type); - -/* ************ Global variables ************ */ - -int32_t randint[MAX_PREPARED_RAND]; -int64_t randbigint[MAX_PREPARED_RAND]; -float randfloat[MAX_PREPARED_RAND]; -double randdouble[MAX_PREPARED_RAND]; -char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; - -SArguments g_args = {NULL, - "127.0.0.1", // host - 6030, // port - "root", // user - #ifdef _TD_POWER_ - "powerdb", // password - #else - "taosdata", // password - #endif - "test", // database - 1, // replica - "t", // tb_prefix - NULL, // sqlFile - false, // use_metric - true, // insert_only - "./output.txt", // output_file - 0, // mode : sync or async - { - "TINYINT", // datatype - "SMALLINT", - "INT", - "BIGINT", - "FLOAT", - "DOUBLE", - "BINARY", - "NCHAR", - "BOOL", - "TIMESTAMP" - }, - 16, // len_of_binary - 10, // num_of_CPR - 10, // num_of_connections/thread - 100, // num_of_RPR - 10000, // num_of_tables - 10000, // num_of_DPT - 0, // abort - 0, // disorderRatio - 1000, // disorderRange - 1, // method_of_delete - NULL // arg_list -}; - - -static int g_jsonType = 0; -static SDbs g_Dbs; -static int g_totalChildTables = 0; -static SQueryMetaInfo g_queryInfo; -static FILE * g_fpOfInsertResult = NULL; - - 
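
The four rand pools declared above (randint, randbigint, randfloat, randdouble) implement a common load-generator trick: pay for rand() once at startup in init_rand_data(), then serve values on the hot insert path by walking a cursor over the precomputed slots, as the rand_* helpers below do. A minimal standalone sketch of the same pool-plus-cursor pattern (the pool name and size here are illustrative, not taosdemo's):

    #include <stdlib.h>

    #define POOL_SIZE 1000000               /* mirrors MAX_PREPARED_RAND */
    static int pool[POOL_SIZE];

    /* fill the pool once, before any worker thread starts */
    static void pool_init(void) {
        for (int i = 0; i < POOL_SIZE; i++) pool[i] = rand();
    }

    /* hot path: no rand() call, just a cursor walk over precomputed values */
    static int pool_next(void) {
        static int cursor;
        cursor = (cursor + 1) % POOL_SIZE;
        return pool[cursor];
    }

Note that the static cursor, like the one in the rand_* helpers below, is not synchronized; concurrent readers may interleave, which is harmless when the values only need to look random.
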
-void tmfclose(FILE *fp) { - if (NULL != fp) { - fclose(fp); - } -} - -void tmfree(char *buf) { - if (NULL != buf) { - free(buf); - } -} - -static int queryDbExec(TAOS *taos, char *command, int type) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; - - for (i = 0; i < 5; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; - } - - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; - } - } - - if (code != 0) { - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); - taos_free_result(res); - //taos_close(taos); - return -1; - } - - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; -} - -static void getResult(TAOS_RES *res, char* resultFileName) { - TAOS_ROW row = NULL; - int num_rows = 0; - int num_fields = taos_field_count(res); - TAOS_FIELD *fields = taos_fetch_fields(res); - - FILE *fp = NULL; - if (resultFileName[0] != 0) { - fp = fopen(resultFileName, "at"); - if (fp == NULL) { - fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName); - } - } - - char* databuf = (char*) calloc(1, 100*1024*1024); - if (databuf == NULL) { - fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n"); - return ; - } - - int totalLen = 0; - char temp[16000]; - - // fetch the records row by row - while ((row = taos_fetch_row(res))) { - if (totalLen >= 100*1024*1024 - 32000) { - if (fp) fprintf(fp, "%s", databuf); - totalLen = 0; - memset(databuf, 0, 100*1024*1024); - } - num_rows++; - int len = taos_print_row(temp, row, fields, num_fields); - len += sprintf(temp + len, "\n"); - //printf("query result:%s\n", temp); - memcpy(databuf + totalLen, temp, len); - totalLen += len; - } - - if (fp) fprintf(fp, "%s", databuf); - tmfclose(fp); - free(databuf); -} - -static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { - TAOS_RES *res = taos_query(taos, command); - if (res == NULL || taos_errno(res) != 0) { - printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); - taos_free_result(res); - return; - } - - getResult(res, resultFileName); - taos_free_result(res); -} - -double getCurrentTime() { - struct timeval tv; - if (gettimeofday(&tv, NULL) != 0) { - perror("Failed to get current time in ms"); - return 0.0; - } - - return tv.tv_sec + tv.tv_usec / 1E6; -} - -static int32_t rand_bool(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 2; -} - -static int32_t rand_tinyint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 128; -} - -static int32_t rand_smallint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 32767; -} - -static int32_t rand_int(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor]; -} - -static int64_t rand_bigint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randbigint[cursor]; - -} - -static float rand_float(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randfloat[cursor]; -} - -static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; -void rand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - //--size; - int n; - for (n = 0; n < size; n++) { - int key = rand_tinyint() % 
(int)(sizeof(charset) - 1); - str[n] = charset[key]; - } - str[n] = 0; - } -} - -static double rand_double() { - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randdouble[cursor]; - -} - -static void init_rand_data() { - for (int i = 0; i < MAX_PREPARED_RAND; i++){ - randint[i] = (int)(rand() % 65535); - randbigint[i] = (int64_t)(rand() % 2147483648); - randfloat[i] = (float)(rand() / 1000.0); - randdouble[i] = (double)(rand() / 1000000.0); - } -} - -static void printfInsertMeta() { - printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n"); - printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - for (int i = 0; i < g_Dbs.dbCount; i++) { - printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); - }else { - printf(" drop: \033[33myes\033[0m\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); - } else { - printf(" precision error: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); - exit(EXIT_FAILURE); - } - } - - printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%d\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); - } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" 
autoCreateTable: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "no"); - } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" childTblExists: \033[33m%s\033[0m\n", "error"); - } - - printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); - printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource); - printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); - printf(" insertRate: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].insertRate); - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); - - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); - }else { - printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); - } - printf(" numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); - printf(" rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); - printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange); - printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); - - printf(" timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); - printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp); - printf(" sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat); - printf(" sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile); - printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); - - printf(" columnCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { - printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - printf("column[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - printf("\n"); - - printf(" tagCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { - printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - printf("tag[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - printf("\n"); - } - printf("\n"); - } - printf("\033[1m\033[40;32m================ insert.json parse 
result END================\033[0m\n"); -} - -static void printfInsertMetaToFile(FILE* fp) { - fprintf(fp, "================ insert.json parse result START================\n"); - fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "password: %s\n", g_Dbs.password); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); - fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - - fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - for (int i = 0; i < g_Dbs.dbCount; i++) { - fprintf(fp, "database[%d]:\n", i); - fprintf(fp, " database name: %s\n", g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); - }else { - fprintf(fp, " drop: yes\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); - } else { - fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision); - } - } - - fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "yes"); - } else { - fprintf(fp, " autoCreateTable: %s\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "yes"); - } else { - fprintf(fp, " childTblExists: %s\n", "error"); - } - - fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode); - fprintf(fp, " insertRate: %d\n", 
g_Dbs.db[i].superTbls[j].insertRate); - fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); - - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); - }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); - } - fprintf(fp, " numberOfTblInOneSql: %d\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); - fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); - fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); - - fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { - fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - fprintf(fp, "\n"); - - fprintf(fp, " tagCount: %d\n ", g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { - fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - fprintf(fp, "\n"); - } - fprintf(fp, "\n"); - } - fprintf(fp, "================ insert.json parse result END ================\n\n"); -} - -static void printfQueryMeta() { - printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); - printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port); - printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("password: \033[33m%s\033[0m\n", g_queryInfo.password); - printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); - - printf("\n"); - printf("specified table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); - printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); - - if (SUBSCRIBE_MODE == g_jsonType) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); - 
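
The \033[...m sequences threaded through these printf calls are ANSI SGR escape codes: 33 selects a yellow foreground, 1 bold, 40;32 green-on-black for the banners, and 0 resets the terminal attributes. A minimal sketch of the convention (the helper name is hypothetical):

    #include <stdio.h>

    /* print "key: value" with the value highlighted in yellow, then reset */
    static void print_highlighted(const char *key, const char *value) {
        printf("%s: \033[33m%s\033[0m\n", key, value);
    }

This is also why printfInsertMetaToFile above repeats the same report without the escape sequences: the codes would appear as literal bytes in a result file.
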
printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); - } - - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName); - - if (SUBSCRIBE_MODE == g_jsonType) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount); - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]); - } - printf("\n"); - printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); -} - - -static char* xFormatTimestamp(char* buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } else { - tt = (time_t)(val / 1000); - } - -/* comment out as it make testcases like select_with_tags.sim fail. - but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. - if (tt < 0) { - tt = 0; - } - */ - -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif - - struct tm* ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); - - if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } - - return buf; -} - -static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64, *((int64_t *)val)); - break; - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - xFormatTimestamp(buf, *(int64_t*)val, precision); - fprintf(fp, "'%s'", buf); - break; - default: - break; - } -} - -static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE* fp = fopen(fname, "at"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to open file: %s\n", fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); - - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); - } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - - int numOfRows = 0; - do { - int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); - } - fputc('\n', fp); - - numOfRows++; - row = taos_fetch_row(tres); - } while( row != NULL); - - fclose(fp); - - return numOfRows; -} - -static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); - - if (code != 0) { - fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while ((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); - return -1; - } - - strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = *((int8_t 
*)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT); - break; - } - } - - return count; -} - -static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) { - FILE *fp = NULL; - if (filename[0] != 0) { - fp = fopen(filename, "at"); - if (fp == NULL) { - fprintf(stderr, "failed to open file: %s\n", filename); - return; - } - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %d\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep1,keep2,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); -} - -static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; - TAOS_RES* res; - - time_t t; - struct tm* lt; - time(&t); - lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - //getResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - //getResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) return; - - for (int i = 0; i < dbCount; i++) { - // printf database info - printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - free(dbInfos[i]); - } - - 
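
dbInfos is an array of individually calloc'ed SDbInfo pointers, so teardown is two-stage: each element is freed inside the loop above, and the pointer array itself is freed just below. The same pattern in isolation (a hypothetical helper, not taosdemo code):

    #include <stdlib.h>

    /* free `count` heap elements first, then the array holding the pointers */
    static void free_ptr_array(void **items, int count) {
        for (int i = 0; i < count; i++) {
            free(items[i]);   /* elements must go first */
        }
        free(items);          /* then the container is safe to release */
    }
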
free(dbInfos); - -} - - -#ifdef TD_LOWA_CURL -static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp) -{ - size_t realsize = size * nmemb; - curlMemInfo* mem = (curlMemInfo*)userp; - - char *ptr = realloc(mem->buf, mem->sizeleft + realsize + 1); - if(ptr == NULL) { - /* out of memory! */ - printf("not enough memory (realloc returned NULL)\n"); - return 0; - } - - mem->buf = ptr; - memcpy(&(mem->buf[mem->sizeleft]), contents, realsize); - mem->sizeleft += realsize; - mem->buf[mem->sizeleft] = 0; - - //printf("result:%s\n\n", mem->buf); - - return realsize; -} - -void curlProceLogin(void) -{ - CURL *curl_handle; - CURLcode res; - - curlMemInfo chunk; - - chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ - chunk.sizeleft = 0; /* no data at this point */ - - //curl_global_init(CURL_GLOBAL_ALL); - - /* init the curl session */ - curl_handle = curl_easy_init(); - - curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1); - - char dstUrl[128] = {0}; - snprintf(dstUrl, 128, "http://%s:6041/rest/login/root/taosdata", g_Dbs.host); - - /* specify URL to get */ - curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); - - /* send all data to this function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); - - /* we pass our 'chunk' struct to the callback function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); - - /* do it! */ - res = curl_easy_perform(curl_handle); - - /* check for errors */ - if(res != CURLE_OK) { - fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - } - else { - //printf("response len:%lu, content: %s \n", (unsigned long)chunk.sizeleft, chunk.buf); - ; - } - - /* cleanup curl stuff */ - curl_easy_cleanup(curl_handle); - - free(chunk.buf); - - /* we're done with libcurl, so clean it up */ - //curl_global_cleanup(); - - return; -} - -int curlProceSql(char* host, uint16_t port, char* sqlstr, CURL *curl_handle) -{ - //curlProceLogin(); - - //CURL *curl_handle; - CURLcode res; - - curlMemInfo chunk; - - chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ - chunk.sizeleft = 0; /* no data at this point */ - - - char dstUrl[128] = {0}; - snprintf(dstUrl, 128, "http://%s:%u/rest/sql", host, port+TSDB_PORT_HTTP); - - //curl_global_init(CURL_GLOBAL_ALL); - - /* init the curl session */ - //curl_handle = curl_easy_init(); - - //curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1L); - - /* specify URL to get */ - curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); - - /* enable TCP keep-alive for this transfer */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPALIVE, 1L); - /* keep-alive idle time to 120 seconds */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPIDLE, 120L); - /* interval time between keep-alive probes: 60 seconds */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPINTVL, 60L); - - /* send all data to this function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); - - /* we pass our 'chunk' struct to the callback function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); - - struct curl_slist *list = NULL; - list = curl_slist_append(list, "Authorization: Basic cm9vdDp0YW9zZGF0YQ=="); - curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); - curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); - - /* Set the expected upload size. 
*/ - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)strlen(sqlstr)); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, sqlstr); - - /* get it! */ - res = curl_easy_perform(curl_handle); - - /* check for errors */ - if(res != CURLE_OK) { - fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - return -1; - } - else { - /* curl_easy_perform() block end and return result */ - //printf("[%32.32s] sql response len:%lu, content: %s \n\n", sqlstr, (unsigned long)chunk.sizeleft, chunk.buf); - ; - } - - curl_slist_free_all(list); /* free the list again */ - - /* cleanup curl stuff */ - //curl_easy_cleanup(curl_handle); - - free(chunk.buf); - - /* we're done with libcurl, so clean it up */ - //curl_global_cleanup(); - - return 0; -} -#endif - -char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); - - return dataBuf; -} - -char* generateTagVaulesForStb(SSuperTable* stbInfo) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - tmfree(dataBuf); - return NULL; - } - - char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1); - if (NULL == buf) { - printf("calloc failed! 
size:%d\n", stbInfo->tags[i].dataLen); - tmfree(dataBuf); - return NULL; - } - rand_string(buf, stbInfo->tags[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); - } else { - printf("No support data type: %s\n", stbInfo->tags[i].dataType); - tmfree(dataBuf); - return NULL; - } - } - dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return dataBuf; -} - -static int calcRowLen(SSuperTable* superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfOneRow += 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfOneRow += 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfOneRow += 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - lenOfOneRow += 21; - } else { - printf("get error data type : %s\n", dataType); - exit(-1); - } - } - - superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char* dataType = superTbls->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfTagOfOneRow += 
-
-static int calcRowLen(SSuperTable* superTbls) {
-  int colIndex;
-  int lenOfOneRow = 0;
-
-  for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
-    char* dataType = superTbls->columns[colIndex].dataType;
-
-    if (strcasecmp(dataType, "BINARY") == 0) {
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "NCHAR") == 0) {
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "INT") == 0) {
-      lenOfOneRow += 11;
-    } else if (strcasecmp(dataType, "BIGINT") == 0) {
-      lenOfOneRow += 21;
-    } else if (strcasecmp(dataType, "SMALLINT") == 0) {
-      lenOfOneRow += 6;
-    } else if (strcasecmp(dataType, "TINYINT") == 0) {
-      lenOfOneRow += 4;
-    } else if (strcasecmp(dataType, "BOOL") == 0) {
-      lenOfOneRow += 6;
-    } else if (strcasecmp(dataType, "FLOAT") == 0) {
-      lenOfOneRow += 22;
-    } else if (strcasecmp(dataType, "DOUBLE") == 0) {
-      lenOfOneRow += 42;
-    } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
-      lenOfOneRow += 21;
-    } else {
-      printf("get error data type : %s\n", dataType);
-      exit(-1);
-    }
-  }
-
-  superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
-
-  int tagIndex;
-  int lenOfTagOfOneRow = 0;
-  for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
-    char* dataType = superTbls->tags[tagIndex].dataType;
-
-    if (strcasecmp(dataType, "BINARY") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "NCHAR") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "INT") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
-    } else if (strcasecmp(dataType, "BIGINT") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
-    } else if (strcasecmp(dataType, "SMALLINT") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
-    } else if (strcasecmp(dataType, "TINYINT") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
-    } else if (strcasecmp(dataType, "BOOL") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
-    } else if (strcasecmp(dataType, "FLOAT") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
-    } else if (strcasecmp(dataType, "DOUBLE") == 0) {
-      lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
-    } else {
-      printf("get error tag type : %s\n", dataType);
-      exit(-1);
-    }
-  }
-
-  superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
-
-  return 0;
-}
-
-
-static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) {
-  char command[BUFFER_SIZE] = "\0";
-  TAOS_RES * res;
-  TAOS_ROW row = NULL;
-
-  char* childTblName = *childTblNameOfSuperTbl;
-
-  //get all child table name use cmd: select tbname from superTblName;
-  snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName);
-  res = taos_query(taos, command);
-  int32_t code = taos_errno(res);
-  if (code != 0) {
-    printf("failed to run command %s\n", command);
-    taos_free_result(res);
-    taos_close(taos);
-    exit(-1);
-  }
-
-  int childTblCount = 10000;
-  int count = 0;
-  childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
-  char* pTblName = childTblName;
-  while ((row = taos_fetch_row(res)) != NULL) {
-    int32_t* len = taos_fetch_lengths(res);
-    strncpy(pTblName, (char *)row[0], len[0]);
-    //printf("==== sub table name: %s\n", pTblName);
-    count++;
-    if (count >= childTblCount - 1) {
-      char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
-      if (tmp != NULL) {
-        childTblName = tmp;
-        childTblCount = (int)(childTblCount*1.5);
-        memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
-      } else {
-        // exit, if allocate more memory failed
-        printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName);
-        tmfree(childTblName);
-        taos_free_result(res);
-        taos_close(taos);
-        exit(-1);
-      }
-    }
-    pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
-  }
-
-  *childTblCountOfSuperTbl = count;
-  *childTblNameOfSuperTbl = childTblName;
-
-  taos_free_result(res);
-  return 0;
-}
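The child-table name buffer above grows by a factor of 1.5 whenever the fetch loop approaches capacity, zeroing only the newly added region. A self-contained sketch of that growth pattern; STRIDE stands in for TSDB_TABLE_NAME_LEN and the error handling is simplified.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define STRIDE 64   /* bytes reserved per table name (assumption) */

    int main(void) {
      int cap = 4;      /* current capacity, in rows */
      char* buf = calloc((size_t)cap * STRIDE, 1);
      if (!buf) return 1;

      for (int count = 0; count < 10; count++) {
        if (count >= cap - 1) {        /* grow before the buffer fills up */
          int newCap = (int)(cap * 1.5);
          char* tmp = realloc(buf, (size_t)newCap * STRIDE);
          if (!tmp) { free(buf); return 1; }
          buf = tmp;
          /* zero only the freshly added region, as the original does */
          memset(buf + count * STRIDE, 0, (size_t)(newCap - count) * STRIDE);
          cap = newCap;
        }
        snprintf(buf + count * STRIDE, STRIDE, "tb%d", count);
      }
      printf("last name: %s\n", buf + 9 * STRIDE);
      free(buf);
      return 0;
    }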
-
-static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) {
-  char command[BUFFER_SIZE] = "\0";
-  TAOS_RES * res;
-  TAOS_ROW row = NULL;
-  int count = 0;
-
-  //get schema use cmd: describe superTblName;
-  snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
-  res = taos_query(taos, command);
-  int32_t code = taos_errno(res);
-  if (code != 0) {
-    printf("failed to run command %s\n", command);
-    taos_free_result(res);
-    return -1;
-  }
-
-  int tagIndex = 0;
-  int columnIndex = 0;
-  TAOS_FIELD *fields = taos_fetch_fields(res);
-  while ((row = taos_fetch_row(res)) != NULL) {
-    if (0 == count) {
-      count++;
-      continue;
-    }
-
-    if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
-      strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
-      strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
-      superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
-      strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
-      tagIndex++;
-    } else {
-      strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
-      strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
-      superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
-      strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
-      columnIndex++;
-    }
-    count++;
-  }
-
-  superTbls->columnCount = columnIndex;
-  superTbls->tagCount = tagIndex;
-  taos_free_result(res);
-
-  calcRowLen(superTbls);
-
-  if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
-    //get all child table name use cmd: select tbname from superTblName;
-    getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount);
-  }
-  return 0;
-}
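The loop above classifies each `describe` result row as a tag or a column by its note field. A hedged mock of that classification; the column order (field, type, length, note) mirrors the TSDB_DESCRIBE_METRIC_*_INDEX constants, but the row representation here is a plain array, not a TAOS_ROW.

    #include <stdio.h>
    #include <string.h>

    enum { FIELD_IDX = 0, TYPE_IDX = 1, LENGTH_IDX = 2, NOTE_IDX = 3 };

    int main(void) {
      /* Invented describe output: first row is the timestamp column. */
      const char* rows[][4] = {
        { "ts",   "TIMESTAMP", "8",  ""    },
        { "v",    "FLOAT",     "4",  ""    },
        { "site", "BINARY",    "16", "TAG" },  /* note column marks tags */
      };
      int tags = 0, cols = 0;
      for (size_t i = 1; i < 3; i++) {          /* skip the ts row, as above */
        if (0 == strcmp(rows[i][NOTE_IDX], "TAG")) tags++;
        else cols++;
      }
      printf("columns:%d tags:%d\n", cols, tags);
      return 0;
    }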
-
-static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) {
-  char command[BUFFER_SIZE] = "\0";
-
-  char cols[STRING_LEN] = "\0";
-  int colIndex;
-  int len = 0;
-
-  int lenOfOneRow = 0;
-  for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
-    char* dataType = superTbls->columns[colIndex].dataType;
-
-    if (strcasecmp(dataType, "BINARY") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen);
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "NCHAR") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen);
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
-    } else if (strcasecmp(dataType, "INT") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
-      lenOfOneRow += 11;
-    } else if (strcasecmp(dataType, "BIGINT") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT");
-      lenOfOneRow += 21;
-    } else if (strcasecmp(dataType, "SMALLINT") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT");
-      lenOfOneRow += 6;
-    } else if (strcasecmp(dataType, "TINYINT") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
-      lenOfOneRow += 4;
-    } else if (strcasecmp(dataType, "BOOL") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
-      lenOfOneRow += 6;
-    } else if (strcasecmp(dataType, "FLOAT") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
-      lenOfOneRow += 22;
-    } else if (strcasecmp(dataType, "DOUBLE") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE");
-      lenOfOneRow += 42;
-    } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
-      len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP");
-      lenOfOneRow += 21;
-    } else {
-      taos_close(taos);
-      printf("config error data type : %s\n", dataType);
-      exit(-1);
-    }
-  }
-
-  superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
-  //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow);
-
-  // save for creating child table
-  superTbls->colsOfCreatChildTable = (char*)calloc(len+20, 1);
-  if (NULL == superTbls->colsOfCreatChildTable) {
-    printf("Failed when calloc, size:%d", len+1);
-    taos_close(taos);
-    exit(-1);
-  }
-  snprintf(superTbls->colsOfCreatChildTable, len+20, "(ts timestamp%s)", cols);
-
-  if (use_metric) {
-    char tags[STRING_LEN] = "\0";
-    int tagIndex;
-    len = 0;
-
-    int lenOfTagOfOneRow = 0;
-    len += snprintf(tags + len, STRING_LEN - len, "(");
-    for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
-      char* dataType = superTbls->tags[tagIndex].dataType;
-
-      if (strcasecmp(dataType, "BINARY") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen);
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
-      } else if (strcasecmp(dataType, "NCHAR") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen);
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
-      } else if (strcasecmp(dataType, "INT") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
-      } else if (strcasecmp(dataType, "BIGINT") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
-      } else if (strcasecmp(dataType, "SMALLINT") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
-      } else if (strcasecmp(dataType, "TINYINT") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
-      } else if (strcasecmp(dataType, "BOOL") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
-      } else if (strcasecmp(dataType, "FLOAT") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
-      } else if (strcasecmp(dataType, "DOUBLE") == 0) {
-        len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE");
-        lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
-      } else {
-        taos_close(taos);
-        printf("config error tag type : %s\n", dataType);
-        exit(-1);
-      }
-    }
-    len -= 2;
-    len += snprintf(tags + len, STRING_LEN - len, ")");
-
-    superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
-
-    snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbls->sTblName, cols, tags);
-    if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
-      return -1;
-    }
-    printf("\ncreate supertable %s success!\n\n", superTbls->sTblName);
-  }
-  return 0;
-}
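createSuperTable() assembles the DDL incrementally from the parsed schema. A tiny standalone sketch reproducing the shape of the statement it builds; the names db/stb/col0/t0 and the BINARY length are examples, not values from the source.

    #include <stdio.h>

    int main(void) {
      char cols[256], tags[256], command[1024];
      snprintf(cols, sizeof(cols), ", col%d %s", 0, "INT");
      snprintf(tags, sizeof(tags), "(t%d %s(%d))", 0, "BINARY", 16);
      snprintf(command, sizeof(command),
               "create table if not exists %s.%s (ts timestamp%s) tags %s",
               "db", "stb", cols, tags);
      printf("%s\n", command);
      /* -> create table if not exists db.stb (ts timestamp, col0 INT) tags (t0 BINARY(16)) */
      return 0;
    }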
-
-
-static int createDatabases() {
-  TAOS * taos = NULL;
-  int ret = 0;
-  taos_init();
-  taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
-  if (taos == NULL) {
-    fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
-    exit(-1);
-  }
-  char command[BUFFER_SIZE] = "\0";
-
-
-  for (int i = 0; i < g_Dbs.dbCount; i++) {
-    if (g_Dbs.db[i].drop) {
-      sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
-      if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
-        taos_close(taos);
-        return -1;
-      }
-    }
-
-    int dataLen = 0;
-    dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName);
-
-    if (g_Dbs.db[i].dbCfg.blocks > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks);
-    }
-    if (g_Dbs.db[i].dbCfg.cache > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache);
-    }
-    if (g_Dbs.db[i].dbCfg.days > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days);
-    }
-    if (g_Dbs.db[i].dbCfg.keep > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep);
-    }
-    if (g_Dbs.db[i].dbCfg.replica > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica);
-    }
-    if (g_Dbs.db[i].dbCfg.update > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update);
-    }
-    //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
-    //  dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode);
-    //}
-    if (g_Dbs.db[i].dbCfg.minRows > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows);
-    }
-    if (g_Dbs.db[i].dbCfg.maxRows > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows);
-    }
-    if (g_Dbs.db[i].dbCfg.comp > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp);
-    }
-    if (g_Dbs.db[i].dbCfg.walLevel > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel);
-    }
-    if (g_Dbs.db[i].dbCfg.fsync > 0) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync);
-    }
-    if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
-      dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
-    }
-
-    if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
-      taos_close(taos);
-      return -1;
-    }
-    printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
-
-    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-      // describe super table, if exists
-      sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
-      if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) {
-        g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS;
-        ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
-      } else {
-        g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
-        ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]);
-      }
-
-      if (0 != ret) {
-        taos_close(taos);
-        return -1;
-      }
-    }
-  }
-
-  taos_close(taos);
-  return 0;
-}
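Each database option above is appended only when its configured value is positive, so unset options fall back to server defaults. A compact sketch of that conditional appending; the option values are invented.

    #include <stdio.h>

    int main(void) {
      char cmd[512];
      int len = 0, cache = 16, replica = -1;  /* replica <= 0 means "not set" */
      len += snprintf(cmd + len, sizeof(cmd) - len,
                      "create database if not exists %s ", "db");
      if (cache > 0)
        len += snprintf(cmd + len, sizeof(cmd) - len, "cache %d ", cache);
      if (replica > 0)   /* skipped: value not configured */
        len += snprintf(cmd + len, sizeof(cmd) - len, "replica %d ", replica);
      printf("%s\n", cmd);   /* -> create database if not exists db cache 16 */
      return 0;
    }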
-
-
-void * createTable(void *sarg)
-{
-  threadInfo *winfo = (threadInfo *)sarg;
-  SSuperTable* superTblInfo = winfo->superTblInfo;
-
-  int64_t lastPrintTime = taosGetTimestampMs();
-
-  char* buffer = calloc(superTblInfo->maxSqlLen, 1);
-
-  int len = 0;
-  int batchNum = 0;
-  //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
-  for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
-    if (0 == g_Dbs.use_metric) {
-      snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable);
-    } else {
-      if (0 == len) {
-        batchNum = 0;
-        memset(buffer, 0, superTblInfo->maxSqlLen);
-        len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table ");
-      }
-
-      char* tagsValBuf = NULL;
-      if (0 == superTblInfo->tagSource) {
-        tagsValBuf = generateTagVaulesForStb(superTblInfo);
-      } else {
-        tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount);
-      }
-      if (NULL == tagsValBuf) {
-        free(buffer);
-        return NULL;
-      }
-
-      len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf);
-      free(tagsValBuf);
-      batchNum++;
-
-      if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) {
-        continue;
-      }
-    }
-
-    len = 0;
-    if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){
-      free(buffer);
-      return NULL;
-    }
-
-    int64_t currentPrintTime = taosGetTimestampMs();
-    if (currentPrintTime - lastPrintTime > 30*1000) {
-      printf("thread[%d] already create %d - %d tables\n", winfo->threadID, winfo->start_table_id, i);
-      lastPrintTime = currentPrintTime;
-    }
-  }
-
-  if (0 != len) {
-    (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE);
-  }
-
-  free(buffer);
-  return NULL;
-}
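createTable() batches multiple "if not exists ... using ... tags" clauses into one statement, flushing when either the batch count or the length budget runs out. A hedged sketch of that batching rule; BATCH, MAXLEN, and RESERVE are invented stand-ins for batchCreateTableNum, maxSqlLen, and the lenOfTagOfOneRow + 256 reserve.

    #include <stdio.h>

    int main(void) {
      enum { BATCH = 3, MAXLEN = 160, RESERVE = 40 };
      char buffer[MAXLEN];
      int len = 0, batchNum = 0;
      for (int i = 0; i < 10; i++) {
        if (len == 0)   /* fresh batch: start with the statement header */
          len += snprintf(buffer + len, sizeof(buffer) - len, "create table ");
        len += snprintf(buffer + len, sizeof(buffer) - len,
                        "if not exists db.t%d using db.stb tags (%d) ", i, i);
        batchNum++;
        if (batchNum < BATCH && (int)sizeof(buffer) - len >= RESERVE)
          continue;                       /* room left: keep accumulating */
        printf("flush: %s\n", buffer);    /* otherwise execute and reset */
        len = 0; batchNum = 0;
      }
      if (len != 0) printf("flush: %s\n", buffer);  /* final partial batch */
      return 0;
    }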
-
-void startMultiThreadCreateChildTable(char* cols, int threads, int ntables, char* db_name, SSuperTable* superTblInfo) {
-  pthread_t *pids = malloc(threads * sizeof(pthread_t));
-  threadInfo *infos = malloc(threads * sizeof(threadInfo));
-
-  if ((NULL == pids) || (NULL == infos)) {
-    printf("malloc failed\n");
-    exit(-1);
-  }
-
-  if (threads < 1) {
-    threads = 1;
-  }
-
-  int a = ntables / threads;
-  if (a < 1) {
-    threads = ntables;
-    a = 1;
-  }
-
-  int b = 0;
-  b = ntables % threads;
-
-  int last = 0;
-  for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    t_info->threadID = i;
-    tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
-    t_info->superTblInfo = superTblInfo;
-    t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
-    t_info->start_table_id = last;
-    t_info->end_table_id = i < b ? last + a : last + a - 1;
-    last = t_info->end_table_id + 1;
-    t_info->use_metric = 1;
-    t_info->cols = cols;
-    pthread_create(pids + i, NULL, createTable, t_info);
-  }
-
-  for (int i = 0; i < threads; i++) {
-    pthread_join(pids[i], NULL);
-  }
-
-  for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    taos_close(t_info->taos);
-  }
-
-  free(pids);
-  free(infos);
-}
-
-
-static void createChildTables() {
-  for (int i = 0; i < g_Dbs.dbCount; i++) {
-    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-      if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
-        continue;
-      }
-      startMultiThreadCreateChildTable(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable, g_Dbs.threadCountByCreateTbl, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
-      g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
-    }
-  }
-}
-
-/*
-static int taosGetLineNum(const char *fileName)
-{
-  int lineNum = 0;
-  char cmd[1024] = { 0 };
-  char buf[1024] = { 0 };
-  sprintf(cmd, "wc -l %s", fileName);
-
-  FILE *fp = popen(cmd, "r");
-  if (fp == NULL) {
-    fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno));
-    return lineNum;
-  }
-
-  if (fgets(buf, sizeof(buf), fp)) {
-    int index = strchr((const char*)buf, ' ') - buf;
-    buf[index] = '\0';
-    lineNum = atoi(buf);
-  }
-  pclose(fp);
-  return lineNum;
-}
-*/
-
-/*
-  Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
-int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
-  size_t n = 0;
-  ssize_t readLen = 0;
-  char * line = NULL;
-
-  FILE *fp = fopen(superTblInfo->tagsFile, "r");
-  if (fp == NULL) {
-    printf("Failed to open tags file: %s, reason:%s\n", superTblInfo->tagsFile, strerror(errno));
-    return -1;
-  }
-
-  if (superTblInfo->tagDataBuf) {
-    free(superTblInfo->tagDataBuf);
-    superTblInfo->tagDataBuf = NULL;
-  }
-
-  int tagCount = 10000;
-  int count = 0;
-  char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
-  if (tagDataBuf == NULL) {
-    printf("Failed to calloc, reason:%s\n", strerror(errno));
-    fclose(fp);
-    return -1;
-  }
-
-  while ((readLen = getline(&line, &n, fp)) != -1) {
-    if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
-      line[--readLen] = 0;
-    }
-
-    if (readLen == 0) {
-      continue;
-    }
-
-    memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
-    count++;
-
-    if (count >= tagCount - 1) {
-      char *tmp = realloc(tagDataBuf, (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
-      if (tmp != NULL) {
-        tagDataBuf = tmp;
-        tagCount = (int)(tagCount*1.5);
-        memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
-      } else {
-        // exit, if allocate more memory failed
-        printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
-        tmfree(tagDataBuf);
-        free(line);
-        fclose(fp);
-        return -1;
-      }
-    }
-  }
-
-  superTblInfo->tagDataBuf = tagDataBuf;
-  superTblInfo->tagSampleCount = count;
-
-  free(line);
-  fclose(fp);
-  return 0;
-}
-
-int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) {
-  // TODO
-  return 0;
-}
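startMultiThreadCreateChildTable() splits ntables across threads so that the first b = ntables % threads workers each take one extra table, which is why end_table_id is `i < b ? last + a : last + a - 1`. A runnable sketch of just that partition arithmetic, with example counts:

    #include <stdio.h>

    int main(void) {
      int ntables = 10, threads = 4;
      int a = ntables / threads;   /* base tables per thread */
      int b = ntables % threads;   /* threads that get one extra */
      int last = 0;
      for (int i = 0; i < threads; i++) {
        int end = (i < b) ? last + a : last + a - 1;
        printf("thread %d: tables %d..%d\n", i, last, end);
        last = end + 1;
      }
      /* -> 0..2, 3..5, 6..7, 8..9 : all 10 tables covered exactly once */
      return 0;
    }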
-
-
-/*
-  Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
-int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) {
-  size_t n = 0;
-  ssize_t readLen = 0;
-  char * line = NULL;
-  int getRows = 0;
-
-  memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow);
-  while (1) {
-    readLen = getline(&line, &n, fp);
-    if (-1 == readLen) {
-      if(0 != fseek(fp, 0, SEEK_SET)) {
-        printf("Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno));
-        return -1;
-      }
-      continue;
-    }
-
-    if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
-      line[--readLen] = 0;
-    }
-
-    if (readLen == 0) {
-      continue;
-    }
-
-    if (readLen > superTblInfo->lenOfOneRow) {
-      printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow);
-      continue;
-    }
-
-    memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen);
-    getRows++;
-
-    if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
-      break;
-    }
-  }
-
-  tmfree(line);
-  return 0;
-}
-
-/*
-void readSampleFromFileToMem(SSuperTable * supterTblInfo) {
-  int ret;
-  if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) {
-    ret = readSampleFromCsvFileToMem(supterTblInfo);
-  } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) {
-    ret = readSampleFromJsonFileToMem(supterTblInfo);
-  }
-
-  if (0 != ret) {
-    exit(-1);
-  }
-}
-*/
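readSampleFromCsvFileToMem() treats the sample file as a ring: when getline() hits EOF it rewinds with fseek and keeps reading until the window is full. A hedged sketch of that wrap-around read; "sample.csv" is a placeholder path and the sketch assumes a non-empty file (an empty one would loop forever, as in the original).

    #include <stdio.h>

    int main(void) {
      FILE* fp = fopen("sample.csv", "r");
      if (!fp) return 1;
      char line[256];
      for (int rows = 0; rows < 8; ) {     /* want 8 rows; file may be shorter */
        if (!fgets(line, sizeof(line), fp)) {
          if (fseek(fp, 0, SEEK_SET) != 0) { fclose(fp); return 1; }
          continue;                        /* EOF: restart from the first line */
        }
        printf("row %d: %s", rows++, line);
      }
      fclose(fp);
      return 0;
    }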
-static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) {
-  bool ret = false;
-
-  // columns
-  cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
-  if (columns && columns->type != cJSON_Array) {
-    printf("failed to read json, columns not found\n");
-    goto PARSE_OVER;
-  } else if (NULL == columns) {
-    superTbls->columnCount = 0;
-    superTbls->tagCount = 0;
-    return true;
-  }
-
-  int columnSize = cJSON_GetArraySize(columns);
-  if (columnSize > MAX_COLUMN_COUNT) {
-    printf("failed to read json, column size overflow, max column size is %d\n", MAX_COLUMN_COUNT);
-    goto PARSE_OVER;
-  }
-
-  int count = 1;
-  int index = 0;
-  StrColumn columnCase;
-
-  //superTbls->columnCount = columnSize;
-  for (int k = 0; k < columnSize; ++k) {
-    cJSON* column = cJSON_GetArrayItem(columns, k);
-    if (column == NULL) continue;
-
-    count = 1;
-    cJSON* countObj = cJSON_GetObjectItem(column, "count");
-    if (countObj && countObj->type == cJSON_Number) {
-      count = countObj->valueint;
-    } else if (countObj && countObj->type != cJSON_Number) {
-      printf("failed to read json, column count not found");
-      goto PARSE_OVER;
-    } else {
-      count = 1;
-    }
-
-    // column info
-    memset(&columnCase, 0, sizeof(StrColumn));
-    cJSON *dataType = cJSON_GetObjectItem(column, "type");
-    if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
-      printf("failed to read json, column type not found");
-      goto PARSE_OVER;
-    }
-    //strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-    strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-
-    cJSON* dataLen = cJSON_GetObjectItem(column, "len");
-    if (dataLen && dataLen->type == cJSON_Number) {
-      columnCase.dataLen = dataLen->valueint;
-    } else if (dataLen && dataLen->type != cJSON_Number) {
-      printf("failed to read json, column len not found");
-      goto PARSE_OVER;
-    } else {
-      columnCase.dataLen = 8;
-    }
-
-    for (int n = 0; n < count; ++n) {
-      strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
-      superTbls->columns[index].dataLen = columnCase.dataLen;
-      index++;
-    }
-  }
-  superTbls->columnCount = index;
-
-  count = 1;
-  index = 0;
-  // tags
-  cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
-  if (!tags || tags->type != cJSON_Array) {
-    printf("failed to read json, tags not found");
-    goto PARSE_OVER;
-  }
-
-  int tagSize = cJSON_GetArraySize(tags);
-  if (tagSize > MAX_TAG_COUNT) {
-    printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT);
-    goto PARSE_OVER;
-  }
-
-  //superTbls->tagCount = tagSize;
-  for (int k = 0; k < tagSize; ++k) {
-    cJSON* tag = cJSON_GetArrayItem(tags, k);
-    if (tag == NULL) continue;
-
-    count = 1;
-    cJSON* countObj = cJSON_GetObjectItem(tag, "count");
-    if (countObj && countObj->type == cJSON_Number) {
-      count = countObj->valueint;
-    } else if (countObj && countObj->type != cJSON_Number) {
-      printf("failed to read json, column count not found");
-      goto PARSE_OVER;
-    } else {
-      count = 1;
-    }
-
-    // column info
-    memset(&columnCase, 0, sizeof(StrColumn));
-    cJSON *dataType = cJSON_GetObjectItem(tag, "type");
-    if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) {
-      printf("failed to read json, tag type not found");
-      goto PARSE_OVER;
-    }
-    strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-
-    cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
-    if (dataLen && dataLen->type == cJSON_Number) {
-      columnCase.dataLen = dataLen->valueint;
-    } else if (dataLen && dataLen->type != cJSON_Number) {
-      printf("failed to read json, column len not found");
-      goto PARSE_OVER;
-    } else {
-      columnCase.dataLen = 0;
-    }
-
-    for (int n = 0; n < count; ++n) {
-      strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
-      superTbls->tags[index].dataLen = columnCase.dataLen;
-      index++;
-    }
-  }
-  superTbls->tagCount = index;
-
-  ret = true;
-
-PARSE_OVER:
-  //free(content);
-  //cJSON_Delete(root);
-  //fclose(fp);
-  return ret;
-}
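The parser above applies a consistent three-way rule to optional JSON fields such as "count" and "len" (for example `{"type": "INT", "count": 2}`): missing means use a default, wrong type means error, otherwise take the value. A hedged standalone sketch of that pattern using cJSON; the include path varies by installation and is an assumption here.

    #include <stdio.h>
    #include <cjson/cJSON.h>   /* header location is an assumption */

    int main(void) {
      cJSON* col = cJSON_Parse("{\"type\": \"INT\", \"count\": 2}");
      if (!col) return 1;

      int count = 1;                                /* default when absent */
      cJSON* countObj = cJSON_GetObjectItem(col, "count");
      if (countObj && countObj->type == cJSON_Number) {
        count = (int)countObj->valueint;            /* present and valid */
      } else if (countObj) {
        printf("count has wrong type\n");           /* present but not a number */
        cJSON_Delete(col);
        return 1;
      }
      printf("count = %d\n", count);
      cJSON_Delete(col);
      return 0;
    }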
-
-static bool getMetaFromInsertJsonFile(cJSON* root) {
-  bool ret = false;
-
-  cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
-  if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
-    strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
-  }
-
-  cJSON* host = cJSON_GetObjectItem(root, "host");
-  if (host && host->type == cJSON_String && host->valuestring != NULL) {
-    strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!host) {
-    strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
-  } else {
-    printf("failed to read json, host not found\n");
-    goto PARSE_OVER;
-  }
-
-  cJSON* port = cJSON_GetObjectItem(root, "port");
-  if (port && port->type == cJSON_Number) {
-    g_Dbs.port = port->valueint;
-  } else if (!port) {
-    g_Dbs.port = 6030;
-  }
-
-  cJSON* user = cJSON_GetObjectItem(root, "user");
-  if (user && user->type == cJSON_String && user->valuestring != NULL) {
-    strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!user) {
-    strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE);
-  }
-
-  cJSON* password = cJSON_GetObjectItem(root, "password");
-  if (password && password->type == cJSON_String && password->valuestring != NULL) {
-    strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!password) {
-    strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE);
-  }
-
-  cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
-  if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
-    strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
-  } else if (!resultfile) {
-    strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
-  }
-
-  cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
-  if (threads && threads->type == cJSON_Number) {
-    g_Dbs.threadCount = threads->valueint;
-  } else if (!threads) {
-    g_Dbs.threadCount = 1;
-  } else {
-    printf("failed to read json, threads not found");
-    goto PARSE_OVER;
-  }
-
-  cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
-  if (threads2 && threads2->type == cJSON_Number) {
-    g_Dbs.threadCountByCreateTbl = threads2->valueint;
-  } else if (!threads2) {
-    g_Dbs.threadCountByCreateTbl = 1;
-  } else {
-    printf("failed to read json, threads2 not found");
-    goto PARSE_OVER;
-  }
-
-  cJSON* dbs = cJSON_GetObjectItem(root, "databases");
-  if (!dbs || dbs->type != cJSON_Array) {
-    printf("failed to read json, databases not found\n");
-    goto PARSE_OVER;
-  }
-
-  int dbSize = cJSON_GetArraySize(dbs);
-  if (dbSize > MAX_DB_COUNT) {
-    printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT);
-    goto PARSE_OVER;
-  }
-
-  g_Dbs.dbCount = dbSize;
-  for (int i = 0; i < dbSize; ++i) {
-    cJSON* dbinfos = cJSON_GetArrayItem(dbs, i);
-    if (dbinfos == NULL) continue;
-
-    // dbinfo
-    cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
-    if (!dbinfo || dbinfo->type != cJSON_Object) {
-      printf("failed to read json, dbinfo not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
-    if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
-      printf("failed to read json, db name not found");
-      goto PARSE_OVER;
-    }
-    strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
-
-    cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
-    if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
-      if (0 == strncasecmp(drop->valuestring, "yes", 3)) {
-        g_Dbs.db[i].drop = 1;
-      } else {
-        g_Dbs.db[i].drop = 0;
-      }
-    } else if (!drop) {
-      g_Dbs.db[i].drop = 0;
-    } else {
-      printf("failed to read json, drop not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
-    if (precision && precision->type == cJSON_String && precision->valuestring != NULL) {
-      strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
-    } else if (!precision) {
-      //strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
-      memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
-    } else {
-      printf("failed to read json, precision not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* update = cJSON_GetObjectItem(dbinfo, "update");
-    if (update && update->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.update = update->valueint;
-    } else if (!update) {
-      g_Dbs.db[i].dbCfg.update = -1;
-    } else {
-      printf("failed to read json, update not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica");
-    if (replica && replica->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.replica = replica->valueint;
-    } else if (!replica) {
-      g_Dbs.db[i].dbCfg.replica = -1;
-    } else {
-      printf("failed to read json, replica not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep");
-    if (keep && keep->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.keep = keep->valueint;
-    } else if (!keep) {
-      g_Dbs.db[i].dbCfg.keep = -1;
-    } else {
-      printf("failed to read json, keep not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* days = cJSON_GetObjectItem(dbinfo, "days");
-    if (days && days->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.days = days->valueint;
-    } else if (!days) {
-      g_Dbs.db[i].dbCfg.days = -1;
-    } else {
-      printf("failed to read json, days not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache");
-    if (cache && cache->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.cache = cache->valueint;
-    } else if (!cache) {
-      g_Dbs.db[i].dbCfg.cache = -1;
-    } else {
-      printf("failed to read json, cache not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks");
-    if (blocks && blocks->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.blocks = blocks->valueint;
-    } else if (!blocks) {
-      g_Dbs.db[i].dbCfg.blocks = -1;
-    } else {
-      printf("failed to read json, block not found");
-      goto PARSE_OVER;
-    }
-
-    //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
-    //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) {
-    //  g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint;
-    //} else if (!maxtablesPerVnode) {
-    //  g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES;
-    //} else {
-    //  printf("failed to read json, maxtablesPerVnode not found");
-    //  goto PARSE_OVER;
-    //}
-
-    cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows");
-    if (minRows && minRows->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
-    } else if (!minRows) {
-      g_Dbs.db[i].dbCfg.minRows = -1;
-    } else {
-      printf("failed to read json, minRows not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
-    if (maxRows && maxRows->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
-    } else if (!maxRows) {
-      g_Dbs.db[i].dbCfg.maxRows = -1;
-    } else {
-      printf("failed to read json, maxRows not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
-    if (comp && comp->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.comp = comp->valueint;
-    } else if (!comp) {
-      g_Dbs.db[i].dbCfg.comp = -1;
-    } else {
-      printf("failed to read json, comp not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
-    if (walLevel && walLevel->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
-    } else if (!walLevel) {
-      g_Dbs.db[i].dbCfg.walLevel = -1;
-    } else {
-      printf("failed to read json, walLevel not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
-    if (quorum && quorum->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
-    } else if (!quorum) {
-      g_Dbs.db[i].dbCfg.quorum = -1;
-    } else {
-      printf("failed to read json, walLevel not found");
-      goto PARSE_OVER;
-    }
-
-    cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
-    if (fsync && fsync->type == cJSON_Number) {
-      g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
-    } else if (!fsync) {
-      g_Dbs.db[i].dbCfg.fsync = -1;
-    } else {
-      printf("failed to read json, fsync not found");
-      goto PARSE_OVER;
-    }
-
-    // super_talbes
-    cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
-    if (!stables || stables->type != cJSON_Array) {
-      printf("failed to read json, super_tables not found");
-      goto PARSE_OVER;
-    }
-
-    int stbSize = cJSON_GetArraySize(stables);
-    if (stbSize > MAX_SUPER_TABLE_COUNT) {
-      printf("failed to read json, databases size overflow, max database is %d\n", MAX_SUPER_TABLE_COUNT);
-      goto PARSE_OVER;
-    }
-
-    g_Dbs.db[i].superTblCount = stbSize;
-    for (int j = 0; j < stbSize; ++j) {
-      cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
-      if (stbInfo == NULL) continue;
-
-      // dbinfo
-      cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
-      if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) {
-        printf("failed to read json, stb name not found");
-        goto PARSE_OVER;
-      }
-      strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
-
-      cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
-      if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
-        printf("failed to read json, childtable_prefix not found");
-        goto PARSE_OVER;
-      }
-      strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
-
-      cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null
-      if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) {
-        if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) {
-          g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL;
-        } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) {
-          g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
-        } else {
-          g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
-        }
-      } else if (!autoCreateTbl) {
-        g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
-      } else {
-        printf("failed to read json, auto_create_table not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
-      if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
-      } else if (!batchCreateTbl) {
-        g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000;
-      } else {
-        printf("failed to read json, batch_create_tbl_num not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
-      if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) {
-        if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
-          g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
-        } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
-          g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
-        } else {
-          g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
-        }
-      } else if (!childTblExists) {
-        g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
-      } else {
-        printf("failed to read json, child_table_exists not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
-      if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
-        printf("failed to read json, childtable_count not found");
-        goto PARSE_OVER;
-      }
-      g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
-
-      cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
-      if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE);
-      } else if (!dataSource) {
-        strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
-      } else {
-        printf("failed to read json, data_source not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
-      if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE);
-        #ifndef TD_LOWA_CURL
-        if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) {
-          printf("There no libcurl, so no support resetful test! please use taosc mode.\n");
-          goto PARSE_OVER;
-        }
-        #endif
-      } else if (!insertMode) {
-        strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
-      } else {
-        printf("failed to read json, insert_mode not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
-      if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE);
-      } else if (!ts) {
-        strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE);
-      } else {
-        printf("failed to read json, start_timestamp not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step");
-      if (timestampStep && timestampStep->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
-      } else if (!timestampStep) {
-        g_Dbs.db[i].superTbls[j].timeStampStep = 1000;
-      } else {
-        printf("failed to read json, timestamp_step not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size");
-      if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint;
-        if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) {
-          g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
-        }
-      } else if (!sampleDataBufSize) {
-        g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
-      } else {
-        printf("failed to read json, sample_buf_size not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
-      if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE);
-      } else if (!sampleFormat) {
-        strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
-      } else {
-        printf("failed to read json, sample_format not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
-      if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN);
-      } else if (!sampleFile) {
-        memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
-      } else {
-        printf("failed to read json, sample_file not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
-      if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) {
-        strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN);
-        if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
-          g_Dbs.db[i].superTbls[j].tagSource = 0;
-        } else {
-          g_Dbs.db[i].superTbls[j].tagSource = 1;
-        }
-      } else if (!tagsFile) {
-        memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
-        g_Dbs.db[i].superTbls[j].tagSource = 0;
-      } else {
-        printf("failed to read json, tags_file not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
-      if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
-        int32_t len = maxSqlLen->valueint;
-        if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
-          len = TSDB_MAX_ALLOWED_SQL_LEN;
-        } else if (len < TSDB_MAX_SQL_LEN) {
-          len = TSDB_MAX_SQL_LEN;
-        }
-        g_Dbs.db[i].superTbls[j].maxSqlLen = len;
-      } else if (!maxSqlLen) {
-        g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN;
-      } else {
-        printf("failed to read json, maxSqlLen not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
-      if (multiThreadWriteOneTbl && multiThreadWriteOneTbl->type == cJSON_String && multiThreadWriteOneTbl->valuestring != NULL) {
-        if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
-          g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
-        } else {
-          g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
-        }
-      } else if (!multiThreadWriteOneTbl) {
-        g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
-      } else {
-        printf("failed to read json, multiThreadWriteOneTbl not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql");
-      if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint;
-      } else if (!numberOfTblInOneSql) {
-        g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0;
-      } else {
-        printf("failed to read json, numberOfTblInOneSql not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl");
-      if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint;
-      } else if (!rowsPerTbl) {
-        g_Dbs.db[i].superTbls[j].rowsPerTbl = 1;
-      } else {
-        printf("failed to read json, rowsPerTbl not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
-      if (disorderRatio && disorderRatio->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
-      } else if (!disorderRatio) {
-        g_Dbs.db[i].superTbls[j].disorderRatio = 0;
-      } else {
-        printf("failed to read json, disorderRatio not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range");
-      if (disorderRange && disorderRange->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
-      } else if (!disorderRange) {
-        g_Dbs.db[i].superTbls[j].disorderRange = 1000;
-      } else {
-        printf("failed to read json, disorderRange not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* insertRate = cJSON_GetObjectItem(stbInfo, "insert_rate");
-      if (insertRate && insertRate->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].insertRate = insertRate->valueint;
-      } else if (!insertRate) {
-        g_Dbs.db[i].superTbls[j].insertRate = 0;
-      } else {
-        printf("failed to read json, insert_rate not found");
-        goto PARSE_OVER;
-      }
-
-      cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
-      if (insertRows && insertRows->type == cJSON_Number) {
-        g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
-        if (0 == g_Dbs.db[i].superTbls[j].insertRows) {
-          g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
-        }
-      } else if (!insertRows) {
-        g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
-      } else {
-        printf("failed to read json, insert_rows not found");
-        goto PARSE_OVER;
-      }
-
-      if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
-        continue;
-      }
-
-      int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]);
-      if (false == retVal) {
-        goto PARSE_OVER;
-      }
-    }
-  }
-
-  ret = true;
-
-PARSE_OVER:
-  //free(content);
-  //cJSON_Delete(root);
-  //fclose(fp);
-  return ret;
-}
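The max_sql_len handling above clamps the configured value between TSDB_MAX_SQL_LEN and TSDB_MAX_ALLOWED_SQL_LEN. A tiny sketch of that clamp; the bounds 65480 and 1048576 below are invented example values, not the project's definitions.

    #include <stdio.h>

    static int clampSqlLen(int len, int lo, int hi) {
      if (len > hi) return hi;   /* too large: cap at the allowed maximum */
      if (len < lo) return lo;   /* too small: raise to the default maximum */
      return len;
    }

    int main(void) {
      printf("%d %d %d\n",
             clampSqlLen(100,     65480, 1048576),   /* -> 65480   */
             clampSqlLen(2000000, 65480, 1048576),   /* -> 1048576 */
             clampSqlLen(70000,   65480, 1048576));  /* -> 70000   */
      return 0;
    }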
-
-static bool getMetaFromQueryJsonFile(cJSON* root) {
-  bool ret = false;
-
-  cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
-  if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
-    strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
-  }
-
-  cJSON* host = cJSON_GetObjectItem(root, "host");
-  if (host && host->type == cJSON_String && host->valuestring != NULL) {
-    strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!host) {
-    strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
-  } else {
-    printf("failed to read json, host not found\n");
-    goto PARSE_OVER;
-  }
-
-  cJSON* port = cJSON_GetObjectItem(root, "port");
-  if (port && port->type == cJSON_Number) {
-    g_queryInfo.port = port->valueint;
-  } else if (!port) {
-    g_queryInfo.port = 6030;
-  }
-
-  cJSON* user = cJSON_GetObjectItem(root, "user");
-  if (user && user->type == cJSON_String && user->valuestring != NULL) {
-    strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!user) {
-    strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ;
-  }
-
-  cJSON* password = cJSON_GetObjectItem(root, "password");
-  if (password && password->type == cJSON_String && password->valuestring != NULL) {
-    strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!password) {
-    strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
-  }
-
-  cJSON* dbs = cJSON_GetObjectItem(root, "databases");
-  if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
-    strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
-  } else if (!dbs) {
-    printf("failed to read json, databases not found\n");
-    goto PARSE_OVER;
-  }
-
-  cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
-  if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
-    strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
-  } else if (!queryMode) {
-    strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
-  } else {
-    printf("failed to read json, query_mode not found\n");
-    goto PARSE_OVER;
-  }
-
-  // super_table_query
-  cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query");
-  if (!superQuery) {
-    g_queryInfo.superQueryInfo.concurrent = 0;
-    g_queryInfo.superQueryInfo.sqlCount = 0;
-  } else if (superQuery->type != cJSON_Object) {
-    printf("failed to read json, super_table_query not found");
-    goto PARSE_OVER;
-  } else {
-    cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval");
-    if (rate && rate->type == cJSON_Number) {
-      g_queryInfo.superQueryInfo.rate = rate->valueint;
-    } else if (!rate) {
-      g_queryInfo.superQueryInfo.rate = 0;
-    }
-
-    cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent");
-    if (concurrent && concurrent->type == cJSON_Number) {
-      g_queryInfo.superQueryInfo.concurrent = concurrent->valueint;
-    } else if (!concurrent) {
-      g_queryInfo.superQueryInfo.concurrent = 1;
-    }
-
-    cJSON* mode = cJSON_GetObjectItem(superQuery, "mode");
-    if (mode && mode->type == cJSON_String && mode->valuestring != NULL) {
-      if (0 == strcmp("sync", mode->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeMode = 0;
-      } else if (0 == strcmp("async", mode->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeMode = 1;
-      } else {
-        printf("failed to read json, subscribe mod error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.superQueryInfo.subscribeMode = 0;
-    }
-
-    cJSON* interval = cJSON_GetObjectItem(superQuery, "interval");
-    if (interval && interval->type == cJSON_Number) {
-      g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint;
-    } else if (!interval) {
-      //printf("failed to read json, subscribe interval no found\n");
-      //goto PARSE_OVER;
-      g_queryInfo.superQueryInfo.subscribeInterval = 10000;
-    }
-
-    cJSON* restart = cJSON_GetObjectItem(superQuery, "restart");
-    if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
-      if (0 == strcmp("yes", restart->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeRestart = 1;
-      } else if (0 == strcmp("no", restart->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeRestart = 0;
-      } else {
-        printf("failed to read json, subscribe restart error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.superQueryInfo.subscribeRestart = 1;
-    }
-
-    cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
-    if (keepProgress && keepProgress->type == cJSON_String && keepProgress->valuestring != NULL) {
-      if (0 == strcmp("yes", keepProgress->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
-      } else if (0 == strcmp("no", keepProgress->valuestring)) {
-        g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
-      } else {
-        printf("failed to read json, subscribe keepProgress error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
-    }
-
-    // sqls
-    cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
-    if (!superSqls) {
-      g_queryInfo.superQueryInfo.sqlCount = 0;
-    } else if (superSqls->type != cJSON_Array) {
-      printf("failed to read json, super sqls not found\n");
-      goto PARSE_OVER;
-    } else {
-      int superSqlSize = cJSON_GetArraySize(superSqls);
-      if (superSqlSize > MAX_QUERY_SQL_COUNT) {
-        printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
-        goto PARSE_OVER;
-      }
-
-      g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
-      for (int j = 0; j < superSqlSize; ++j) {
-        cJSON* sql = cJSON_GetArrayItem(superSqls, j);
-        if (sql == NULL) continue;
-
-        cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
-        if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
-          printf("failed to read json, sql not found\n");
-          goto PARSE_OVER;
-        }
-        strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
-
-        cJSON *result = cJSON_GetObjectItem(sql, "result");
-        if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
-          strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
-        } else if (NULL == result) {
-          memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
-        } else {
-          printf("failed to read json, super query result file not found\n");
-          goto PARSE_OVER;
-        }
-      }
-    }
-  }
-
-  // sub_table_query
-  cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query");
-  if (!subQuery) {
-    g_queryInfo.subQueryInfo.threadCnt = 0;
-    g_queryInfo.subQueryInfo.sqlCount = 0;
-  } else if (subQuery->type != cJSON_Object) {
-    printf("failed to read json, sub_table_query not found");
-    ret = true;
-    goto PARSE_OVER;
-  } else {
-    cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval");
-    if (subrate && subrate->type == cJSON_Number) {
-      g_queryInfo.subQueryInfo.rate = subrate->valueint;
-    } else if (!subrate) {
-      g_queryInfo.subQueryInfo.rate = 0;
-    }
-
-    cJSON* threads = cJSON_GetObjectItem(subQuery, "threads");
-    if (threads && threads->type == cJSON_Number) {
-      g_queryInfo.subQueryInfo.threadCnt = threads->valueint;
-    } else if (!threads) {
-      g_queryInfo.subQueryInfo.threadCnt = 1;
-    }
-
-    //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count");
-    //if (subTblCnt && subTblCnt->type == cJSON_Number) {
-    //  g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint;
-    //} else if (!subTblCnt) {
-    //  g_queryInfo.subQueryInfo.childTblCount = 0;
-    //}
-
-    cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname");
-    if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) {
-      strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
-    } else {
-      printf("failed to read json, super table name not found\n");
-      goto PARSE_OVER;
-    }
-
-    cJSON* submode = cJSON_GetObjectItem(subQuery, "mode");
-    if (submode && submode->type == cJSON_String && submode->valuestring != NULL) {
-      if (0 == strcmp("sync", submode->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeMode = 0;
-      } else if (0 == strcmp("async", submode->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeMode = 1;
-      } else {
-        printf("failed to read json, subscribe mod error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.subQueryInfo.subscribeMode = 0;
-    }
-
-    cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval");
-    if (subinterval && subinterval->type == cJSON_Number) {
-      g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint;
-    } else if (!subinterval) {
-      //printf("failed to read json, subscribe interval no found\n");
-      //goto PARSE_OVER;
-      g_queryInfo.subQueryInfo.subscribeInterval = 10000;
-    }
-
-    cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart");
-    if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) {
-      if (0 == strcmp("yes", subrestart->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeRestart = 1;
-      } else if (0 == strcmp("no", subrestart->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeRestart = 0;
-      } else {
-        printf("failed to read json, subscribe restart error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.subQueryInfo.subscribeRestart = 1;
-    }
-
-    cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress");
-    if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) {
-      if (0 == strcmp("yes", subkeepProgress->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeKeepProgress = 1;
-      } else if (0 == strcmp("no", subkeepProgress->valuestring)) {
-        g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
-      } else {
-        printf("failed to read json, subscribe keepProgress error\n");
-        goto PARSE_OVER;
-      }
-    } else {
-      g_queryInfo.subQueryInfo.subscribeKeepProgress = 0;
-    }
-
-    // sqls
-    cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls");
-    if (!subsqls) {
-      g_queryInfo.subQueryInfo.sqlCount = 0;
-    } else if (subsqls->type != cJSON_Array) {
-      printf("failed to read json, super sqls not found\n");
-      goto PARSE_OVER;
-    } else {
-      int superSqlSize = cJSON_GetArraySize(subsqls);
-      if (superSqlSize > MAX_QUERY_SQL_COUNT) {
-        printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT);
-        goto PARSE_OVER;
-      }
-
-      g_queryInfo.subQueryInfo.sqlCount = superSqlSize;
-      for (int j = 0; j < superSqlSize; ++j) {
-        cJSON* sql = cJSON_GetArrayItem(subsqls, j);
-        if (sql == NULL) continue;
-
-        cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
-        if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
-          printf("failed to read json, sql not found\n");
-          goto PARSE_OVER;
-        }
-        strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
-
-        cJSON *result = cJSON_GetObjectItem(sql, "result");
-        if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){
-          strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
-        } else if (NULL == result) {
-          memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
-        } else {
-          printf("failed to read json, sub query result file not found\n");
-          goto PARSE_OVER;
-        }
-      }
-    }
-  }
-
-  ret = true;
-
-PARSE_OVER:
-  //free(content);
-  //cJSON_Delete(root);
-  //fclose(fp);
-  return ret;
-}
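The restart and keepProgress blocks above repeat the same yes/no string parsing four times. A hedged consolidation sketch (not part of the original source) showing how a single helper could express that rule:

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 for "yes", 0 for "no", the default when absent (NULL),
     * and -1 for anything else so the caller can treat it as a parse error. */
    static int parseYesNo(const char* s, int dflt) {
      if (s == NULL) return dflt;
      if (0 == strcmp(s, "yes")) return 1;
      if (0 == strcmp(s, "no"))  return 0;
      return -1;
    }

    int main(void) {
      printf("%d %d %d %d\n",
             parseYesNo("yes", 1), parseYesNo("no", 1),
             parseYesNo(NULL, 1),  parseYesNo("maybe", 1));  /* 1 0 1 -1 */
      return 0;
    }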
-
-static bool getInfoFromJsonFile(char* file) {
-  FILE *fp = fopen(file, "r");
-  if (!fp) {
-    printf("failed to read %s, reason:%s\n", file, strerror(errno));
-    return false;
-  }
-
-  bool ret = false;
-  int maxLen = 64000;
-  char *content = calloc(1, maxLen + 1);
-  int len = fread(content, 1, maxLen, fp);
-  if (len <= 0) {
-    free(content);
-    fclose(fp);
-    printf("failed to read %s, content is null", file);
-    return false;
-  }
-
-  content[len] = 0;
-  cJSON* root = cJSON_Parse(content);
-  if (root == NULL) {
-    printf("failed to cjson parse %s, invalid json format", file);
-    goto PARSE_OVER;
-  }
-
-  cJSON* filetype = cJSON_GetObjectItem(root, "filetype");
-  if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) {
-    if (0 == strcasecmp("insert", filetype->valuestring)) {
-      g_jsonType = INSERT_MODE;
-    } else if (0 == strcasecmp("query", filetype->valuestring)) {
-      g_jsonType = QUERY_MODE;
-    } else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
-      g_jsonType = SUBSCRIBE_MODE;
-    } else {
-      printf("failed to read json, filetype not support\n");
-      goto PARSE_OVER;
-    }
-  } else if (!filetype) {
-    g_jsonType = INSERT_MODE;
-  } else {
-    printf("failed to read json, filetype not found\n");
-    goto PARSE_OVER;
-  }
-
-  if (INSERT_MODE == g_jsonType) {
-    ret = getMetaFromInsertJsonFile(root);
-  } else if (QUERY_MODE == g_jsonType) {
-    ret = getMetaFromQueryJsonFile(root);
-  } else if (SUBSCRIBE_MODE == g_jsonType) {
-    ret = getMetaFromQueryJsonFile(root);
-  } else {
-    printf("input json file type error! please input correct file type: insert or query or subscribe\n");
-    goto PARSE_OVER;
-  }
-
-PARSE_OVER:
-  free(content);
-  cJSON_Delete(root);
-  fclose(fp);
-  return ret;
-}
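getInfoFromJsonFile() dispatches on the config's "filetype" field, defaulting to insert mode when the field is absent. A standalone sketch of that dispatch; the mode constants below are local stand-ins for INSERT_MODE / QUERY_MODE / SUBSCRIBE_MODE.

    #include <stdio.h>
    #include <strings.h>

    enum { MODE_INSERT, MODE_QUERY, MODE_SUBSCRIBE, MODE_BAD };

    static int modeFromFiletype(const char* s) {
      if (s == NULL)                       return MODE_INSERT;  /* default */
      if (0 == strcasecmp(s, "insert"))    return MODE_INSERT;
      if (0 == strcasecmp(s, "query"))     return MODE_QUERY;
      if (0 == strcasecmp(s, "subscribe")) return MODE_SUBSCRIBE;
      return MODE_BAD;
    }

    int main(void) {
      printf("%d %d %d\n", modeFromFiletype(NULL),
             modeFromFiletype("query"), modeFromFiletype("oops"));  /* 0 1 3 */
      return 0;
    }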
please input correct file type: insert or query or subscribe\n"); - goto PARSE_OVER; - } - -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; -} - - -void prePareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - //if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].dataSource, "sample", 6)) { - // readSampleFromFileToMem(&g_Dbs.db[i].superTbls[j]); - //} - - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); - } - - #ifdef TD_LOWA_CURL - if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { - curl_global_init(CURL_GLOBAL_ALL); - } - #endif - } - } -} - -void postFreeResource() { - tmfclose(g_fpOfInsertResult); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreatChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreatChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - - #ifdef TD_LOWA_CURL - if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { - curl_global_cleanup(); - } - #endif - } - } -} - -int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) { - if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf); - if (0 != ret) { - return -1; - } - *sampleUsePos = 0; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - (*sampleUsePos)++; - - return dataLen; -} - -int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); - for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - return (-1); - } - - char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); - if (NULL == buf) { - printf("calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); - return (-1); - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_int()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_float()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_double()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); - } else { - printf("No support data type: %s\n", stbInfo->columns[i].dataType); - return (-1); - } - } - dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - return dataLen; -} - -void syncWriteForNumberOfTblInOneSql(threadInfo *winfo, FILE *fp, char* sampleDataBuf) { - SSuperTable* superTblInfo = winfo->superTblInfo; - - int samplePos = 0; - - //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); - int64_t totalRowsInserted = 0; - int64_t totalAffectedRows = 0; - int64_t lastPrintTime = taosGetTimestampMs(); - - char* buffer = calloc(superTblInfo->maxSqlLen+1, 1); - if (NULL == buffer) { - printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen); - return; - } - - int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql; - int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1; - if (numberOfTblInOneSql > tbls) { - numberOfTblInOneSql = tbls; - } - - int64_t time_counter = winfo->start_time; - int64_t tmp_time; - int sampleUsePos; - - int64_t st = 0; - int64_t et = 0; - for (int i = 0; i < superTblInfo->insertRows;) { - if (superTblInfo->insertRate && (et - st) < 1000) { - taosMsleep(1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - if (superTblInfo->insertRate) { - st = taosGetTimestampMs(); - } - - int32_t tbl_id = 0; - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) { - int inserted = i; - - int k = 0; - int batchRowsSql = 0; - while (1) - { - int len = 0; - memset(buffer, 0, superTblInfo->maxSqlLen); - char *pstr = buffer; - - int32_t end_tbl_id = tID + numberOfTblInOneSql; - if (end_tbl_id > winfo->end_table_id) { - end_tbl_id = winfo->end_table_id+1; - } - for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) { - sampleUsePos = samplePos; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == 
superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample(superTblInfo, tbl_id % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - goto free_and_statistics; - } - - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - } - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); - } - } else { // pre-create child table - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); - } - } - - tmp_time = time_counter; - for (k = 0; k < superTblInfo->rowsPerTbl;) { - int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); - if (retLen < 0) { - goto free_and_statistics; - } - } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { - int rand_num = rand_tinyint() % 100; - if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = tmp_time - rand() % superTblInfo->disorderRange; - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); - } else { - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); - } - if (retLen < 0) { - goto free_and_statistics; - } - } - len += retLen; - //inserted++; - k++; - totalRowsInserted++; - batchRowsSql++; - - if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128) || batchRowsSql >= INT16_MAX - 1) { - tID = tbl_id + 1; - printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", superTblInfo->lenOfOneRow); - goto send_to_server; - } - } - - } - - tID = tbl_id; - inserted += superTblInfo->rowsPerTbl; - - send_to_server: - batchRowsSql = 0; - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //printf("multi table===== sql: %s \n\n", buffer); - //int64_t t1 = taosGetTimestampMs(); - int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); - if (0 > affectedRows) { - goto free_and_statistics; - } - totalAffectedRows += affectedRows; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - lastPrintTime = 
currentPrintTime; - } - //int64_t t2 = taosGetTimestampMs(); - //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); - } else { - #ifdef TD_LOWA_CURL - //int64_t t1 = taosGetTimestampMs(); - int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); - //int64_t t2 = taosGetTimestampMs(); - //printf("http insert sql return, Spent %ld ms \n", t2 - t1); - - if (0 != retCode) { - printf("========curl return fail, threadID[%d]\n", winfo->threadID); - goto free_and_statistics; - } - #else - printf("========no use http mode for no curl lib!\n"); - goto free_and_statistics; - #endif - } - - //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt); - break; - } - - if (tID > winfo->end_table_id) { - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - samplePos = sampleUsePos; - } - i = inserted; - time_counter = tmp_time; - } - } - - if (superTblInfo->insertRate) { - et = taosGetTimestampMs(); - } - //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); - } - - free_and_statistics: - tmfree(buffer); - winfo->totalRowsInserted = totalRowsInserted; - winfo->totalAffectedRows = totalAffectedRows; - printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - return; -} - -// sync insertion -/* - 1 thread: 100 tables * 2000 rows/s - 1 thread: 10 tables * 20000 rows/s - 6 thread: 300 tables * 2000 rows/s - - 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s -*/ -void *syncWrite(void *sarg) { - int64_t totalRowsInserted = 0; - int64_t totalAffectedRows = 0; - int64_t lastPrintTime = taosGetTimestampMs(); - - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; - - FILE *fp = NULL; - char* sampleDataBuf = NULL; - int samplePos = 0; - - // each thread read sample data from csv file - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - sampleDataBuf = calloc(superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); - if (sampleDataBuf == NULL) { - printf("Failed to calloc %d Bytes, reason:%s\n", superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); - return NULL; - } - - fp = fopen(superTblInfo->sampleFile, "r"); - if (fp == NULL) { - printf("Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); - tmfree(sampleDataBuf); - return NULL; - } - int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleDataBuf); - if (0 != ret) { - tmfree(sampleDataBuf); - tmfclose(fp); - return NULL; - } - } - - if (superTblInfo->numberOfTblInOneSql > 0) { - syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf); - tmfree(sampleDataBuf); - tmfclose(fp); - return NULL; - } - - //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); - - char* buffer = calloc(superTblInfo->maxSqlLen, 1); - - int nrecords_per_request = 0; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - nrecords_per_request = (superTblInfo->maxSqlLen - 1280 - superTblInfo->lenOfTagOfOneRow) / superTblInfo->lenOfOneRow; - } else { - nrecords_per_request = (superTblInfo->maxSqlLen - 1280) / superTblInfo->lenOfOneRow; - } - - int nrecords_no_last_req = nrecords_per_request; - int nrecords_last_req = 0; - int loop_cnt = 0; - if (0 != superTblInfo->insertRate) { - if (nrecords_no_last_req >= superTblInfo->insertRate) { - 
nrecords_no_last_req = superTblInfo->insertRate; - } else { - nrecords_last_req = superTblInfo->insertRate % nrecords_per_request; - loop_cnt = (superTblInfo->insertRate / nrecords_per_request) + (superTblInfo->insertRate % nrecords_per_request ? 1 : 0) ; - } - } - - if (nrecords_no_last_req <= 0) { - nrecords_no_last_req = 1; - } - - if (nrecords_no_last_req >= INT16_MAX) { - nrecords_no_last_req = INT16_MAX - 1; - } - - if (nrecords_last_req >= INT16_MAX) { - nrecords_last_req = INT16_MAX - 1; - } - - int nrecords_cur_req = nrecords_no_last_req; - int loop_cnt_orig = loop_cnt; - - //printf("========nrecords_per_request:%d, nrecords_no_last_req:%d, nrecords_last_req:%d, loop_cnt:%d\n", nrecords_per_request, nrecords_no_last_req, nrecords_last_req, loop_cnt); - - int64_t time_counter = winfo->start_time; - - int64_t st = 0; - int64_t et = 0; - for (int i = 0; i < superTblInfo->insertRows;) { - if (superTblInfo->insertRate && (et - st) < 1000) { - taosMsleep(1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - if (superTblInfo->insertRate) { - st = taosGetTimestampMs(); - } - - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { - int inserted = i; - int64_t tmp_time = time_counter; - - int sampleUsePos = samplePos; - int k = 0; - while (1) - { - int len = 0; - memset(buffer, 0, superTblInfo->maxSqlLen); - char *pstr = buffer; - - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample(superTblInfo, tID % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - goto free_and_statistics_2; - } - - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values", winfo->db_name, superTblInfo->childTblPrefix, tID, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values", winfo->db_name, superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values", winfo->db_name, superTblInfo->childTblPrefix, tID); - } - - for (k = 0; k < nrecords_cur_req;) { - int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); - if (retLen < 0) { - goto free_and_statistics_2; - } - } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { - int rand_num = rand_tinyint() % 100; - if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = tmp_time - rand() % superTblInfo->disorderRange; - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); - //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d); - } else { - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); - } - if (retLen < 0) { - goto free_and_statistics_2; - } - } - len += retLen; - inserted++; - k++; - totalRowsInserted++; - - if 
(inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) break; - } - - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //printf("===== sql: %s \n\n", buffer); - //int64_t t1 = taosGetTimestampMs(); - int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); - if (0 > affectedRows){ - goto free_and_statistics_2; - } - totalAffectedRows += affectedRows; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - lastPrintTime = currentPrintTime; - } - //int64_t t2 = taosGetTimestampMs(); - //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); - } else { - #ifdef TD_LOWA_CURL - //int64_t t1 = taosGetTimestampMs(); - int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); - //int64_t t2 = taosGetTimestampMs(); - //printf("http insert sql return, Spent %ld ms \n", t2 - t1); - - if (0 != retCode) { - printf("========curl return fail, threadID[%d]\n", winfo->threadID); - goto free_and_statistics_2; - } - #else - printf("========no use http mode for no curl lib!\n"); - goto free_and_statistics_2; - #endif - } - - //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt); - - if (loop_cnt) { - loop_cnt--; - if ((1 == loop_cnt) && (0 != nrecords_last_req)) { - nrecords_cur_req = nrecords_last_req; - } else if (0 == loop_cnt){ - nrecords_cur_req = nrecords_no_last_req; - loop_cnt = loop_cnt_orig; - break; - } - } else { - break; - } - } - - if (tID == winfo->end_table_id) { - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - samplePos = sampleUsePos; - } - i = inserted; - time_counter = tmp_time; - } - } - - if (superTblInfo->insertRate) { - et = taosGetTimestampMs(); - } - //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); - } - - free_and_statistics_2: - tmfree(buffer); - tmfree(sampleDataBuf); - tmfclose(fp); - - winfo->totalRowsInserted = totalRowsInserted; - winfo->totalAffectedRows = totalAffectedRows; - - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - return NULL; -} - -void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; - - if (winfo->superTblInfo->insertRate) { - winfo->et = taosGetTimestampMs(); - if (winfo->et - winfo->st < 1000) { - taosMsleep(1000 - (winfo->et - winfo->st)); // ms - } - } - - char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); - char *data = calloc(1, MAX_DATA_SIZE); - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id); - if (winfo->counter >= winfo->superTblInfo->insertRows) { - winfo->start_table_id++; - winfo->counter = 0; - } - if (winfo->start_table_id > winfo->end_table_id) { - tsem_post(&winfo->lock_sem); - free(buffer); - free(data); - taos_free_result(res); - return; - } - - for (int i = 0; i < winfo->nrecords_per_request; i++) { - int rand_num = rand() % 100; - if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) - { - int64_t d = winfo->lastTs - rand() % 1000000 + rand_num; - //generateData(data, datatype, ncols_per_record, d, 
len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo); - } else { - //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo); - } - pstr += sprintf(pstr, "%s", data); - winfo->counter++; - - if (winfo->counter >= winfo->superTblInfo->insertRows) { - break; - } - } - - if (winfo->superTblInfo->insertRate) { - winfo->st = taosGetTimestampMs(); - } - taos_query_a(winfo->taos, buffer, callBack, winfo); - free(buffer); - free(data); - - taos_free_result(res); -} - -void *asyncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - winfo->nrecords_per_request = 0; - //if (AUTO_CREATE_SUBTBL == winfo->superTblInfo->autoCreateTable) { - winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280 - winfo->superTblInfo->lenOfTagOfOneRow) / winfo->superTblInfo->lenOfOneRow; - //} else { - // winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280) / winfo->superTblInfo->lenOfOneRow; - //} - - if (0 != winfo->superTblInfo->insertRate) { - if (winfo->nrecords_per_request >= winfo->superTblInfo->insertRate) { - winfo->nrecords_per_request = winfo->superTblInfo->insertRate; - } - } - - if (winfo->nrecords_per_request <= 0) { - winfo->nrecords_per_request = 1; - } - - if (winfo->nrecords_per_request >= INT16_MAX) { - winfo->nrecords_per_request = INT16_MAX - 1; - } - - if (winfo->nrecords_per_request >= INT16_MAX) { - winfo->nrecords_per_request = INT16_MAX - 1; - } - - winfo->st = 0; - winfo->et = 0; - winfo->lastTs = winfo->start_time; - - if (winfo->superTblInfo->insertRate) { - winfo->st = taosGetTimestampMs(); - } - taos_query_a(winfo->taos, "show databases", callBack, winfo); - - tsem_wait(&(winfo->lock_sem)); - - return NULL; -} - -void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - threadInfo *infos = malloc(threads * sizeof(threadInfo)); - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - int ntables = superTblInfo->childTblCount; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - //TAOS* taos; - //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - // if (NULL == taos) { - // printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); - // exit(-1); - // } - //} - - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - } else { - printf("No support precision: %s\n", precision); - exit(-1); - } - } - - int64_t start_time; - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - start_time = taosGetTimestamp(timePrec); - } else { - (void)taosParseTime(superTblInfo->startTimestamp, &start_time, strlen(superTblInfo->startTimestamp), timePrec, 0); - } - - double start = getCurrentTime(); - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); - t_info->superTblInfo = superTblInfo; - - t_info->start_time = start_time; - - if (0 == 
strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //t_info->taos = taos; - t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == t_info->taos) { - printf("connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL)); - exit(-1); - } - } else { - t_info->taos = NULL; - #ifdef TD_LOWA_CURL - t_info->curl_handle = curl_easy_init(); - #endif - } - - if (0 == superTblInfo->multiThreadWriteOneTbl) { - t_info->start_table_id = last; - t_info->end_table_id = i < b ? last + a : last + a - 1; - last = t_info->end_table_id + 1; - } else { - t_info->start_table_id = 0; - t_info->end_table_id = superTblInfo->childTblCount - 1; - t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); - } - - tsem_init(&(t_info->lock_sem), 0, 0); - - if (SYNC == g_Dbs.queryMode) { - pthread_create(pids + i, NULL, syncWrite, t_info); - } else { - pthread_create(pids + i, NULL, asyncWrite, t_info); - } - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - - tsem_destroy(&(t_info->lock_sem)); - taos_close(t_info->taos); - - superTblInfo->totalAffectedRows += t_info->totalAffectedRows; - superTblInfo->totalRowsInserted += t_info->totalRowsInserted; - #ifdef TD_LOWA_CURL - if (t_info->curl_handle) { - curl_easy_cleanup(t_info->curl_handle); - } - #endif - } - - double end = getCurrentTime(); - - //taos_close(taos); - - free(pids); - free(infos); - - printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n", - end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName); - fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n", - end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName); -} - - -void *readTable(void *sarg) { -#if 1 - threadInfo *rinfo = (threadInfo *)sarg; - TAOS *taos = rinfo->taos; - char command[BUFFER_SIZE] = "\0"; - int64_t sTime = rinfo->start_time; - char *tb_prefix = rinfo->tb_prefix; - FILE *fp = fopen(rinfo->fp, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); - return NULL; - } - - int num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table; - int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; - int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; - - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); - } - printf("%d records:\n", totalData); - fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - int count = 0; - for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); - - double t = getCurrentTime(); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - return NULL; - } - - while (taos_fetch_row(pSql) != NULL) { - count++; - } - - t = getCurrentTime() - t; - totalT += t; - - taos_free_result(pSql); - } - - fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, - (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT); - } - fprintf(fp, "\n"); - fclose(fp); -#endif - return NULL; -} - -void *readMetric(void *sarg) { -#if 1 - threadInfo *rinfo = (threadInfo *)sarg; - TAOS *taos = rinfo->taos; - char command[BUFFER_SIZE] = "\0"; - FILE *fp = fopen(rinfo->fp, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); - return NULL; - } - - int num_of_DPT = rinfo->superTblInfo->insertRows; - int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; - int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; - - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); - } - printf("%d records:\n", totalData); - fprintf(fp, "Querying On %d records:\n", totalData); - - for (int j = 0; j < n; j++) { - char condition[BUFFER_SIZE - 30] = "\0"; - char tempS[64] = "\0"; - - int m = 10 < num_of_tables ? 
10 : num_of_tables; - - for (int i = 1; i <= m; i++) { - if (i == 1) { - sprintf(tempS, "t1 = %d", i); - } else { - sprintf(tempS, " or t1 = %d ", i); - } - strcat(condition, tempS); - - sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); - - printf("Where condition: %s\n", condition); - fprintf(fp, "%s\n", command); - - double t = getCurrentTime(); - - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - return NULL; - } - int count = 0; - while (taos_fetch_row(pSql) != NULL) { - count++; - } - t = getCurrentTime() - t; - - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t); - - taos_free_result(pSql); - } - fprintf(fp, "\n"); - } - fclose(fp); -#endif - return NULL; -} - - -int insertTestProcess() { - - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == g_fpOfInsertResult) { - fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile); - return 1; - }; - - printfInsertMeta(); - printfInsertMetaToFile(g_fpOfInsertResult); - - printf("Press enter key to continue\n\n"); - (void)getchar(); - - init_rand_data(); - - // create database and super tables - (void)createDatabases(); - - // pretreatement - prePareSampleData(); - - double start; - double end; - - // create child tables - start = getCurrentTime(); - createChildTables(); - end = getCurrentTime(); - if (g_totalChildTables > 0) { - printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount); - fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount); - } - - usleep(1000*1000); - - // create sub threads for inserting data - //start = getCurrentTime(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; - startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, superTblInfo); - } - } - //end = getCurrentTime(); - - //int64_t totalRowsInserted = 0; - //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { - // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted; - // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; - //} - //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount); - if (NULL == g_args.metaFile && false == g_Dbs.insert_only) { - // query data - pthread_t read_id; - threadInfo *rInfo = malloc(sizeof(threadInfo)); - rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - rInfo->start_table_id = 0; - rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1; - //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows; - rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); - strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix); - strcpy(rInfo->fp, g_Dbs.resultFile); - - if (!g_Dbs.use_metric) { - 
pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); - } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - } - - postFreeResource(); - - return 0; -} - -void *superQueryProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - //char sqlStr[MAX_TB_NAME_SIZE*2]; - //sprintf(sqlStr, "use %s", g_queryInfo.dbName); - //queryDB(winfo->taos, sqlStr); - - int64_t st = 0; - int64_t et = 0; - while (1) { - if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - st = taosGetTimestampMs(); - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0); - } else { - #ifdef TD_LOWA_CURL - int64_t t1 = taosGetTimestampUs(); - int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0); - - if (0 != retCode) { - printf("====curl return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } - #endif - } - } - et = taosGetTimestampMs(); - printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0); - } - return NULL; -} - -void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[MAX_TB_NAME_SIZE*3]; - sprintf(subTblName, "%s.%s", g_queryInfo.dbName, g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); - - //printf("inSql: %s\n", inSql); - - char* pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - strncpy(outSql, inSql, pos - inSql); - //printf("1: %s\n", outSql); - strcat(outSql, subTblName); - //printf("2: %s\n", outSql); - strcat(outSql, pos+strlen(sourceString)); - //printf("3: %s\n", outSql); -} - -void *subQueryProcess(void *sarg) { - char sqlstr[1024]; - threadInfo *winfo = (threadInfo *)sarg; - int64_t st = 0; - int64_t et = g_queryInfo.subQueryInfo.rate*1000; - while (1) { - if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - st = taosGetTimestampMs(); - for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - memset(sqlstr,0,sizeof(sqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] 
!= 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, sqlstr, tmpFile); - } - } - et = taosGetTimestampMs(); - printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0); - } - return NULL; -} - -int queryTestProcess() { - TAOS * taos = NULL; - taos_init(); - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.subQueryInfo.sqlCount) { - (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); - } - - printfQueryMeta(); - printf("Press enter key to continue\n\n"); - (void)getchar(); - - printfQuerySystemInfo(taos); - - pthread_t *pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from specify table - if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { - - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - t_info->taos = taos; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE); - } else { - t_info->taos = NULL; - #ifdef TD_LOWA_CURL - t_info->curl_handle = curl_easy_init(); - #endif - } - - pthread_create(pids + i, NULL, superQueryProcess, t_info); - } - }else { - g_queryInfo.superQueryInfo.concurrent = 0; - } - - pthread_t *pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - //==== create sub threads for query from all sub table of the super table - if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infosOfSub + i; - t_info->threadID = i; - - t_info->start_table_id = last; - t_info->end_table_id = i < b ? 
last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->taos = taos; - pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); - } - - g_queryInfo.subQueryInfo.threadCnt = threads; - }else { - g_queryInfo.subQueryInfo.threadCnt = 0; - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); - } - - tmfree((char*)pids); - tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } - - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); - - taos_close(taos); - return 0; -} - -static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res)); - return; - } - - getResult(res, (char*)param); - taos_free_result(res); -} - -static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { - TAOS_SUB* tsub = NULL; - - if (g_queryInfo.superQueryInfo.subscribeMode) { - tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0); - } - - if (tsub == NULL) { - printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql); - return NULL; - } - - return tsub; -} - -void *subSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - char subSqlstr[1024]; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ - return NULL; - } - - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - memset(subSqlstr,0,sizeof(subSqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); - if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { - return NULL; - } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0); - } while (0); - - // start loop to consume result - TAOS_RES* res = NULL; - while (1) { - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.subQueryInfo.subscribeMode) { - continue; - } - - res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - getResult(res, tmpFile); - } - } - } - taos_free_result(res); - - for (int i = 0; i < 
g_queryInfo.subQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - return NULL; -} - -void *superSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) { - return NULL; - } - - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile); - if (NULL == g_queryInfo.superQueryInfo.tsub[i]) { - return NULL; - } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0); - } while (0); - - // start loop to consume result - TAOS_RES* res = NULL; - while (1) { - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.subscribeMode) { - continue; - } - - res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - getResult(res, tmpFile); - } - } - } - taos_free_result(res); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); - } - return NULL; -} - -int subscribeTestProcess() { - printfQueryMeta(); - - printf("Press enter key to continue\n\n"); - (void)getchar(); - - TAOS * taos = NULL; - taos_init(); - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.subQueryInfo.sqlCount) { - (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); - } - - - pthread_t *pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from super table - if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - t_info->taos = taos; - 
pthread_create(pids + i, NULL, superSubscribeProcess, t_info); - } - } - - //==== create sub threads for query from sub table - pthread_t *pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infosOfSub + i; - t_info->threadID = i; - - t_info->start_table_id = last; - t_info->end_table_id = i < b ? last + a : last + a - 1; - t_info->taos = taos; - pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); - } - g_queryInfo.subQueryInfo.threadCnt = threads; - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); - } - - tmfree((char*)pids); - tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } - - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); - taos_close(taos); - return 0; -} - -void initOfInsertMeta() { - memset(&g_Dbs, 0, sizeof(SDbs)); - - // set default values - strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); - g_Dbs.port = 6030; - strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); - g_Dbs.threadCount = 2; - g_Dbs.use_metric = true; -} - -void initOfQueryMeta() { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - - // set default values - strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); - g_queryInfo.port = 6030; - strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); -} - -void setParaFromArg(){ - if (g_args.host) { - strcpy(g_Dbs.host, g_args.host); - } else { - strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); - } - - if (g_args.user) { - strcpy(g_Dbs.user, g_args.user); - } - - if (g_args.password) { - strcpy(g_Dbs.password, g_args.password); - } - - if (g_args.port) { - g_Dbs.port = g_args.port; - } - - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = 1; - - strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - - - strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - - g_Dbs.use_metric = g_args.use_metric; - g_Dbs.insert_only = g_args.insert_only; - - g_Dbs.db[0].superTblCount = 1; - strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountByCreateTbl = 1; - g_Dbs.queryMode = g_args.mode; - - g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].insertRate = 0; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - 
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; -  strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); -  strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); -  strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE); -  strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); -  g_Dbs.db[0].superTbls[0].timeStampStep = 10; - -  // g_args.num_of_RPR; -  g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; -  g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE; - -  g_Dbs.do_aggreFunc = true; - -  char dataString[STRING_LEN]; -  char **data_type = g_args.datatype; - -  memset(dataString, 0, STRING_LEN); - -  if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) { -    g_Dbs.do_aggreFunc = false; -  } - -  g_Dbs.db[0].superTbls[0].columnCount = 0; -  for (int i = 0; i < MAX_NUM_DATATYPE; i++) { -    if (data_type[i] == NULL) { -      break; -    } - -    strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE); -    g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; -    g_Dbs.db[0].superTbls[0].columnCount++; -  } - -  if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { -    g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; -  } else { -    for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { -      strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE); -      g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; -      g_Dbs.db[0].superTbls[0].columnCount++; -    } -  } - -  if (g_Dbs.use_metric) { -    strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE); -    g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - -    strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); -    g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; -    g_Dbs.db[0].superTbls[0].tagCount = 2; -  } else { -    g_Dbs.db[0].superTbls[0].tagCount = 0; -  } -} - -/* Function to do regular expression check */ -static int regexMatch(const char *s, const char *reg, int cflags) { -  regex_t regex; -  char    msgbuf[100] = {0}; - -  /* Compile regular expression */ -  if (regcomp(&regex, reg, cflags) != 0) { -    printf("Fail to compile regex\n"); -    exit(-1); -  } - -  /* Execute regular expression */ -  int reti = regexec(&regex, s, 0, NULL, 0); -  if (!reti) { -    regfree(&regex); -    return 1; -  } else if (reti == REG_NOMATCH) { -    regfree(&regex); -    return 0; -  } else { -    regerror(reti, &regex, msgbuf, sizeof(msgbuf)); -    printf("Regex match failed: %s\n", msgbuf); -    regfree(&regex); -    exit(-1); -  } - -  return 0; -} - -static int isCommentLine(char *line) { -  if (line == NULL) return 1; - -  return regexMatch(line, "^\\s*#.*", REG_EXTENDED); -} - -void querySqlFile(TAOS* taos, char* sqlFile) -{ -  FILE *fp = fopen(sqlFile, "r"); -  if (fp == NULL) { -    printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); -    return; -  } - -  int       read_len = 0; -  char *    cmd = calloc(1, MAX_SQL_SIZE); -  size_t    cmd_len = 0; -  char *    line = NULL; -  size_t    line_len = 0; - -  double t = getCurrentTime(); - -  while ((read_len = tgetline(&line, &line_len, fp)) != -1) { -    if (read_len >= MAX_SQL_SIZE) continue; -    line[--read_len] = '\0'; - -    if (read_len == 0 || isCommentLine(line)) {  // line starts with # -      continue; -    } - -    if (line[read_len - 1] == '\\') { -      line[read_len - 1] = ' '; -      memcpy(cmd + cmd_len, line, read_len); -      cmd_len += read_len; -      continue; -    } - -    memcpy(cmd + 
cmd_len, line, read_len); - queryDbExec(taos, cmd, NO_INSERT_TYPE); - memset(cmd, 0, MAX_SQL_SIZE); - cmd_len = 0; - } - - t = getCurrentTime() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t); - - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; -} - -int main(int argc, char *argv[]) { - parse_args(argc, argv, &g_args); - - if (g_args.metaFile) { - initOfInsertMeta(); - initOfQueryMeta(); - if (false == getInfoFromJsonFile(g_args.metaFile)) { - printf("Failed to read %s\n", g_args.metaFile); - return 1; - } - } else { - - memset(&g_Dbs, 0, sizeof(SDbs)); - g_jsonType = INSERT_MODE; - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS* qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); - return 0; - } - - (void)insertTestProcess(); - if (g_Dbs.insert_only) return 0; - - // select - - //printf("At present, there is no integration of taosdemo, please wait patiently!\n"); - return 0; - } - - if (INSERT_MODE == g_jsonType) { - if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - (void)insertTestProcess(); - } else if (QUERY_MODE == g_jsonType) { - if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - (void)queryTestProcess(); - } else if (SUBSCRIBE_MODE == g_jsonType) { - if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - (void)subscribeTestProcess(); - } else { - ; - } - - taos_cleanup(); - return 0; -} - diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index b50ad85c08cf5fa35862ce42bcc441dc502c3166..58897b89e95743c802755c0476f3b2843a244a59 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 60707f22e297e3cd253d88cc9bed962dd36dacd1..e2105ad18b1df1c430e42f5fb63d8cab06f12663 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -793,6 +793,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu *totalNumOfThread = numOfThread; + free(tblBuf); return 0; } @@ -1553,6 +1554,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao (void)remove(tmpBuf); } + free(tblBuf); return 0; } @@ -2107,13 +2109,15 @@ static void taosGetDirectoryFileList(char *inputDir) if (fileStat.st_mode & S_IFDIR) { taosCheckTablesSQLFile(inputDir); tsSqlFileNum = taosGetFilesNum(inputDir, "sql"); - int totalSQLFileNum = tsSqlFileNum; + int tsSqlFileNumOfTbls = tsSqlFileNum; if (tsDbSqlFile[0] != 0) { - tsSqlFileNum--; + tsSqlFileNumOfTbls--; } taosMallocSQLFiles(); - taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNum); - fprintf(stdout, "\nstart to dispose %d files in %s\n", totalSQLFileNum, inputDir); + if (0 != tsSqlFileNumOfTbls) { + taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNumOfTbls); + } + fprintf(stdout, "\nstart to dispose %d files in %s\n", tsSqlFileNum, inputDir); } else { fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); @@ -2266,7 +2270,10 @@ int taosDumpIn(struct arguments *arguments) { taosGetDirectoryFileList(arguments->inpath); + int32_t tsSqlFileNumOfTbls = tsSqlFileNum; if (tsDbSqlFile[0] != 0) { + tsSqlFileNumOfTbls--; + fp = taosOpenDumpInFile(tsDbSqlFile); if (NULL 
== fp) { fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); @@ -2279,7 +2286,9 @@ int taosDumpIn(struct arguments *arguments) { taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); } - taosStartDumpInWorkThreads(taos, arguments); + if (0 != tsSqlFileNumOfTbls) { + taosStartDumpInWorkThreads(taos, arguments); + } taos_close(taos); taosFreeSQLFiles(); diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt index fffc82c6ef2fb5741d81fb59e4e5fa271e3100f4..2df4708c239515febafc7a4f3ab3f63bd9e434e8 100644 --- a/src/mnode/CMakeLists.txt +++ b/src/mnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/mnode/inc/mnodeDnode.h b/src/mnode/inc/mnodeDnode.h index 56d7455ad24f76a0b289e8ad8882db1867d0bed9..fa1995254e9598614cebfef57221b3ff4238b6f1 100644 --- a/src/mnode/inc/mnodeDnode.h +++ b/src/mnode/inc/mnodeDnode.h @@ -67,6 +67,7 @@ void mnodeCleanupDnodes(); int32_t mnodeGetDnodesNum(); int32_t mnodeGetOnlinDnodesCpuCoreNum(); int32_t mnodeGetOnlineDnodesNum(); +void mnodeGetOnlineAndTotalDnodesNum(int32_t *onlineNum, int32_t *totalNum); void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode); void mnodeCancelGetNextDnode(void *pIter); void mnodeIncDnodeRef(SDnodeObj *pDnode); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index dbb5f56692341f62378c7d65fa937ee3d20788c4..9fdbaa79650c6b1a421501a7c5fe4b4d1709ea3f 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -568,7 +568,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn pShow->bytes[cols] = 24 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "keep1,keep2,keep(D)"); + strcpy(pSchema[cols].name, "keep0,keep1,keep(D)"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index d76ecd9ba00307d8031144a3dab8480a35d1b25f..304096f3ae6178bdb5b7ce0620d81cf29bef11d0 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -263,6 +263,28 @@ int32_t mnodeGetOnlineDnodesNum() { return onlineDnodes; } +void mnodeGetOnlineAndTotalDnodesNum(int32_t *onlineNum, int32_t *totalNum) { + SDnodeObj *pDnode = NULL; + void * pIter = NULL; + int32_t onlineDnodes = 0, totalDnodes = 0; + + while (1) { + pIter = mnodeGetNextDnode(pIter, &pDnode); + if (pDnode == NULL) break; + if (pDnode->status != TAOS_DN_STATUS_OFFLINE) ++onlineDnodes; + ++totalDnodes; + mnodeDecDnodeRef(pDnode); + } + + if (onlineNum) { + *onlineNum = onlineDnodes; + } + + if (totalNum) { + *totalNum = totalDnodes; + } +} + void *mnodeGetDnode(int32_t dnodeId) { return sdbGetRow(tsDnodeSdb, &dnodeId); } diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 89dc32f03a8d63592bf34ba7e0e44566e9bcb92a..17a4282d05935684df4ab6585fef5f2398a62979 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -356,7 +356,7 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->bytes[cols] = 8; pSchema[cols].type = TSDB_DATA_TYPE_BIGINT; - strcpy(pSchema[cols].name, "time(us)"); + strcpy(pSchema[cols].name, "time"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 0377df97fd0d53731eb552aa2871d35d85af91c5..4ff2a38dffcb2186f73e2254efeaa6419c68d50b 100644 --- 
a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -280,8 +280,11 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { } } - pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); - pRsp->totalDnodes = htonl(mnodeGetDnodesNum()); + int32_t onlineDnodes = 0, totalDnodes = 0; + mnodeGetOnlineAndTotalDnodesNum(&onlineDnodes, &totalDnodes); + + pRsp->onlineDnodes = htonl(onlineDnodes); + pRsp->totalDnodes = htonl(totalDnodes); mnodeGetMnodeEpSetForShell(&pRsp->epSet, false); pMsg->rpcRsp.rsp = pRsp; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 7f463899ce2b484bcd3978fa26e4c84394cbecd7..1378847d0b472bb65621fc88951d4fbd6e7d0790 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -777,11 +777,11 @@ static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnode } if (pCreateTable->numOfTags != 0) { - mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, + mDebug("msg:%p, app:%p table:%s, batch create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableName, pMsg->rpcMsg.handle); return mnodeProcessCreateSuperTableMsg(pMsg); } else { - mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, + mDebug("msg:%p, app:%p table:%s, batch create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableName, pMsg->rpcMsg.handle); return mnodeProcessCreateChildTableMsg(pMsg); } @@ -832,12 +832,13 @@ static int32_t mnodeProcessBatchCreateTableMsg(SMnodeMsg *pMsg) { return code; } else if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { ++pMsg->pBatchMasterMsg->received; + pMsg->pBatchMasterMsg->code = code; mnodeDestroySubMsg(pMsg); } if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received >= pMsg->pBatchMasterMsg->expected) { - dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, TSDB_CODE_SUCCESS); + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->pBatchMasterMsg->code); } return TSDB_CODE_MND_ACTION_IN_PROGRESS; @@ -916,11 +917,13 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_DB_IN_DROPPING; } +#if 0 if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("msg:%p, app:%p table:%s, failed to drop table, in monitor database", pMsg, pMsg->rpcMsg.ahandle, pDrop->name); return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN; } +#endif if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->name); if (pMsg->pTable == NULL) { @@ -1904,6 +1907,20 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->tid, pTable->uid, tstrerror(code)); SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .pTable = tsChildTableSdb}; sdbDeleteRow(&desc); + + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->received; + pMsg->pBatchMasterMsg->code = code; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code); + } + + mnodeDestroySubMsg(pMsg); + + return TSDB_CODE_MND_ACTION_IN_PROGRESS; + } + return code; } } @@ -2675,6 +2692,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { if (pMsg->pBatchMasterMsg) { ++pMsg->pBatchMasterMsg->received; + pMsg->pBatchMasterMsg->code = code; if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received >= pMsg->pBatchMasterMsg->expected) { 
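All of the batch-create hunks follow one fan-out/fan-in pattern: each finished sub-message, whether it succeeded or failed, bumps the master message's counters and records any failure code on pBatchMasterMsg, and the sub-message that trips successed + received >= expected sends the one RPC response, which now carries the recorded code instead of an unconditional TSDB_CODE_SUCCESS. A minimal sketch of the idiom, using illustrative names rather than the mnode's actual types (the real counters are updated under the mnode's message serialization, so no locking is shown):

#include <stdint.h>

typedef struct {
  int32_t expected;    /* sub-requests fanned out for this batch          */
  int32_t successed;   /* sub-requests that completed successfully        */
  int32_t received;    /* sub-requests that completed with an error       */
  int32_t code;        /* last failure observed; 0 while all succeed      */
} SBatchState;

/* Called once per finished sub-request. Returns 1 when the whole batch is
 * complete and the caller must send the single final response with st->code. */
static int32_t batchCompleteOne(SBatchState *st, int32_t code) {
  if (code == 0) {
    ++st->successed;
  } else {
    ++st->received;
    st->code = code;   /* remember the failure for the final response */
  }
  return (st->successed + st->received) >= st->expected;
}

Only the last failure survives, which is enough while the response carries a single code; a batch that had to report per-table errors would need to collect them instead.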
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code); @@ -2713,6 +2731,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { if (pMsg->pBatchMasterMsg) { ++pMsg->pBatchMasterMsg->received; + pMsg->pBatchMasterMsg->code = rpcMsg->code; if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received >= pMsg->pBatchMasterMsg->expected) { dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, rpcMsg->code); @@ -3007,10 +3026,12 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_DB_IN_DROPPING; } +#if 0 if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) { mError("msg:%p, app:%p table:%s, failed to alter table, its log db", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname); return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN; } +#endif if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableFname); if (pMsg->pTable == NULL) { diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 09e367ed66b127bfe43cb9e78716464ddc1a9f2e..98eab3f1ed07545974bc3247275530c81fd34e84 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -534,6 +534,20 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) { tstrerror(code)); SSdbRow desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .pTable = tsVgroupSdb}; sdbDeleteRow(&desc); + + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->received; + pMsg->pBatchMasterMsg->code = pMsg->code; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->code); + } + + mnodeDestroySubMsg(pMsg); + + return TSDB_CODE_MND_ACTION_IN_PROGRESS; + } + return code; } else { mInfo("msg:%p, app:%p vgId:%d, is created in sdb, db:%s replica:%d", pMsg, pMsg->rpcMsg.ahandle, pVgroup->vgId, @@ -989,6 +1003,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (mnodeMsg->pBatchMasterMsg) { ++mnodeMsg->pBatchMasterMsg->received; + mnodeMsg->pBatchMasterMsg->code = code; if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received >= mnodeMsg->pBatchMasterMsg->expected) { dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, code); @@ -1011,6 +1026,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (mnodeMsg->pBatchMasterMsg) { ++mnodeMsg->pBatchMasterMsg->received; + mnodeMsg->pBatchMasterMsg->code = mnodeMsg->code; if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received >= mnodeMsg->pBatchMasterMsg->expected) { dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, mnodeMsg->code); diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index f41719f2404297070656910653b65a3fbffa7916..ab8b0f76785c24e3385f49245f6e191b2d57cc40 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h index cb91b0526bdec17af1d5d86bfb24f8f95b56e01c..bbe0f98ec0b632deb39691f74350bced6d2a6515 100644 --- a/src/os/inc/osDef.h +++ b/src/os/inc/osDef.h @@ -83,6 +83,20 @@ extern "C" { } \ } while (0) +#define DEFAULT_DOUBLE_COMP(x, y) \ + do { \ + if (isnan(x) && isnan(y)) { return 0; } \ + if (isnan(x)) { return -1; } \ + if (isnan(y)) { return 1; } \ + if ((x) == (y)) { \ + return 0; \ + } else { \ + return (x) < (y) ? 
-1 : 1; \ + } \ + } while (0) + +#define DEFAULT_FLOAT_COMP(x, y) DEFAULT_DOUBLE_COMP(x, y) + #define ALIGN_NUM(n, align) (((n) + ((align)-1)) & (~((align)-1))) // align to 8bytes diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index 67cfdb3b530a7db17b834a1443d681e1cd5361a7..540efc310c1d89530d6e41733ce2419ad79b1e36 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -25,8 +25,8 @@ extern "C" { // TAOS_OS_FUNC_DIR void taosRemoveDir(char *rootDir); int taosMkDir(const char *pathname, mode_t mode); -void taosRename(char* oldName, char *newName); void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays); +int32_t taosRename(char* oldName, char *newName); int32_t taosCompressFile(char *srcFileName, char *destFileName); #ifdef __cplusplus diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index d54d519cc3c893885a1c92b04501ee4b558290af..a058f2cc99029e311168428ce6e91f88212a4c73 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -69,6 +69,8 @@ extern "C" { #define TAOS_OS_FUNC_FILE_GETTMPFILEPATH #define TAOS_OS_FUNC_FILE_FTRUNCATE +#define TAOS_OS_FUNC_DIR + #define TAOS_OS_FUNC_MATH #define SWAP(a, b, c) \ do { \ diff --git a/src/os/src/alpine/CMakeLists.txt b/src/os/src/alpine/CMakeLists.txt index daa0b3cf43d0de60fafc960e48e3ad8aeec1a9ad..b5e739c24ce7ec3ef3ffc537ca8769706f7b56de 100644 --- a/src/os/src/alpine/CMakeLists.txt +++ b/src/os/src/alpine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/darwin/CMakeLists.txt b/src/os/src/darwin/CMakeLists.txt index 7f05ddd64b87f69e5fa03c874bb2bc401e5094cc..c4cb28aa05e4716ca98c2687ce41d436b1300bb2 100644 --- a/src/os/src/darwin/CMakeLists.txt +++ b/src/os/src/darwin/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC)
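The DEFAULT_DOUBLE_COMP macro added to osDef.h above fixes comparison semantics in the presence of NaN: two NaNs compare equal, and a NaN orders before any ordinary value. A plain ==/< comparator is not a consistent ordering once NaN shows up (NaN compares false against everything, itself included), which can make qsort-style algorithms misbehave. The same total order written out as a standalone, qsort-compatible comparator, as a sketch:

#include <math.h>

/* NaN-aware total order matching DEFAULT_DOUBLE_COMP:
 * NaN == NaN, and NaN sorts before every ordinary double. */
int compareDoubleNanAware(const void *pa, const void *pb) {
  double x = *(const double *)pa;
  double y = *(const double *)pb;
  if (isnan(x) && isnan(y)) return 0;
  if (isnan(x)) return -1;
  if (isnan(y)) return 1;
  if (x == y) return 0;
  return (x < y) ? -1 : 1;
}

qExtbuffer.c later in this patch switches its DOUBLE and FLOAT cases over to these macros for exactly this reason.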
diff --git a/src/os/src/darwin/darwinEnv.c b/src/os/src/darwin/darwinEnv.c index 83344df22104ccd4e691494089d7364ae647d719..8ac97eacfbc53eb67fa141f8bdddc79d8a65777c 100644 --- a/src/os/src/darwin/darwinEnv.c +++ b/src/os/src/darwin/darwinEnv.c @@ -29,17 +29,15 @@ static const char* expand_like_shell(const char *path) { void osInit() { if (configDir[0] == 0) { - strcpy(configDir, expand_like_shell("~/TDengine/cfg")); + strcpy(configDir, expand_like_shell("/usr/local/etc/taos")); } + strcpy(tsDataDir, expand_like_shell("/usr/local/var/lib/taos")); + strcpy(tsLogDir, expand_like_shell("/usr/local/var/log/taos")); + strcpy(tsScriptDir, expand_like_shell("/usr/local/etc/taos")); strcpy(tsVnodeDir, ""); strcpy(tsDnodeDir, ""); strcpy(tsMnodeDir, ""); - - strcpy(tsDataDir, expand_like_shell("~/TDengine/data")); - strcpy(tsLogDir, expand_like_shell("~/TDengine/log")); - strcpy(tsScriptDir, expand_like_shell("~/TDengine/cfg")); - strcpy(tsOsName, "Darwin"); } diff --git a/src/os/src/darwin/darwinSemphone.c b/src/os/src/darwin/darwinSemphone.c index fa3d364db79dc08dd11fa439c3067835662f7c6a..5bb926732a99a1c2094366624b2a0577c85528d2 100644 --- a/src/os/src/darwin/darwinSemphone.c +++ b/src/os/src/darwin/darwinSemphone.c @@ -32,6 +32,7 @@ #include #include #include +#include static pthread_t sem_thread; static pthread_once_t sem_once; @@ -288,7 +289,9 @@ bool taosCheckPthreadValid(pthread_t thread) { } int64_t taosGetSelfPthreadId() { - return (int64_t)pthread_self(); + uint64_t id; + pthread_threadid_np(0, &id); + return (int64_t) id; } int64_t taosGetPthreadId(pthread_t thread) { diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 1c5e55a522250a1532e35075616e9efddb008217..facfbd23af7a579ed11655ce66dddd971677fb18 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(.) diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index 4f2985548c3e05d60b90a5c58aa48d5ee96e7ba2..144c59fa2db3ae24a81679de16116812fddbccc5 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -51,19 +51,22 @@ int taosMkDir(const char *path, mode_t mode) { return code; } -void taosRename(char* oldName, char *newName) { - // if newName in not empty, rename return fail. 
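The osDir.c hunk here replaces the old one-size-fits-all taosRename() — which emulated rename-over-existing on Windows by remove()-ing the target first, leaving a non-atomic window with no destination file — with per-platform implementations selected via TAOS_OS_FUNC_DIR: plain rename(2) on POSIX (below), and MoveFileEx with MOVEFILE_REPLACE_EXISTING on Windows (wFile.c later in the patch). The signature also changes from void to int32_t, so callers can finally observe failures. A hedged sketch of the new caller-side contract; rotateLogFile() is a hypothetical example, not TDengine code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int32_t taosRename(char *oldName, char *newName);  /* from osDir.h, as changed above */

/* Hypothetical caller: the return value (0 on success, non-zero on error)
 * is now meaningful, so rotation can stop instead of silently losing files.
 * (errno-based reporting assumes the POSIX build.) */
int32_t rotateLogFile(char *current, char *archived) {
  if (taosRename(current, archived) != 0) {
    fprintf(stderr, "rotate failed: %s\n", strerror(errno));
    return -1;
  }
  return 0;
}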
- // the newName must be empty or does not exist -#ifdef WINDOWS - remove(newName); -#endif - if (rename(oldName, newName)) { + +#ifndef TAOS_OS_FUNC_DIR + +int32_t taosRename(char* oldName, char *newName) { + int32_t code = rename(oldName, newName); + if (code < 0) { uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } else { - uInfo("successfully to rename file %s to %s", oldName, newName); + uTrace("successfully to rename file %s to %s", oldName, newName); } + + return code; } +#endif + void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) { DIR *dir = opendir(rootDir); if (dir == NULL) return; diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index 0b7b5ca487e5728a5dbdd93e45ee2b0593f98000..538ed378798f53c09c8cb8ada2f2a1a066d8b6bf 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -25,7 +25,8 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { char tmpPath[PATH_MAX]; int32_t len = strlen(tsTempDir); memcpy(tmpPath, tsTempDir, len); - + static uint64_t seqId = 0; + if (tmpPath[len - 1] != '/') { tmpPath[len++] = '/'; } @@ -36,8 +37,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[8] = {0}; - taosRandStr(rand, tListLen(rand) - 1); + char rand[32] = {0}; + + sprintf(rand, "%"PRIu64, atomic_add_fetch_64(&seqId, 1)); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); } diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemphone.c index 1d05ce20a5af51b8e941b0a92d2aea0a04b94882..2bf2f2448752bf468497fc48a8871c5f0120b9ab 100644 --- a/src/os/src/detail/osSemphone.c +++ b/src/os/src/detail/osSemphone.c @@ -31,7 +31,14 @@ int tsem_wait(tsem_t* sem) { #ifndef TAOS_OS_FUNC_SEMPHONE_PTHREAD bool taosCheckPthreadValid(pthread_t thread) { return thread != 0; } -int64_t taosGetSelfPthreadId() { return (int64_t)pthread_self(); } + +int64_t taosGetSelfPthreadId() { + static __thread int id = 0; + if (id != 0) return id; + id = syscall(SYS_gettid); + return id; +} + int64_t taosGetPthreadId(pthread_t thread) { return (int64_t)thread; } void taosResetPthread(pthread_t *thread) { *thread = 0; } bool taosComparePthread(pthread_t first, pthread_t second) { return first == second; } diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt index 8ab8f554672843eec34ed1880ba672e61e54aa7b..b1a7ebf54e58bbbdeea6d5cc219904916cc2ba03 100644 --- a/src/os/src/linux/CMakeLists.txt +++ b/src/os/src/linux/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/windows/CMakeLists.txt b/src/os/src/windows/CMakeLists.txt index a430dd3b3f968cd845732ead4aa1b780aea10c22..9dcc9e7e6d93ff200b7571d98724f898712658eb 100644 --- a/src/os/src/windows/CMakeLists.txt +++ b/src/os/src/windows/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. 
SRC) diff --git a/src/os/src/windows/wFile.c b/src/os/src/windows/wFile.c index 4ad195dc6f1449c1cd09bdc2e7c4c9cb592d34ad..3bfe1c1b5dd24185407b2e2cd1b94ff621089b0b 100644 --- a/src/os/src/windows/wFile.c +++ b/src/os/src/windows/wFile.c @@ -160,7 +160,6 @@ int32_t taosFtruncate(int32_t fd, int64_t l_size) { return 0; } - int fsync(int filedes) { if (filedes < 0) { errno = EBADF; @@ -172,3 +171,14 @@ int fsync(int filedes) { return FlushFileBuffers(h); } + +int32_t taosRename(char* oldName, char *newName) { + int32_t code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED) ? 0 : -1; + if (code < 0) { + uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); + } else { + uTrace("successfully to rename file %s to %s", oldName, newName); + } + + return code; +} \ No newline at end of file diff --git a/src/os/src/windows/wSemphone.c b/src/os/src/windows/wSemphone.c index 0bc760b35e9016f63a8cd40e1035891cf36e27ae..a3f0367ee1179f74769c8883afe5ee47237ac32b 100644 --- a/src/os/src/windows/wSemphone.c +++ b/src/os/src/windows/wSemphone.c @@ -20,6 +20,7 @@ #include "ttimer.h" #include "tulog.h" #include "tutil.h" +#include bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; } @@ -33,7 +34,9 @@ int64_t taosGetPthreadId(pthread_t thread) { #endif } -int64_t taosGetSelfPthreadId() { return taosGetPthreadId(pthread_self()); } +int64_t taosGetSelfPthreadId() { + return GetCurrentThreadId(); +} bool taosComparePthread(pthread_t first, pthread_t second) { return first.p == second.p; @@ -55,4 +58,4 @@ int32_t taosGetCurrentAPPName(char *name, int32_t* len) { } return 0; -} \ No newline at end of file +} diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index e66997dc8ec37409c3bba94979255db523796667..7dcaaf27e615ead75e83630788288a27e938b0a9 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(monitor) diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt index 42016b8645690e24d1bced4c31261b9d785269c5..bfb47ad12e8b1ef7099109ecf5849ec3575caf5f 100644 --- a/src/plugins/http/CMakeLists.txt +++ b/src/plugins/http/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) diff --git a/src/plugins/http/inc/httpRestJson.h b/src/plugins/http/inc/httpRestJson.h index 112e845f369c6ae3f0df76d880f84de5aeff5d8d..5f19983826a4f90bc3498ca82cc9fd495b4f300c 100644 --- a/src/plugins/http/inc/httpRestJson.h +++ b/src/plugins/http/inc/httpRestJson.h @@ -34,6 +34,8 @@ #define REST_JSON_DATA_LEN 4 #define REST_JSON_HEAD "head" #define REST_JSON_HEAD_LEN 4 +#define REST_JSON_HEAD_INFO "column_meta" +#define REST_JSON_HEAD_INFO_LEN 11 #define REST_JSON_ROWS "rows" #define REST_JSON_ROWS_LEN 4 #define REST_JSON_AFFECT_ROWS "affected_rows" @@ -51,4 +53,4 @@ bool restBuildSqlLocalTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAO bool restBuildSqlUtcTimeStringJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows); void restStopSqlJson(HttpContext *pContext, HttpSqlCmd *cmd); -#endif \ No newline at end of file +#endif diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index f71a84a5afe29e94d5bd9ba78638f295645d8e45..13f706af653e9c5c1b9fb0a4c602355b001d0cac 100644 --- 
a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -154,7 +154,10 @@ void httpReleaseContext(HttpContext *pContext, bool clearRes) { } if (clearRes) { - httpClearParser(pContext->parser); + if (pContext->parser) { + httpClearParser(pContext->parser); + } + memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd)); } HttpContext **ppContext = pContext->ppContext; @@ -185,9 +188,9 @@ void httpCloseContextByApp(HttpContext *pContext) { pContext->parsed = false; bool keepAlive = true; - if (parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { + if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { keepAlive = false; - } else if (parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) { + } else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) { keepAlive = false; } else { } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index e537253f0d886dfd3d93d632fda43c77d49acf15..4ce54a8ee630579499fe498e5d8c155bb5916eea 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -229,7 +229,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const return 0; } - else if (strncasecmp(key, "Connection: ", 12) == 0) { + else if (strncasecmp(key, "Connection", 10) == 0) { if (strncasecmp(val, "Keep-Alive", 10) == 0) { parser->keepAlive = HTTP_KEEPALIVE_ENABLE; } else { diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c index aebba97fb8cd320afd7ba6a0f2b1e39dbaf4bee1..7f7ce404600b96b0efe8385ea0aed7be412a6ba0 100644 --- a/src/plugins/http/src/httpQueue.c +++ b/src/plugins/http/src/httpQueue.c @@ -59,7 +59,9 @@ void httpDispatchToResultQueue(void *param, TAOS_RES *result, int32_t code, int3 pMsg->fp = fp; taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg); } else { - (*fp)(param, result, code, rows); + taos_stop_query(result); + taos_free_result(result); + //(*fp)(param, result, code, rows); } } diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 37eef2bfad16f9b4e83317c0fa529035deb598bd..063f2bb04e4760d683434d9e6a600791c2a57888 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -74,7 +74,7 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = 404; else if (errNo == TSDB_CODE_HTTP_UNSUPPORT_URL) httpCode = 404; - else if (errNo == TSDB_CODE_HTTP_INVLALID_URL) + else if (errNo == TSDB_CODE_HTTP_INVALID_URL) httpCode = 404; else if (errNo == TSDB_CODE_HTTP_NO_ENOUGH_MEMORY) httpCode = 507; diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c index 8999fb879edae6e4cfa0ac45b4aec3aa3bdb6fdf..8728b9e37a1561dd545c6878756dcfbc909a27fd 100644 --- a/src/plugins/http/src/httpRestHandle.c +++ b/src/plugins/http/src/httpRestHandle.c @@ -141,6 +141,6 @@ bool restProcessRequest(struct HttpContext* pContext) { } else { } - httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVLALID_URL); + httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVALID_URL); return false; } diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index a620625d25919433a406856289dad0f8da1fc709..61a5a361c4865d6b3dbac73773cc0b8cfc562e56 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -75,6 +75,44 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, 
TAOS_RES *result) // head array end httpJsonToken(jsonBuf, JsonArrEnd); + // column_meta begin + httpJsonItemToken(jsonBuf); + httpJsonPairHead(jsonBuf, REST_JSON_HEAD_INFO, REST_JSON_HEAD_INFO_LEN); + // column_meta array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + if (num_fields == 0) { + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + httpJsonItemToken(jsonBuf); + httpJsonInt(jsonBuf, TSDB_DATA_TYPE_INT); + httpJsonItemToken(jsonBuf); + httpJsonInt(jsonBuf, 4); + + httpJsonToken(jsonBuf, JsonArrEnd); + } else { + for (int32_t i = 0; i < num_fields; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + httpJsonItemToken(jsonBuf); + httpJsonInt(jsonBuf, fields[i].type); + httpJsonItemToken(jsonBuf); + httpJsonInt(jsonBuf, fields[i].bytes); + + httpJsonToken(jsonBuf, JsonArrEnd); + } + } + + // column_meta array end + httpJsonToken(jsonBuf, JsonArrEnd); + // data begin httpJsonItemToken(jsonBuf); httpJsonPairHead(jsonBuf, REST_JSON_DATA, REST_JSON_DATA_LEN); diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index a84ae9f617d7ecd43c98d942dea95d99fd9ba901..399a33954d5670a7e928f856a898dcde1e4ac4eb 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -239,6 +239,10 @@ JsonBuf *httpMallocJsonBuf(HttpContext *pContext) { pContext->jsonBuf = (JsonBuf *)malloc(sizeof(JsonBuf)); } + if (!pContext->jsonBuf->pContext) { + pContext->jsonBuf->pContext = pContext; + } + return pContext->jsonBuf; } diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt index abab07e0cd026d00499c5835e5446fdab9c16df7..28c62a099c0f2bea8b33a57c577bc89c7fb15aaa 100644 --- a/src/plugins/monitor/CMakeLists.txt +++ b/src/plugins/monitor/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 424ab0f216162b00d96ae39fcb4351f3ffea75cf..ac80ad62509a755b5b86b65593a90a2730120df8 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -103,7 +103,9 @@ int32_t monInitSystem() { } int32_t monStartSystem() { - taos_init(); + if (taos_init()) { + return -1; + } tsMonitor.start = 1; monExecuteSQLFp = monExecuteSQL; monInfo("monitor module start"); diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt index b6de4215170be827c9e049044508d586ae6a6977..50b0bbe8af4faeab41a7b041d6aa51747f0aab3e 100644 --- a/src/plugins/mqtt/CMakeLists.txt +++ b/src/plugins/mqtt/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index 967e86de3c5d9288834e3eb3b3222e551009bc49..f23ac7dd86932ba42dde7c2891865f7dff546a00 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 
029bbda67661b4eb3c99aa00fa563a0ba6daddf2..fe585d47ea6d38766c61a430e5f280cb6e43e92d 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -76,6 +76,7 @@ typedef struct SQuerySQL { typedef struct SCreatedTableInfo { SStrToken name; // table name token SStrToken stableName; // super table name token , for using clause + SArray *pTagNames; // create by using super table, tag name SArray *pTagVals; // create by using super table, tag value char *fullname; // table full name STagData tagdata; // true tag data, super table full name is in STagData @@ -244,6 +245,8 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t s tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType); +tSQLExpr *tSqlExprClone(tSQLExpr *pSrc); + void tSqlExprDestroy(tSQLExpr *pExpr); tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pDistinct, SStrToken *pToken); @@ -259,7 +262,7 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe void tSqlExprNodeDestroy(tSQLExpr *pExpr); SAlterTableInfo * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable); -SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 0ab9e578afb613a681f716845f0d14a06825c9fc..4083cbec62b74301c7c0a0b6e891e159901a2dec 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -27,7 +27,7 @@ #define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t)) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pExpr1[1].base.arg->argValue.i64:1) +#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1) int32_t getOutputInterResultBufSize(SQuery* pQuery); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index a6b03f91b18749e1df5cce193ba4e97ab3108cdb..ab5750bfd33f9c3ba6886cce3c6298f3647c55b1 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -356,9 +356,20 @@ create_stable_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP T create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { X.n += F.n; V.n += Z.n; - A = createNewChildTableInfo(&X, Y, &V, &U); + A = createNewChildTableInfo(&X, NULL, Y, &V, &U); } +create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(F) LP tagNamelist(P) RP TAGS LP tagitemlist(Y) RP. { + X.n += F.n; + V.n += Z.n; + A = createNewChildTableInfo(&X, P, Y, &V, &U); +} + +%type tagNamelist{SArray*} +%destructor tagNamelist {taosArrayDestroy($$);} +tagNamelist(A) ::= tagNamelist(X) COMMA ids(Y). {taosArrayPush(X, &Y); A = X; } +tagNamelist(A) ::= ids(X). {A = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(A, &X);} + // create stream // create table table_name as select count(*) from super_table_name interval(time) create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) AS select(S). { @@ -663,6 +674,8 @@ expr(A) ::= expr(X) GE expr(Y). {A = tSqlExprCreate(X, Y, TK_GE);} expr(A) ::= expr(X) NE expr(Y). 
{A = tSqlExprCreate(X, Y, TK_NE);} expr(A) ::= expr(X) EQ expr(Y). {A = tSqlExprCreate(X, Y, TK_EQ);} +expr(A) ::= expr(X) BETWEEN expr(Y) AND expr(Z). { tSQLExpr* X2 = tSqlExprClone(X); A = tSqlExprCreate(tSqlExprCreate(X, Y, TK_GE), tSqlExprCreate(X2, Z, TK_LE), TK_AND);} + expr(A) ::= expr(X) AND expr(Y). {A = tSqlExprCreate(X, Y, TK_AND);} expr(A) ::= expr(X) OR expr(Y). {A = tSqlExprCreate(X, Y, TK_OR); } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index a0e5ea49bd9754f792bc45cceeab71c00de5d015..2572feb2d7e05adc9dfb9421ba165e85f5e1f2cc 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -91,8 +91,9 @@ typedef struct SSpreadInfo { typedef struct SSumInfo { union { - int64_t isum; - double dsum; + int64_t isum; + uint64_t usum; + double dsum; }; int8_t hasResult; } SSumInfo; @@ -549,7 +550,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { uint64_t *retVal = (uint64_t *)pCtx->pOutput; *retVal += (uint64_t)pCtx->preAggVals.statis.sum; - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + } else if (IS_FLOAT_TYPE(pCtx->inputType)) { double *retVal = (double*) pCtx->pOutput; *retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum)); } @@ -572,13 +573,13 @@ static void do_sum(SQLFunctionCtx *pCtx) { } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { uint64_t *retVal = (uint64_t *)pCtx->pOutput; - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { LIST_ADD_N(*retVal, pCtx, pData, uint8_t, notNullElems, pCtx->inputType); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { LIST_ADD_N(*retVal, pCtx, pData, uint16_t, notNullElems, pCtx->inputType); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { LIST_ADD_N(*retVal, pCtx, pData, uint32_t, notNullElems, pCtx->inputType); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { LIST_ADD_N(*retVal, pCtx, pData, uint64_t, notNullElems, pCtx->inputType); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { @@ -615,6 +616,18 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { *res += GET_INT32_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { *res += GET_INT64_VAL(pData); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { + uint64_t *r = (uint64_t *)pCtx->pOutput; + *r += GET_UINT8_VAL(pData); + } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { + uint64_t *r = (uint64_t *)pCtx->pOutput; + *r += GET_UINT16_VAL(pData); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { + uint64_t *r = (uint64_t *)pCtx->pOutput; + *r += GET_UINT32_VAL(pData); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { + uint64_t *r = (uint64_t *)pCtx->pOutput; + *r += GET_UINT64_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { double *retVal = (double*) pCtx->pOutput; *retVal += GET_DOUBLE_VAL(pData); @@ -664,18 +677,12 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) { notNullElems++; - switch (type) { - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_BIGINT: { - *(int64_t *)pCtx->pOutput += pInput->isum; - break; - }; - case TSDB_DATA_TYPE_FLOAT: - case TSDB_DATA_TYPE_DOUBLE: { - *(double *)pCtx->pOutput += pInput->dsum; - } + if 
(IS_SIGNED_NUMERIC_TYPE(type)) { + *(int64_t *)pCtx->pOutput += pInput->isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + *(uint64_t *) pCtx->pOutput += pInput->usum; + } else { + *(double *)pCtx->pOutput += pInput->dsum; } } @@ -1071,7 +1078,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, TYPED_LOOPCHECK_N(uint16_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { TYPED_LOOPCHECK_N(uint32_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { TYPED_LOOPCHECK_N(uint64_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { @@ -1611,7 +1618,9 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) { avg = p->avg; } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); - assert(p != NULL); + if (p == NULL) { + return; + } avg = p->avg; } @@ -2212,19 +2221,22 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, if (pInfo->num < maxLen) { if (pInfo->num == 0 || - ((type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && - val.i64 >= pList[pInfo->num - 1]->v.i64) || - ((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) && - val.dKey >= pList[pInfo->num - 1]->v.dKey)) { + (IS_SIGNED_NUMERIC_TYPE(type) && val.i64 >= pList[pInfo->num - 1]->v.i64) || + (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 >= pList[pInfo->num - 1]->v.u64) || + (IS_FLOAT_TYPE(type) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) { valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage); } else { int32_t i = pInfo->num - 1; - - if (type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { while (i >= 0 && pList[i]->v.i64 > val.i64) { VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); i -= 1; } + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + while (i >= 0 && pList[i]->v.u64 > val.u64) { + VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); + i -= 1; + } } else { while (i >= 0 && pList[i]->v.dKey > val.dKey) { VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); @@ -2238,23 +2250,29 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, pInfo->num++; } else { int32_t i = 0; - - if (((type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && val.i64 > pList[0]->v.i64) || - ((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) && val.dKey > pList[0]->v.dKey)) { + + if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 > pList[0]->v.i64) || + (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 > pList[0]->v.u64) || + (IS_FLOAT_TYPE(type) && val.dKey > pList[0]->v.dKey)) { // find the appropriate the slot position - if (type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { while (i + 1 < maxLen && pList[i + 1]->v.i64 < val.i64) { VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); i += 1; } + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + while (i + 1 < maxLen && pList[i + 1]->v.u64 < val.u64) { + VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); + i += 1; + } } else { while (i + 1 < maxLen && pList[i + 1]->v.dKey < val.dKey) { VALUEPAIRASSIGN(pList[i],
pList[i + 1], pTagInfo->tagsLen); i += 1; } } - - valuePairAssign(pList[i], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage); + + valuePairAssign(pList[i], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); } } } @@ -2273,11 +2291,16 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } else { int32_t i = pInfo->num - 1; - if (type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { while (i >= 0 && pList[i]->v.i64 < val.i64) { VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); i -= 1; } + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + while (i >= 0 && pList[i]->v.u64 < val.u64) { + VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); + i -= 1; + } } else { while (i >= 0 && pList[i]->v.dKey < val.dKey) { VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); @@ -2292,14 +2315,20 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa } else { int32_t i = 0; - if (((type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) && val.i64 < pList[0]->v.i64) || - ((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) && val.dKey < pList[0]->v.dKey)) { + if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 < pList[0]->v.i64) || + (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 < pList[0]->v.u64) || + (IS_FLOAT_TYPE(type) && val.dKey < pList[0]->v.dKey)) { // find the appropriate the slot position - if (type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_BIGINT) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { while (i + 1 < maxLen && pList[i + 1]->v.i64 > val.i64) { VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); i += 1; } + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + while (i + 1 < maxLen && pList[i + 1]->v.u64 > val.u64) { + VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); + i += 1; + } } else { while (i + 1 < maxLen && pList[i + 1]->v.dKey > val.dKey) { VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); @@ -2329,19 +2358,24 @@ static int32_t resDataAscComparFn(const void *pLeft, const void *pRight) { tValuePair *pLeftElem = *(tValuePair **)pLeft; tValuePair *pRightElem = *(tValuePair **)pRight; - int32_t type = pLeftElem->v.nType; - if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { + if (IS_FLOAT_TYPE(pLeftElem->v.nType)) { if (pLeftElem->v.dKey == pRightElem->v.dKey) { return 0; } else { return pLeftElem->v.dKey > pRightElem->v.dKey ? 1 : -1; } - } else { + } else if (IS_SIGNED_NUMERIC_TYPE(pLeftElem->v.nType)){ if (pLeftElem->v.i64 == pRightElem->v.i64) { return 0; } else { return pLeftElem->v.i64 > pRightElem->v.i64 ? 1 : -1; } + } else { + if (pLeftElem->v.u64 == pRightElem->v.u64) { + return 0; + } else { + return pLeftElem->v.u64 > pRightElem->v.u64 ? 
1 : -1; + } } } @@ -2539,9 +2573,13 @@ static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) { static void top_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - + STopBotInfo *pRes = getTopBotOutputInfo(pCtx); assert(pRes->num >= 0); + + if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pRes, pCtx); + } for (int32_t i = 0; i < pCtx->size; ++i) { char *data = GET_INPUT_DATA(pCtx, i); @@ -2614,6 +2652,10 @@ static void bottom_function(SQLFunctionCtx *pCtx) { STopBotInfo *pRes = getTopBotOutputInfo(pCtx); + if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pRes, pCtx); + } + for (int32_t i = 0; i < pCtx->size; ++i) { char *data = GET_INPUT_DATA(pCtx, i); TSKEY ts = GET_TS_DATA(pCtx, i); @@ -2648,6 +2690,11 @@ static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) { } STopBotInfo *pRes = getTopBotOutputInfo(pCtx); + + if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pRes, pCtx); + } + SET_VAL(pCtx, 1, 1); do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); @@ -2744,14 +2791,14 @@ static void percentile_function(SQLFunctionCtx *pCtx) { if (pCtx->preAggVals.isSet) { double tmin = 0.0, tmax = 0.0; if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { - tmin = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.min); - tmax = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.max); + tmin = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.min); + tmax = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.max); } else if (IS_FLOAT_TYPE(pCtx->inputType)) { - tmin = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.min); - tmax = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.max); + tmin = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.min); + tmax = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.max); } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { - tmin = (double)GET_UINT64_VAL(pCtx->preAggVals.statis.min); - tmax = (double)GET_UINT64_VAL(pCtx->preAggVals.statis.max); + tmin = (double)GET_UINT64_VAL(&pCtx->preAggVals.statis.min); + tmax = (double)GET_UINT64_VAL(&pCtx->preAggVals.statis.max); } else { assert(true); } @@ -3090,17 +3137,17 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { numOfElem++; } break; - }; + } case TSDB_DATA_TYPE_BIGINT: { int64_t *p = pData; LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); break; - }; + } case TSDB_DATA_TYPE_DOUBLE: { double *p = pData; LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); break; - }; + } case TSDB_DATA_TYPE_FLOAT: { float *p = pData; LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); @@ -3110,12 +3157,32 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { int16_t *p = pData; LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); break; - }; + } case TSDB_DATA_TYPE_TINYINT: { int8_t *p = pData; LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); break; - }; + } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + 
break; + } + case TSDB_DATA_TYPE_UINT: { + uint32_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + } } pInfo->startVal = x; @@ -3737,6 +3804,14 @@ static void spread_function(SQLFunctionCtx *pCtx) { LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, double, pCtx->inputType, numOfElems); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, float, pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { + LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint8_t, pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { + LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint16_t, pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { + LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint32_t, pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { + LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint64_t, pCtx->inputType, numOfElems); } if (!pCtx->hasNull) { @@ -3998,6 +4073,58 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si } break; } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t *val = (uint8_t*) GET_INPUT_DATA(pCtx, 0); + for (; i < size && i >= 0; i += step) { + if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { + continue; + } + + SPoint1 st = {.key = tsList[i], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t *val = (uint16_t*) GET_INPUT_DATA(pCtx, 0); + for (; i < size && i >= 0; i += step) { + if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { + continue; + } + + SPoint1 st = {.key = tsList[i], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UINT: { + uint32_t *val = (uint32_t*) GET_INPUT_DATA(pCtx, 0); + for (; i < size && i >= 0; i += step) { + if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { + continue; + } + + SPoint1 st = {.key = tsList[i], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t *val = (uint64_t*) GET_INPUT_DATA(pCtx, 0); + for (; i < size && i >= 0; i += step) { + if (pCtx->hasNull && isNull((const char*) &val[i], pCtx->inputType)) { + continue; + } + + SPoint1 st = {.key = tsList[i], .val = (double) val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } default: assert(0); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 2d050388c83e65036ed2e5f2bdacbe92eadddd5f..100993dbc7530d07a0e5693cf586bbc497b2f895 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1639,10 +1639,11 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr } else if (functionId == TSDB_FUNC_ARITHM) { pCtx->param[1].pz = (char*) &pRuntimeEnv->sasArray[i]; } + } - if (i > 0) { - (*rowCellInfoOffset)[i] = (*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + pExpr[i - 1].interBytes; - } + for(int32_t i = 1; i < numOfOutput; ++i) { + (*rowCellInfoOffset)[i] = 
(*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + + pExpr[i - 1].interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery); } setCtxTagColumnInfo(pFuncCtx, numOfOutput); @@ -2157,7 +2158,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i // one page contains at least two rows *ps = DEFAULT_INTERN_BUF_PAGE_SIZE; while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) { - *ps = (*ps << 1u); + *ps = ((*ps) << 1u); } // pRuntimeEnv->numOfRowsPerPage = ((*ps) - sizeof(tFilePage)) / (*rowsize); @@ -2988,6 +2989,14 @@ int32_t initResultRow(SResultRow *pResultRow) { return TSDB_CODE_SUCCESS; } +/* + * The start of each column SResultRowCellInfo is denote by RowCellInfoOffset. + * Note that in case of top/bottom query, the whole multiple rows of result is treated as only one row of results. + * +------------+-----------------result column 1-----------+-----------------result column 2-----------+ + * + SResultRow | SResultRowCellInfo | intermediate buffer1 | SResultRowCellInfo | intermediate buffer 2| + * +------------+-------------------------------------------+-------------------------------------------+ + * offset[0] offset[1] + */ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid) { SQLFunctionCtx* pCtx = pInfo->pCtx; SSDataBlock* pDataBlock = pInfo->pRes; @@ -3632,7 +3641,7 @@ void queryCostStatis(SQInfo *pQInfo) { static void freeTableBlockDist(STableBlockDist *pTableBlockDist) { if (pTableBlockDist != NULL) { - taosArrayDestroy(pTableBlockDist->dataBlockInfos); + taosArrayDestroy(pTableBlockDist->dataBlockInfos); free(pTableBlockDist); } } @@ -4576,7 +4585,7 @@ static SSDataBlock* doArithmeticOperation(void* param) { SArithOperatorInfo* pArithInfo = pOperator->info; SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; SOptrBasicInfo *pInfo = &pArithInfo->binfo; - + pInfo->pRes->info.rows = 0; while(1) { @@ -4964,6 +4973,7 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); size_t tableGroup = GET_NUM_OF_TABLEGROUP(pRuntimeEnv); + pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) tableGroup); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); initResultRowInfo(&pInfo->binfo.resultRowInfo, tableGroup, TSDB_DATA_TYPE_INT); @@ -5814,8 +5824,7 @@ static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable return TSDB_CODE_SUCCESS; } -static UNUSED_FUNC int32_t ddx(SQueryTableMsg* pQueryMsg, SSqlFuncMsg** pExprMsg, SColumnInfo* pTagCols, SExprInfo* pExprs, - int32_t numOfOutput, int32_t tagLen, bool superTable) { +static int32_t updateOutputBufForTopBotQuery(SQueryTableMsg* pQueryMsg, SColumnInfo* pTagCols, SExprInfo* pExprs, int32_t numOfOutput, int32_t tagLen, bool superTable) { for (int32_t i = 0; i < numOfOutput; ++i) { int16_t functId = pExprs[i].base.functionId; @@ -5835,6 +5844,7 @@ static UNUSED_FUNC int32_t ddx(SQueryTableMsg* pQueryMsg, SSqlFuncMsg** pExprMsg return TSDB_CODE_SUCCESS; } +// TODO tag length should be passed from client int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutput, SExprInfo** pExprInfo, SSqlFuncMsg** pExprMsg, SColumnInfo* pTagCols) { *pExprInfo = NULL; @@ -5928,7 +5938,8 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu assert(isValidDataType(pExprs[i].type)); } -// ddx(pQueryMsg, pExprMsg, 
pTagCols, pExprs, numOfOutput, tagLen, isSuperTable); + // the tag length is affected by other tag columns, so this should be update. + updateOutputBufForTopBotQuery(pQueryMsg, pTagCols, pExprs, numOfOutput, tagLen, isSuperTable); *pExprInfo = pExprs; return TSDB_CODE_SUCCESS; diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index e4c62d90e38407954af1fe00454e6df99cb288bd..a73f38528266bfa70790414a2963eb0a8290d7b9 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -362,20 +362,10 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i return (first < second) ? -1 : 1; }; case TSDB_DATA_TYPE_DOUBLE: { - double first = GET_DOUBLE_VAL(f1); - double second = GET_DOUBLE_VAL(f2); - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; + DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); }; case TSDB_DATA_TYPE_FLOAT: { - float first = GET_FLOAT_VAL(f1); - float second = GET_FLOAT_VAL(f2); - if (first == second) { - return 0; - } - return (first < second) ? -1 : 1; + DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); }; case TSDB_DATA_TYPE_BIGINT: { int64_t first = *(int64_t *)f1; diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 10725bf737c8d9b4ab97bb0d71c2d35c29cf6abd..8db418a7f0a2c4842ae9354f56bd9c1a57485146 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -58,6 +58,15 @@ SSqlInfo qSQLParse(const char *pStr) { sqlInfo.valid = false; goto abort_parse; } + + case TK_HEX: + case TK_OCT: + case TK_BIN:{ + snprintf(sqlInfo.msg, tListLen(sqlInfo.msg), "unsupported token: \"%s\"", t0.z); + sqlInfo.valid = false; + goto abort_parse; + } + default: Parse(pParser, t0.type, t0, &sqlInfo); if (sqlInfo.valid == false) { @@ -301,6 +310,28 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { return pExpr; } + + +tSQLExpr *tSqlExprClone(tSQLExpr *pSrc) { + tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); + + memcpy(pExpr, pSrc, sizeof(*pSrc)); + + if (pSrc->pLeft) { + pExpr->pLeft = tSqlExprClone(pSrc->pLeft); + } + + if (pSrc->pRight) { + pExpr->pRight = tSqlExprClone(pSrc->pRight); + } + + //we don't clone pParam now because clone is only used for between/and + assert(pSrc->pParam == NULL); + + return pExpr; +} + + void tSqlExprNodeDestroy(tSQLExpr *pExpr) { if (pExpr == NULL) { return; @@ -321,8 +352,9 @@ void tSqlExprDestroy(tSQLExpr *pExpr) { } tSqlExprDestroy(pExpr->pLeft); + pExpr->pLeft = NULL; tSqlExprDestroy(pExpr->pRight); - + pExpr->pRight = NULL; tSqlExprNodeDestroy(pExpr); } @@ -402,7 +434,55 @@ void tSqlSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) } else { pField->bytes = pType->bytes; } +} + +static int32_t tryParseNameTwoParts(SStrToken *type) { + int32_t t = -1; + + char* str = strndup(type->z, type->n); + if (str == NULL) { + return t; + } + + char* p = strtok(str, " "); + if (p == NULL) { + tfree(str); + return t; + } else { + char* unsign = strtok(NULL, " "); + if (unsign == NULL) { + tfree(str); + return t; + } + + if (strncasecmp(unsign, "UNSIGNED", 8) == 0) { + for(int32_t j = TSDB_DATA_TYPE_TINYINT; j <= TSDB_DATA_TYPE_BIGINT; ++j) { + if (strcasecmp(p, tDataTypes[j].name) == 0) { + t = j; + break; + } + } + tfree(str); + + if (t == -1) { + return -1; + } + + switch(t) { + case TSDB_DATA_TYPE_TINYINT: return TSDB_DATA_TYPE_UTINYINT; + case TSDB_DATA_TYPE_SMALLINT: return TSDB_DATA_TYPE_USMALLINT; + case TSDB_DATA_TYPE_INT: return TSDB_DATA_TYPE_UINT; + case 
TSDB_DATA_TYPE_BIGINT: return TSDB_DATA_TYPE_UBIGINT; + default: + return -1; + } + + } else { + tfree(str); + return -1; + } + } } void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) { @@ -420,8 +500,12 @@ void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) { i += 1; } + // no qualified data type found, try unsigned data type if (i == tListLen(tDataTypes)) { - return; + i = tryParseNameTwoParts(type); + if (i == -1) { + return; + } } pField->type = i; @@ -508,7 +592,8 @@ static void freeVariant(void *pItem) { } void freeCreateTableInfo(void* p) { - SCreatedTableInfo* pInfo = (SCreatedTableInfo*) p; + SCreatedTableInfo* pInfo = (SCreatedTableInfo*) p; + taosArrayDestroy(pInfo->pTagNames); taosArrayDestroyEx(pInfo->pTagVals, freeVariant); tfree(pInfo->fullname); tfree(pInfo->tagdata.data); @@ -586,11 +671,12 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe return pCreate; } -SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists) { +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists) { SCreatedTableInfo info; memset(&info, 0, sizeof(SCreatedTableInfo)); info.name = *pToken; + info.pTagNames = pTagNames; info.pTagVals = pTagVals; info.stableName = *pTableName; info.igExist = (igExists->n > 0)? 1:0; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 37c890e9c64e9939d404daac6402e469f94794bd..a282041766cc0c8db6dd2658d3e4bb8590a596dc 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -382,7 +382,7 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { return 0; } - return taosArrayGetSize(pGroupResInfo->pRows); + return (int32_t) taosArrayGetSize(pGroupResInfo->pRows); } static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow, int32_t* rowCellInfoOffset) { diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 6e51e3b5662b9749c44cd48e9b11265cae313636..a551b09bb166ef7e7583768f88306b53a750925b 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -355,6 +355,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co qDebug("QInfo:%p has more results to retrieve", pQInfo); } + // the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS + if (pQInfo->code != TSDB_CODE_SUCCESS) { + rpcFreeCont(*pRsp); + *pRsp = NULL; + } + return pQInfo->code; } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 27137483e932a1c77fb2ebd6e1820aad2f32938e..4a860e60e28083507919e28c350f94533fb1242b 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -97,27 +97,27 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 256 +#define YYNOCODE 257 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SCreateAcctInfo yy1; - SCreateTableSQL* yy8; - tSQLExprList* yy9; - SLimitVal yy24; - SSubclauseInfo* yy63; - SArray* yy141; - tSQLExpr* yy220; - SQuerySQL* yy234; - SCreatedTableInfo yy306; - SCreateDbInfo yy322; - tVariant yy326; - SIntervalVal yy340; - TAOS_FIELD yy403; - int64_t yy429; - int yy502; + SCreatedTableInfo yy32; + tSQLExpr* yy114; + TAOS_FIELD yy215; + SQuerySQL* yy216; + int64_t yy221; + SCreateDbInfo yy222; + tSQLExprList* yy258; + SIntervalVal yy264; + SCreateTableSQL* yy278; + 
SCreateAcctInfo yy299; + int yy348; + SArray* yy349; + SSubclauseInfo* yy417; + tVariant yy426; + SLimitVal yy454; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -127,17 +127,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 283 -#define YYNRULE 250 +#define YYNSTATE 294 +#define YYNRULE 254 #define YYNTOKEN 184 -#define YY_MAX_SHIFT 282 -#define YY_MIN_SHIFTREDUCE 463 -#define YY_MAX_SHIFTREDUCE 712 -#define YY_ERROR_ACTION 713 -#define YY_ACCEPT_ACTION 714 -#define YY_NO_ACTION 715 -#define YY_MIN_REDUCE 716 -#define YY_MAX_REDUCE 965 +#define YY_MAX_SHIFT 293 +#define YY_MIN_SHIFTREDUCE 477 +#define YY_MAX_SHIFTREDUCE 730 +#define YY_ERROR_ACTION 731 +#define YY_ACCEPT_ACTION 732 +#define YY_NO_ACTION 733 +#define YY_MIN_REDUCE 734 +#define YY_MAX_REDUCE 987 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -203,238 +203,249 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (614) +#define YY_ACTTAB_COUNT (651) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 872, 507, 159, 714, 282, 159, 15, 580, 183, 508, - /* 10 */ 158, 186, 948, 41, 42, 947, 43, 44, 26, 163, - /* 20 */ 191, 35, 507, 507, 232, 47, 45, 49, 46, 944, - /* 30 */ 508, 508, 850, 40, 39, 257, 256, 38, 37, 36, - /* 40 */ 41, 42, 118, 43, 44, 861, 943, 191, 35, 179, - /* 50 */ 280, 232, 47, 45, 49, 46, 181, 869, 847, 215, - /* 60 */ 40, 39, 957, 123, 38, 37, 36, 464, 465, 466, - /* 70 */ 467, 468, 469, 470, 471, 472, 473, 474, 475, 281, - /* 80 */ 91, 196, 205, 41, 42, 267, 43, 44, 839, 247, - /* 90 */ 191, 35, 159, 942, 232, 47, 45, 49, 46, 123, - /* 100 */ 220, 185, 948, 40, 39, 850, 62, 38, 37, 36, - /* 110 */ 20, 245, 275, 274, 244, 243, 242, 273, 241, 272, - /* 120 */ 271, 270, 240, 269, 268, 901, 233, 227, 817, 661, - /* 130 */ 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, - /* 140 */ 815, 816, 818, 819, 42, 208, 43, 44, 175, 276, - /* 150 */ 191, 35, 212, 211, 232, 47, 45, 49, 46, 229, - /* 160 */ 861, 67, 21, 40, 39, 247, 72, 38, 37, 36, - /* 170 */ 32, 43, 44, 850, 180, 191, 35, 123, 70, 232, - /* 180 */ 47, 45, 49, 46, 16, 38, 37, 36, 40, 39, - /* 190 */ 69, 123, 38, 37, 36, 190, 674, 200, 841, 665, - /* 200 */ 167, 668, 63, 671, 26, 26, 168, 836, 837, 25, - /* 210 */ 840, 103, 102, 166, 190, 674, 176, 26, 665, 677, - /* 220 */ 668, 617, 671, 20, 61, 275, 274, 187, 188, 848, - /* 230 */ 273, 231, 272, 271, 270, 601, 269, 268, 598, 900, - /* 240 */ 599, 21, 600, 194, 846, 847, 187, 188, 823, 32, - /* 250 */ 267, 821, 822, 68, 838, 195, 824, 847, 826, 827, - /* 260 */ 825, 758, 828, 829, 147, 614, 202, 203, 199, 56, - /* 270 */ 214, 22, 47, 45, 49, 46, 767, 174, 161, 147, - /* 280 */ 40, 39, 89, 93, 38, 37, 36, 57, 83, 98, - /* 290 */ 101, 92, 3, 137, 197, 48, 621, 95, 29, 78, - /* 300 */ 74, 77, 218, 153, 149, 26, 26, 27, 673, 151, - /* 310 */ 106, 105, 104, 26, 48, 40, 39, 26, 162, 38, - /* 320 */ 37, 36, 663, 672, 10, 642, 643, 673, 71, 133, - /* 330 */ 279, 278, 110, 759, 201, 189, 147, 254, 253, 116, - /* 340 */ 164, 602, 672, 250, 251, 847, 847, 32, 609, 165, - /* 350 */ 667, 255, 670, 847, 629, 259, 217, 847, 664, 120, - /* 360 */ 198, 633, 634, 249, 693, 675, 52, 18, 53, 17, - /* 370 */ 17, 666, 171, 669, 590, 235, 591, 4, 172, 27, - /* 380 */ 
27, 52, 82, 81, 100, 99, 12, 11, 54, 88, - /* 390 */ 87, 59, 605, 579, 606, 115, 113, 14, 13, 603, - /* 400 */ 170, 604, 849, 157, 169, 160, 911, 910, 192, 907, - /* 410 */ 906, 193, 258, 117, 871, 33, 893, 878, 880, 863, - /* 420 */ 119, 892, 134, 135, 132, 136, 32, 769, 239, 155, - /* 430 */ 216, 30, 248, 766, 114, 58, 962, 628, 79, 961, - /* 440 */ 55, 50, 959, 138, 221, 252, 956, 860, 182, 124, - /* 450 */ 125, 225, 126, 85, 955, 230, 953, 139, 787, 31, - /* 460 */ 228, 226, 128, 28, 224, 156, 756, 94, 754, 96, - /* 470 */ 129, 97, 752, 751, 204, 148, 749, 748, 222, 747, - /* 480 */ 746, 745, 150, 152, 742, 740, 738, 736, 734, 154, - /* 490 */ 34, 219, 64, 65, 894, 90, 260, 261, 262, 263, - /* 500 */ 264, 177, 237, 265, 238, 266, 277, 712, 178, 75, - /* 510 */ 206, 173, 207, 711, 209, 210, 710, 698, 750, 213, - /* 520 */ 611, 107, 142, 788, 2, 144, 140, 141, 143, 145, - /* 530 */ 744, 146, 108, 109, 1, 845, 743, 217, 735, 60, - /* 540 */ 130, 131, 127, 234, 6, 66, 630, 121, 184, 23, - /* 550 */ 223, 7, 635, 122, 8, 676, 5, 9, 24, 19, - /* 560 */ 73, 236, 678, 548, 71, 544, 542, 541, 540, 537, - /* 570 */ 511, 246, 76, 27, 80, 51, 582, 581, 578, 84, - /* 580 */ 532, 530, 522, 528, 524, 526, 520, 518, 550, 549, - /* 590 */ 547, 546, 545, 543, 86, 539, 538, 52, 509, 479, - /* 600 */ 477, 716, 715, 715, 715, 715, 715, 715, 715, 715, - /* 610 */ 111, 715, 715, 112, + /* 0 */ 74, 521, 732, 293, 521, 165, 186, 291, 28, 522, + /* 10 */ 190, 893, 522, 43, 44, 969, 47, 48, 15, 776, + /* 20 */ 198, 37, 152, 46, 242, 51, 49, 53, 50, 854, + /* 30 */ 855, 27, 858, 42, 41, 871, 128, 40, 39, 38, + /* 40 */ 43, 44, 882, 47, 48, 882, 188, 198, 37, 868, + /* 50 */ 46, 242, 51, 49, 53, 50, 187, 128, 203, 225, + /* 60 */ 42, 41, 979, 165, 40, 39, 38, 43, 44, 890, + /* 70 */ 47, 48, 193, 970, 198, 37, 165, 46, 242, 51, + /* 80 */ 49, 53, 50, 871, 128, 192, 970, 42, 41, 258, + /* 90 */ 521, 40, 39, 38, 290, 289, 115, 239, 522, 71, + /* 100 */ 77, 43, 45, 128, 47, 48, 205, 66, 198, 37, + /* 110 */ 28, 46, 242, 51, 49, 53, 50, 40, 39, 38, + /* 120 */ 921, 42, 41, 278, 65, 40, 39, 38, 865, 678, + /* 130 */ 287, 871, 859, 210, 478, 479, 480, 481, 482, 483, + /* 140 */ 484, 485, 486, 487, 488, 489, 292, 72, 201, 215, + /* 150 */ 44, 868, 47, 48, 856, 871, 198, 37, 209, 46, + /* 160 */ 242, 51, 49, 53, 50, 869, 922, 204, 237, 42, + /* 170 */ 41, 96, 163, 40, 39, 38, 278, 21, 256, 286, + /* 180 */ 285, 255, 254, 253, 284, 252, 283, 282, 281, 251, + /* 190 */ 280, 279, 835, 594, 823, 824, 825, 826, 827, 828, + /* 200 */ 829, 830, 831, 832, 833, 834, 836, 837, 47, 48, + /* 210 */ 87, 86, 198, 37, 28, 46, 242, 51, 49, 53, + /* 220 */ 50, 268, 267, 16, 211, 42, 41, 265, 264, 40, + /* 230 */ 39, 38, 197, 691, 28, 634, 682, 123, 685, 174, + /* 240 */ 688, 22, 42, 41, 73, 175, 40, 39, 38, 34, + /* 250 */ 108, 107, 173, 197, 691, 867, 67, 682, 121, 685, + /* 260 */ 21, 688, 286, 285, 194, 195, 34, 284, 241, 283, + /* 270 */ 282, 281, 202, 280, 279, 868, 659, 660, 680, 841, + /* 280 */ 22, 857, 839, 840, 169, 194, 195, 842, 34, 844, + /* 290 */ 845, 843, 785, 846, 847, 152, 230, 631, 218, 51, + /* 300 */ 49, 53, 50, 28, 23, 222, 221, 42, 41, 224, + /* 310 */ 638, 40, 39, 38, 681, 618, 181, 10, 615, 207, + /* 320 */ 616, 76, 617, 138, 243, 932, 777, 94, 98, 152, + /* 330 */ 966, 196, 52, 88, 103, 106, 97, 28, 28, 28, + /* 340 */ 965, 261, 100, 626, 868, 690, 212, 213, 3, 142, + /* 350 */ 684, 227, 687, 52, 31, 83, 79, 82, 258, 228, + /* 360 */ 689, 158, 154, 29, 60, 964, 690, 156, 111, 110, + /* 370 */ 109, 182, 683, 4, 686, 
262, 266, 270, 868, 868, + /* 380 */ 868, 689, 646, 61, 57, 208, 125, 622, 260, 623, + /* 390 */ 650, 651, 711, 692, 56, 18, 17, 17, 604, 245, + /* 400 */ 606, 870, 29, 29, 56, 58, 247, 605, 183, 26, + /* 410 */ 75, 56, 248, 105, 104, 12, 11, 694, 167, 93, + /* 420 */ 92, 619, 63, 168, 593, 14, 13, 620, 170, 621, + /* 430 */ 120, 118, 164, 931, 171, 172, 199, 122, 178, 179, + /* 440 */ 177, 162, 176, 928, 166, 927, 200, 269, 884, 892, + /* 450 */ 35, 899, 901, 124, 914, 913, 139, 864, 140, 137, + /* 460 */ 34, 141, 226, 787, 250, 119, 645, 160, 240, 231, + /* 470 */ 62, 32, 259, 784, 881, 59, 189, 235, 129, 54, + /* 480 */ 131, 984, 84, 983, 130, 238, 133, 981, 132, 143, + /* 490 */ 263, 978, 236, 234, 90, 977, 232, 134, 975, 144, + /* 500 */ 805, 135, 33, 30, 161, 774, 99, 772, 101, 95, + /* 510 */ 102, 770, 769, 214, 153, 767, 766, 765, 764, 763, + /* 520 */ 155, 157, 760, 758, 756, 754, 752, 36, 159, 271, + /* 530 */ 229, 68, 69, 915, 272, 273, 274, 275, 276, 277, + /* 540 */ 288, 730, 184, 206, 249, 216, 217, 185, 180, 729, + /* 550 */ 80, 219, 220, 728, 716, 768, 223, 227, 70, 628, + /* 560 */ 64, 147, 146, 806, 145, 148, 149, 151, 112, 150, + /* 570 */ 113, 647, 762, 761, 244, 114, 866, 753, 6, 126, + /* 580 */ 136, 1, 2, 191, 233, 24, 25, 7, 652, 127, + /* 590 */ 8, 693, 5, 9, 19, 695, 20, 78, 246, 562, + /* 600 */ 558, 76, 556, 555, 554, 551, 525, 257, 81, 85, + /* 610 */ 29, 596, 55, 595, 592, 89, 91, 546, 544, 536, + /* 620 */ 542, 538, 540, 534, 532, 564, 563, 561, 560, 559, + /* 630 */ 557, 553, 552, 56, 523, 493, 491, 734, 733, 733, + /* 640 */ 733, 733, 733, 733, 733, 733, 733, 733, 733, 116, + /* 650 */ 117, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 188, 1, 244, 185, 186, 244, 244, 5, 205, 9, - /* 10 */ 244, 253, 254, 13, 14, 254, 16, 17, 188, 244, - /* 20 */ 20, 21, 1, 1, 24, 25, 26, 27, 28, 244, - /* 30 */ 9, 9, 229, 33, 34, 33, 34, 37, 38, 39, - /* 40 */ 13, 14, 188, 16, 17, 227, 244, 20, 21, 187, - /* 50 */ 188, 24, 25, 26, 27, 28, 226, 245, 228, 241, - /* 60 */ 33, 34, 229, 188, 37, 38, 39, 45, 46, 47, - /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 80 */ 74, 205, 60, 13, 14, 79, 16, 17, 0, 77, - /* 90 */ 20, 21, 244, 244, 24, 25, 26, 27, 28, 188, - /* 100 */ 246, 253, 254, 33, 34, 229, 106, 37, 38, 39, - /* 110 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 120 */ 96, 97, 98, 99, 100, 250, 15, 252, 204, 102, - /* 130 */ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - /* 140 */ 216, 217, 218, 219, 14, 131, 16, 17, 244, 205, - /* 150 */ 20, 21, 138, 139, 24, 25, 26, 27, 28, 248, - /* 160 */ 227, 250, 101, 33, 34, 77, 193, 37, 38, 39, - /* 170 */ 109, 16, 17, 229, 241, 20, 21, 188, 193, 24, - /* 180 */ 25, 26, 27, 28, 44, 37, 38, 39, 33, 34, - /* 190 */ 230, 188, 37, 38, 39, 1, 2, 188, 225, 5, - /* 200 */ 60, 7, 242, 9, 188, 188, 66, 222, 223, 224, - /* 210 */ 225, 71, 72, 73, 1, 2, 244, 188, 5, 108, - /* 220 */ 7, 37, 9, 86, 193, 88, 89, 33, 34, 220, - /* 230 */ 93, 37, 95, 96, 97, 2, 99, 100, 5, 250, - /* 240 */ 7, 101, 9, 226, 228, 228, 33, 34, 204, 109, - /* 250 */ 79, 207, 208, 250, 223, 226, 212, 228, 214, 215, - /* 260 */ 216, 192, 218, 219, 195, 107, 33, 34, 66, 107, - /* 270 */ 130, 113, 25, 26, 27, 28, 192, 137, 244, 195, - /* 280 */ 33, 34, 61, 62, 37, 38, 39, 125, 67, 68, - /* 290 */ 69, 70, 61, 62, 66, 101, 112, 76, 67, 68, - /* 300 */ 69, 70, 102, 61, 62, 188, 188, 107, 114, 67, - /* 310 */ 68, 69, 70, 188, 101, 33, 34, 188, 244, 37, - /* 320 */ 38, 39, 1, 129, 101, 120, 121, 114, 105, 106, - /* 330 */ 63, 64, 65, 192, 132, 59, 
195, 135, 136, 101, - /* 340 */ 244, 108, 129, 226, 226, 228, 228, 109, 102, 244, - /* 350 */ 5, 226, 7, 228, 102, 226, 110, 228, 37, 107, - /* 360 */ 132, 102, 102, 135, 102, 102, 107, 107, 107, 107, - /* 370 */ 107, 5, 244, 7, 102, 102, 102, 101, 244, 107, - /* 380 */ 107, 107, 133, 134, 74, 75, 133, 134, 127, 133, - /* 390 */ 134, 101, 5, 103, 7, 61, 62, 133, 134, 5, - /* 400 */ 244, 7, 229, 244, 244, 244, 221, 221, 221, 221, - /* 410 */ 221, 221, 221, 188, 188, 243, 251, 188, 188, 227, - /* 420 */ 188, 251, 188, 188, 231, 188, 109, 188, 188, 188, - /* 430 */ 227, 188, 188, 188, 59, 124, 188, 114, 188, 188, - /* 440 */ 126, 123, 188, 188, 247, 188, 188, 240, 247, 239, - /* 450 */ 238, 247, 237, 188, 188, 118, 188, 188, 188, 188, - /* 460 */ 122, 117, 235, 188, 116, 188, 188, 188, 188, 188, - /* 470 */ 234, 188, 188, 188, 188, 188, 188, 188, 115, 188, - /* 480 */ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, - /* 490 */ 128, 189, 189, 189, 189, 85, 84, 49, 81, 83, - /* 500 */ 53, 189, 189, 82, 189, 80, 77, 5, 189, 193, - /* 510 */ 140, 189, 5, 5, 140, 5, 5, 87, 189, 131, - /* 520 */ 102, 190, 197, 203, 191, 198, 202, 201, 200, 199, - /* 530 */ 189, 196, 190, 190, 194, 227, 189, 110, 189, 111, - /* 540 */ 233, 232, 236, 104, 101, 107, 102, 101, 1, 107, - /* 550 */ 101, 119, 102, 101, 119, 102, 101, 101, 107, 101, - /* 560 */ 74, 104, 108, 9, 105, 5, 5, 5, 5, 5, - /* 570 */ 78, 15, 74, 107, 134, 16, 5, 5, 102, 134, - /* 580 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 590 */ 5, 5, 5, 5, 134, 5, 5, 107, 78, 59, - /* 600 */ 58, 0, 255, 255, 255, 255, 255, 255, 255, 255, - /* 610 */ 21, 255, 255, 21, 255, 255, 255, 255, 255, 255, - /* 620 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 630 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 640 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 650 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 660 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 670 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 680 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 690 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 700 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 710 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 720 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 730 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 740 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 750 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 760 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 770 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 780 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - /* 790 */ 255, 255, 255, 255, 255, 255, 255, 255, + /* 0 */ 193, 1, 185, 186, 1, 245, 187, 188, 188, 9, + /* 10 */ 205, 188, 9, 13, 14, 255, 16, 17, 245, 192, + /* 20 */ 20, 21, 195, 23, 24, 25, 26, 27, 28, 222, + /* 30 */ 223, 224, 225, 33, 34, 230, 188, 37, 38, 39, + /* 40 */ 13, 14, 228, 16, 17, 228, 226, 20, 21, 229, + /* 50 */ 23, 24, 25, 26, 27, 28, 242, 188, 205, 242, + /* 60 */ 33, 34, 230, 245, 37, 38, 39, 13, 14, 246, + /* 70 */ 16, 17, 254, 255, 20, 21, 245, 23, 24, 25, + /* 80 */ 26, 27, 28, 230, 188, 254, 255, 33, 34, 77, + /* 90 */ 1, 37, 38, 39, 63, 64, 65, 249, 9, 251, + /* 100 */ 193, 13, 14, 188, 16, 17, 205, 107, 20, 21, + /* 110 */ 188, 23, 24, 25, 26, 27, 28, 37, 38, 39, + /* 120 */ 251, 33, 34, 79, 193, 37, 38, 39, 188, 102, + /* 130 */ 205, 230, 225, 188, 45, 46, 47, 48, 49, 50, + /* 140 */ 51, 52, 53, 54, 55, 
56, 57, 251, 226, 60, + /* 150 */ 14, 229, 16, 17, 223, 230, 20, 21, 66, 23, + /* 160 */ 24, 25, 26, 27, 28, 220, 251, 227, 253, 33, + /* 170 */ 34, 74, 245, 37, 38, 39, 79, 86, 87, 88, + /* 180 */ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + /* 190 */ 99, 100, 204, 5, 206, 207, 208, 209, 210, 211, + /* 200 */ 212, 213, 214, 215, 216, 217, 218, 219, 16, 17, + /* 210 */ 133, 134, 20, 21, 188, 23, 24, 25, 26, 27, + /* 220 */ 28, 33, 34, 44, 132, 33, 34, 135, 136, 37, + /* 230 */ 38, 39, 1, 2, 188, 37, 5, 188, 7, 60, + /* 240 */ 9, 101, 33, 34, 231, 66, 37, 38, 39, 109, + /* 250 */ 71, 72, 73, 1, 2, 229, 243, 5, 101, 7, + /* 260 */ 86, 9, 88, 89, 33, 34, 109, 93, 37, 95, + /* 270 */ 96, 97, 226, 99, 100, 229, 120, 121, 1, 204, + /* 280 */ 101, 0, 207, 208, 245, 33, 34, 212, 109, 214, + /* 290 */ 215, 216, 192, 218, 219, 195, 247, 106, 131, 25, + /* 300 */ 26, 27, 28, 188, 113, 138, 139, 33, 34, 130, + /* 310 */ 112, 37, 38, 39, 37, 2, 137, 101, 5, 66, + /* 320 */ 7, 105, 9, 107, 15, 221, 192, 61, 62, 195, + /* 330 */ 245, 59, 101, 67, 68, 69, 70, 188, 188, 188, + /* 340 */ 245, 226, 76, 102, 229, 114, 33, 34, 61, 62, + /* 350 */ 5, 110, 7, 101, 67, 68, 69, 70, 77, 102, + /* 360 */ 129, 61, 62, 106, 106, 245, 114, 67, 68, 69, + /* 370 */ 70, 245, 5, 101, 7, 226, 226, 226, 229, 229, + /* 380 */ 229, 129, 102, 125, 106, 132, 106, 5, 135, 7, + /* 390 */ 102, 102, 102, 102, 106, 106, 106, 106, 102, 102, + /* 400 */ 102, 230, 106, 106, 106, 127, 102, 102, 245, 101, + /* 410 */ 106, 106, 104, 74, 75, 133, 134, 108, 245, 133, + /* 420 */ 134, 108, 101, 245, 103, 133, 134, 5, 245, 7, + /* 430 */ 61, 62, 245, 221, 245, 245, 221, 188, 245, 245, + /* 440 */ 245, 245, 245, 221, 245, 221, 221, 221, 228, 188, + /* 450 */ 244, 188, 188, 188, 252, 252, 188, 188, 188, 232, + /* 460 */ 109, 188, 228, 188, 188, 59, 114, 188, 118, 248, + /* 470 */ 124, 188, 188, 188, 241, 126, 248, 248, 240, 123, + /* 480 */ 238, 188, 188, 188, 239, 122, 236, 188, 237, 188, + /* 490 */ 188, 188, 117, 116, 188, 188, 115, 235, 188, 188, + /* 500 */ 188, 234, 188, 188, 188, 188, 188, 188, 188, 85, + /* 510 */ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, + /* 520 */ 188, 188, 188, 188, 188, 188, 188, 128, 188, 84, + /* 530 */ 189, 189, 189, 189, 49, 81, 83, 53, 82, 80, + /* 540 */ 77, 5, 189, 189, 189, 140, 5, 189, 189, 5, + /* 550 */ 193, 140, 5, 5, 87, 189, 131, 110, 106, 102, + /* 560 */ 111, 197, 201, 203, 202, 200, 198, 196, 190, 199, + /* 570 */ 190, 102, 189, 189, 104, 190, 228, 189, 101, 101, + /* 580 */ 233, 194, 191, 1, 101, 106, 106, 119, 102, 101, + /* 590 */ 119, 102, 101, 101, 101, 108, 101, 74, 104, 9, + /* 600 */ 5, 105, 5, 5, 5, 5, 78, 15, 74, 134, + /* 610 */ 106, 5, 16, 5, 102, 134, 134, 5, 5, 5, + /* 620 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 630 */ 5, 5, 5, 106, 78, 59, 58, 0, 256, 256, + /* 640 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 21, + /* 650 */ 21, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 660 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 670 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 680 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 690 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 700 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 710 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 720 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 730 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 740 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 750 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 
256, + /* 760 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 770 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 780 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 790 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 800 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 810 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 820 */ 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + /* 830 */ 256, 256, 256, 256, 256, }; -#define YY_SHIFT_COUNT (282) +#define YY_SHIFT_COUNT (293) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (601) +#define YY_SHIFT_MAX (637) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 140, 24, 137, 12, 194, 213, 21, 21, 21, 21, - /* 10 */ 21, 21, 21, 21, 21, 0, 22, 213, 233, 233, - /* 20 */ 233, 61, 21, 21, 21, 88, 21, 21, 6, 12, - /* 30 */ 171, 171, 614, 213, 213, 213, 213, 213, 213, 213, - /* 40 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, - /* 50 */ 213, 233, 233, 2, 2, 2, 2, 2, 2, 2, - /* 60 */ 238, 21, 21, 184, 21, 21, 21, 205, 205, 158, - /* 70 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 80 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 90 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 100 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 110 */ 21, 21, 21, 21, 21, 21, 317, 375, 375, 375, - /* 120 */ 323, 323, 323, 375, 311, 314, 318, 337, 338, 344, - /* 130 */ 348, 363, 362, 317, 375, 375, 375, 12, 375, 375, - /* 140 */ 410, 412, 448, 417, 416, 447, 421, 425, 375, 429, - /* 150 */ 375, 429, 375, 429, 375, 614, 614, 27, 70, 70, - /* 160 */ 70, 130, 155, 247, 247, 247, 221, 231, 242, 282, - /* 170 */ 282, 282, 282, 202, 14, 148, 148, 223, 228, 267, - /* 180 */ 246, 200, 252, 259, 260, 262, 263, 345, 366, 321, - /* 190 */ 276, 111, 261, 162, 272, 273, 274, 249, 253, 256, - /* 200 */ 290, 264, 387, 394, 310, 334, 502, 370, 507, 508, - /* 210 */ 374, 510, 511, 430, 388, 427, 418, 428, 439, 443, - /* 220 */ 438, 444, 446, 547, 449, 450, 452, 442, 432, 451, - /* 230 */ 435, 453, 455, 454, 456, 439, 458, 457, 459, 486, - /* 240 */ 554, 560, 561, 562, 563, 564, 492, 556, 498, 440, - /* 250 */ 466, 466, 559, 445, 460, 466, 571, 572, 476, 466, - /* 260 */ 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - /* 270 */ 585, 586, 587, 588, 590, 591, 490, 520, 589, 592, - /* 280 */ 540, 542, 601, + /* 0 */ 179, 91, 174, 12, 231, 252, 3, 3, 3, 3, + /* 10 */ 3, 3, 3, 3, 3, 0, 89, 252, 313, 313, + /* 20 */ 313, 313, 140, 3, 3, 3, 3, 281, 3, 3, + /* 30 */ 97, 12, 44, 44, 651, 252, 252, 252, 252, 252, + /* 40 */ 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, + /* 50 */ 252, 252, 252, 252, 252, 313, 313, 188, 188, 188, + /* 60 */ 188, 188, 188, 188, 157, 3, 3, 198, 3, 3, + /* 70 */ 3, 156, 156, 191, 3, 3, 3, 3, 3, 3, + /* 80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 120 */ 3, 351, 406, 406, 406, 352, 352, 352, 406, 346, + /* 130 */ 349, 356, 350, 363, 375, 377, 381, 399, 351, 406, + /* 140 */ 406, 406, 12, 406, 406, 424, 445, 485, 454, 453, + /* 150 */ 484, 456, 459, 406, 463, 406, 463, 406, 463, 406, + /* 160 */ 651, 651, 27, 54, 88, 54, 54, 136, 192, 274, + /* 170 */ 274, 274, 274, 266, 287, 300, 209, 209, 209, 209, + /* 180 */ 92, 167, 80, 80, 216, 253, 31, 241, 257, 280, + /* 190 */ 288, 289, 290, 291, 345, 367, 277, 272, 309, 278, + /* 200 */ 258, 296, 297, 298, 304, 305, 308, 77, 282, 286, + /* 210 */ 321, 292, 382, 422, 339, 369, 536, 405, 541, 544, + /* 220 */ 411, 547, 548, 
467, 425, 447, 457, 449, 470, 477, + /* 230 */ 452, 469, 478, 582, 483, 486, 488, 479, 468, 480, + /* 240 */ 471, 489, 491, 487, 492, 470, 493, 494, 495, 496, + /* 250 */ 523, 590, 595, 597, 598, 599, 600, 528, 592, 534, + /* 260 */ 475, 504, 504, 596, 481, 482, 504, 606, 608, 512, + /* 270 */ 504, 612, 613, 614, 615, 616, 617, 618, 619, 620, + /* 280 */ 621, 622, 623, 624, 625, 626, 627, 527, 556, 628, + /* 290 */ 629, 576, 578, 637, }; -#define YY_REDUCE_COUNT (156) -#define YY_REDUCE_MIN (-242) -#define YY_REDUCE_MAX (349) +#define YY_REDUCE_COUNT (161) +#define YY_REDUCE_MIN (-240) +#define YY_REDUCE_MAX (391) static const short yy_reduce_ofst[] = { - /* 0 */ -182, -76, 44, -15, -242, -152, -170, -125, -89, 17, - /* 10 */ 29, 117, 118, 125, 129, -188, -138, -239, -197, -124, - /* 20 */ -56, -67, -146, -11, 3, -27, 9, 16, 69, 31, - /* 30 */ 84, 141, -40, -238, -234, -225, -215, -198, -151, -96, - /* 40 */ -28, 34, 74, 96, 105, 128, 134, 156, 159, 160, - /* 50 */ 161, -167, 173, 185, 186, 187, 188, 189, 190, 191, - /* 60 */ 192, 225, 226, 172, 229, 230, 232, 165, 170, 193, - /* 70 */ 234, 235, 237, 239, 240, 241, 243, 244, 245, 248, - /* 80 */ 250, 251, 254, 255, 257, 258, 265, 266, 268, 269, - /* 90 */ 270, 271, 275, 277, 278, 279, 280, 281, 283, 284, - /* 100 */ 285, 286, 287, 288, 289, 291, 292, 293, 294, 295, - /* 110 */ 296, 297, 298, 299, 300, 301, 203, 302, 303, 304, - /* 120 */ 197, 201, 204, 305, 207, 210, 212, 215, 306, 227, - /* 130 */ 236, 307, 309, 308, 312, 313, 315, 316, 319, 322, - /* 140 */ 320, 324, 326, 325, 328, 327, 330, 335, 329, 331, - /* 150 */ 341, 342, 347, 343, 349, 340, 333, + /* 0 */ -183, -12, 75, -193, -182, -169, -180, -85, -152, -78, + /* 10 */ 46, 115, 149, 150, 151, -177, -181, -240, -195, -147, + /* 20 */ -99, -75, -186, 49, -131, -104, -60, -93, -55, 26, + /* 30 */ -173, -69, 100, 134, 13, -227, -73, 39, 85, 95, + /* 40 */ 120, 126, 163, 173, 178, 183, 187, 189, 190, 193, + /* 50 */ 194, 195, 196, 197, 199, -168, 171, 104, 212, 215, + /* 60 */ 222, 224, 225, 226, 220, 249, 261, 206, 263, 264, + /* 70 */ 265, 202, 203, 227, 268, 269, 270, 273, 275, 276, + /* 80 */ 279, 283, 284, 285, 293, 294, 295, 299, 301, 302, + /* 90 */ 303, 306, 307, 310, 311, 312, 314, 315, 316, 317, + /* 100 */ 318, 319, 320, 322, 323, 324, 325, 326, 327, 328, + /* 110 */ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, + /* 120 */ 340, 234, 341, 342, 343, 221, 228, 229, 344, 233, + /* 130 */ 238, 245, 242, 251, 250, 262, 267, 347, 348, 353, + /* 140 */ 354, 355, 357, 358, 359, 360, 362, 361, 364, 365, + /* 150 */ 368, 370, 371, 366, 378, 383, 380, 384, 385, 388, + /* 160 */ 387, 391, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 713, 768, 757, 765, 950, 950, 713, 713, 713, 713, - /* 10 */ 713, 713, 713, 713, 713, 873, 731, 950, 713, 713, - /* 20 */ 713, 713, 713, 713, 713, 765, 713, 713, 770, 765, - /* 30 */ 770, 770, 868, 713, 713, 713, 713, 713, 713, 713, - /* 40 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 50 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 60 */ 713, 713, 713, 875, 877, 879, 713, 897, 897, 866, - /* 70 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 80 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 90 */ 713, 713, 713, 713, 755, 713, 753, 713, 713, 713, - /* 100 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 110 */ 741, 713, 713, 713, 713, 713, 713, 733, 733, 733, - /* 120 */ 713, 713, 713, 733, 904, 908, 902, 890, 898, 889, - /* 130 */ 885, 884, 912, 713, 733, 733, 733, 765, 733, 733, - /* 
140 */ 786, 784, 782, 774, 780, 776, 778, 772, 733, 763, - /* 150 */ 733, 763, 733, 763, 733, 804, 820, 713, 913, 949, - /* 160 */ 903, 939, 938, 945, 937, 936, 713, 713, 713, 932, - /* 170 */ 933, 935, 934, 713, 713, 941, 940, 713, 713, 713, - /* 180 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 190 */ 915, 713, 909, 905, 713, 713, 713, 713, 713, 713, - /* 200 */ 830, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 210 */ 713, 713, 713, 713, 713, 865, 713, 713, 713, 713, - /* 220 */ 876, 713, 713, 713, 713, 713, 713, 899, 713, 891, - /* 230 */ 713, 713, 713, 713, 713, 842, 713, 713, 713, 713, - /* 240 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 250 */ 960, 958, 713, 713, 713, 954, 713, 713, 713, 952, - /* 260 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 270 */ 713, 713, 713, 713, 713, 713, 789, 713, 739, 737, - /* 280 */ 713, 729, 713, + /* 0 */ 731, 786, 775, 783, 972, 972, 731, 731, 731, 731, + /* 10 */ 731, 731, 731, 731, 731, 894, 749, 972, 731, 731, + /* 20 */ 731, 731, 731, 731, 731, 731, 731, 783, 731, 731, + /* 30 */ 788, 783, 788, 788, 889, 731, 731, 731, 731, 731, + /* 40 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 50 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 60 */ 731, 731, 731, 731, 731, 731, 731, 896, 898, 900, + /* 70 */ 731, 918, 918, 887, 731, 731, 731, 731, 731, 731, + /* 80 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 90 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 773, + /* 100 */ 731, 771, 731, 731, 731, 731, 731, 731, 731, 731, + /* 110 */ 731, 731, 731, 731, 731, 759, 731, 731, 731, 731, + /* 120 */ 731, 731, 751, 751, 751, 731, 731, 731, 751, 925, + /* 130 */ 929, 923, 911, 919, 910, 906, 905, 933, 731, 751, + /* 140 */ 751, 751, 783, 751, 751, 804, 802, 800, 792, 798, + /* 150 */ 794, 796, 790, 751, 781, 751, 781, 751, 781, 751, + /* 160 */ 822, 838, 731, 934, 731, 971, 924, 961, 960, 967, + /* 170 */ 959, 958, 957, 731, 731, 731, 953, 954, 956, 955, + /* 180 */ 731, 731, 963, 962, 731, 731, 731, 731, 731, 731, + /* 190 */ 731, 731, 731, 731, 731, 731, 731, 936, 731, 930, + /* 200 */ 926, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 210 */ 848, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 220 */ 731, 731, 731, 731, 731, 886, 731, 731, 731, 731, + /* 230 */ 897, 731, 731, 731, 731, 731, 731, 920, 731, 912, + /* 240 */ 731, 731, 731, 731, 731, 860, 731, 731, 731, 731, + /* 250 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 260 */ 731, 982, 980, 731, 731, 731, 976, 731, 731, 731, + /* 270 */ 974, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 280 */ 731, 731, 731, 731, 731, 731, 731, 807, 731, 757, + /* 290 */ 755, 731, 747, 731, }; /********** End of lemon-generated parsing tables *****************************/ @@ -560,8 +571,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* UNSIGNED => nothing */ 0, /* TAGS => nothing */ 0, /* USING => nothing */ - 0, /* AS => nothing */ 0, /* COMMA => nothing */ + 0, /* AS => nothing */ 1, /* NULL => ID */ 0, /* SELECT => nothing */ 0, /* UNION => nothing */ @@ -830,8 +841,8 @@ static const char *const yyTokenName[] = { /* 103 */ "UNSIGNED", /* 104 */ "TAGS", /* 105 */ "USING", - /* 106 */ "AS", - /* 107 */ "COMMA", + /* 106 */ "COMMA", + /* 107 */ "AS", /* 108 */ "NULL", /* 109 */ "SELECT", /* 110 */ "UNION", @@ -951,34 +962,35 @@ static const char *const yyTokenName[] = { /* 224 */ "create_table_list", /* 225 */ "create_from_stable", /* 226 */ "columnlist", - /* 227 */ "select", - /* 228 */ "column", - /* 229 */ 
"tagitem", - /* 230 */ "selcollist", - /* 231 */ "from", - /* 232 */ "where_opt", - /* 233 */ "interval_opt", - /* 234 */ "fill_opt", - /* 235 */ "sliding_opt", - /* 236 */ "groupby_opt", - /* 237 */ "orderby_opt", - /* 238 */ "having_opt", - /* 239 */ "slimit_opt", - /* 240 */ "limit_opt", - /* 241 */ "union", - /* 242 */ "sclp", - /* 243 */ "distinct", - /* 244 */ "expr", - /* 245 */ "as", - /* 246 */ "tablelist", - /* 247 */ "tmvar", - /* 248 */ "sortlist", - /* 249 */ "sortitem", - /* 250 */ "item", - /* 251 */ "sortorder", - /* 252 */ "grouplist", - /* 253 */ "exprlist", - /* 254 */ "expritem", + /* 227 */ "tagNamelist", + /* 228 */ "select", + /* 229 */ "column", + /* 230 */ "tagitem", + /* 231 */ "selcollist", + /* 232 */ "from", + /* 233 */ "where_opt", + /* 234 */ "interval_opt", + /* 235 */ "fill_opt", + /* 236 */ "sliding_opt", + /* 237 */ "groupby_opt", + /* 238 */ "orderby_opt", + /* 239 */ "having_opt", + /* 240 */ "slimit_opt", + /* 241 */ "limit_opt", + /* 242 */ "union", + /* 243 */ "sclp", + /* 244 */ "distinct", + /* 245 */ "expr", + /* 246 */ "as", + /* 247 */ "tablelist", + /* 248 */ "tmvar", + /* 249 */ "sortlist", + /* 250 */ "sortitem", + /* 251 */ "item", + /* 252 */ "sortorder", + /* 253 */ "grouplist", + /* 254 */ "exprlist", + /* 255 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1115,127 +1127,131 @@ static const char *const yyRuleName[] = { /* 126 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", /* 127 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", /* 128 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", - /* 129 */ "create_table_args ::= ifnotexists ids cpxName AS select", - /* 130 */ "columnlist ::= columnlist COMMA column", - /* 131 */ "columnlist ::= column", - /* 132 */ "column ::= ids typename", - /* 133 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 134 */ "tagitemlist ::= tagitem", - /* 135 */ "tagitem ::= INTEGER", - /* 136 */ "tagitem ::= FLOAT", - /* 137 */ "tagitem ::= STRING", - /* 138 */ "tagitem ::= BOOL", - /* 139 */ "tagitem ::= NULL", - /* 140 */ "tagitem ::= MINUS INTEGER", - /* 141 */ "tagitem ::= MINUS FLOAT", - /* 142 */ "tagitem ::= PLUS INTEGER", - /* 143 */ "tagitem ::= PLUS FLOAT", - /* 144 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 145 */ "union ::= select", - /* 146 */ "union ::= LP union RP", - /* 147 */ "union ::= union UNION ALL select", - /* 148 */ "union ::= union UNION ALL LP select RP", - /* 149 */ "cmd ::= union", - /* 150 */ "select ::= SELECT selcollist", - /* 151 */ "sclp ::= selcollist COMMA", - /* 152 */ "sclp ::=", - /* 153 */ "selcollist ::= sclp distinct expr as", - /* 154 */ "selcollist ::= sclp STAR", - /* 155 */ "as ::= AS ids", - /* 156 */ "as ::= ids", - /* 157 */ "as ::=", - /* 158 */ "distinct ::= DISTINCT", - /* 159 */ "distinct ::=", - /* 160 */ "from ::= FROM tablelist", - /* 161 */ "tablelist ::= ids cpxName", - /* 162 */ "tablelist ::= ids cpxName ids", - /* 163 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 164 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 165 */ "tmvar ::= VARIABLE", - /* 166 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 167 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 168 */ "interval_opt ::=", - /* 169 */ "fill_opt ::=", - /* 170 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 171 */ "fill_opt ::= 
FILL LP ID RP", - /* 172 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 173 */ "sliding_opt ::=", - /* 174 */ "orderby_opt ::=", - /* 175 */ "orderby_opt ::= ORDER BY sortlist", - /* 176 */ "sortlist ::= sortlist COMMA item sortorder", - /* 177 */ "sortlist ::= item sortorder", - /* 178 */ "item ::= ids cpxName", - /* 179 */ "sortorder ::= ASC", - /* 180 */ "sortorder ::= DESC", - /* 181 */ "sortorder ::=", - /* 182 */ "groupby_opt ::=", - /* 183 */ "groupby_opt ::= GROUP BY grouplist", - /* 184 */ "grouplist ::= grouplist COMMA item", - /* 185 */ "grouplist ::= item", - /* 186 */ "having_opt ::=", - /* 187 */ "having_opt ::= HAVING expr", - /* 188 */ "limit_opt ::=", - /* 189 */ "limit_opt ::= LIMIT signed", - /* 190 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 191 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 192 */ "slimit_opt ::=", - /* 193 */ "slimit_opt ::= SLIMIT signed", - /* 194 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 195 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 196 */ "where_opt ::=", - /* 197 */ "where_opt ::= WHERE expr", - /* 198 */ "expr ::= LP expr RP", - /* 199 */ "expr ::= ID", - /* 200 */ "expr ::= ID DOT ID", - /* 201 */ "expr ::= ID DOT STAR", - /* 202 */ "expr ::= INTEGER", - /* 203 */ "expr ::= MINUS INTEGER", - /* 204 */ "expr ::= PLUS INTEGER", - /* 205 */ "expr ::= FLOAT", - /* 206 */ "expr ::= MINUS FLOAT", - /* 207 */ "expr ::= PLUS FLOAT", - /* 208 */ "expr ::= STRING", - /* 209 */ "expr ::= NOW", - /* 210 */ "expr ::= VARIABLE", - /* 211 */ "expr ::= BOOL", - /* 212 */ "expr ::= ID LP exprlist RP", - /* 213 */ "expr ::= ID LP STAR RP", - /* 214 */ "expr ::= expr IS NULL", - /* 215 */ "expr ::= expr IS NOT NULL", - /* 216 */ "expr ::= expr LT expr", - /* 217 */ "expr ::= expr GT expr", - /* 218 */ "expr ::= expr LE expr", - /* 219 */ "expr ::= expr GE expr", - /* 220 */ "expr ::= expr NE expr", - /* 221 */ "expr ::= expr EQ expr", - /* 222 */ "expr ::= expr AND expr", - /* 223 */ "expr ::= expr OR expr", - /* 224 */ "expr ::= expr PLUS expr", - /* 225 */ "expr ::= expr MINUS expr", - /* 226 */ "expr ::= expr STAR expr", - /* 227 */ "expr ::= expr SLASH expr", - /* 228 */ "expr ::= expr REM expr", - /* 229 */ "expr ::= expr LIKE expr", - /* 230 */ "expr ::= expr IN LP exprlist RP", - /* 231 */ "exprlist ::= exprlist COMMA expritem", - /* 232 */ "exprlist ::= expritem", - /* 233 */ "expritem ::= expr", - /* 234 */ "expritem ::=", - /* 235 */ "cmd ::= RESET QUERY CACHE", - /* 236 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 237 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 238 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 239 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 240 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 241 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 242 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 243 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 244 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 245 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 246 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 247 */ "cmd ::= KILL CONNECTION INTEGER", - /* 248 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 249 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 129 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", + /* 130 */ "tagNamelist ::= tagNamelist COMMA ids", + /* 131 */ 
"tagNamelist ::= ids", + /* 132 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 133 */ "columnlist ::= columnlist COMMA column", + /* 134 */ "columnlist ::= column", + /* 135 */ "column ::= ids typename", + /* 136 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 137 */ "tagitemlist ::= tagitem", + /* 138 */ "tagitem ::= INTEGER", + /* 139 */ "tagitem ::= FLOAT", + /* 140 */ "tagitem ::= STRING", + /* 141 */ "tagitem ::= BOOL", + /* 142 */ "tagitem ::= NULL", + /* 143 */ "tagitem ::= MINUS INTEGER", + /* 144 */ "tagitem ::= MINUS FLOAT", + /* 145 */ "tagitem ::= PLUS INTEGER", + /* 146 */ "tagitem ::= PLUS FLOAT", + /* 147 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 148 */ "union ::= select", + /* 149 */ "union ::= LP union RP", + /* 150 */ "union ::= union UNION ALL select", + /* 151 */ "union ::= union UNION ALL LP select RP", + /* 152 */ "cmd ::= union", + /* 153 */ "select ::= SELECT selcollist", + /* 154 */ "sclp ::= selcollist COMMA", + /* 155 */ "sclp ::=", + /* 156 */ "selcollist ::= sclp distinct expr as", + /* 157 */ "selcollist ::= sclp STAR", + /* 158 */ "as ::= AS ids", + /* 159 */ "as ::= ids", + /* 160 */ "as ::=", + /* 161 */ "distinct ::= DISTINCT", + /* 162 */ "distinct ::=", + /* 163 */ "from ::= FROM tablelist", + /* 164 */ "tablelist ::= ids cpxName", + /* 165 */ "tablelist ::= ids cpxName ids", + /* 166 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 167 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 168 */ "tmvar ::= VARIABLE", + /* 169 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 170 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", + /* 171 */ "interval_opt ::=", + /* 172 */ "fill_opt ::=", + /* 173 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 174 */ "fill_opt ::= FILL LP ID RP", + /* 175 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 176 */ "sliding_opt ::=", + /* 177 */ "orderby_opt ::=", + /* 178 */ "orderby_opt ::= ORDER BY sortlist", + /* 179 */ "sortlist ::= sortlist COMMA item sortorder", + /* 180 */ "sortlist ::= item sortorder", + /* 181 */ "item ::= ids cpxName", + /* 182 */ "sortorder ::= ASC", + /* 183 */ "sortorder ::= DESC", + /* 184 */ "sortorder ::=", + /* 185 */ "groupby_opt ::=", + /* 186 */ "groupby_opt ::= GROUP BY grouplist", + /* 187 */ "grouplist ::= grouplist COMMA item", + /* 188 */ "grouplist ::= item", + /* 189 */ "having_opt ::=", + /* 190 */ "having_opt ::= HAVING expr", + /* 191 */ "limit_opt ::=", + /* 192 */ "limit_opt ::= LIMIT signed", + /* 193 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 194 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 195 */ "slimit_opt ::=", + /* 196 */ "slimit_opt ::= SLIMIT signed", + /* 197 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 198 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 199 */ "where_opt ::=", + /* 200 */ "where_opt ::= WHERE expr", + /* 201 */ "expr ::= LP expr RP", + /* 202 */ "expr ::= ID", + /* 203 */ "expr ::= ID DOT ID", + /* 204 */ "expr ::= ID DOT STAR", + /* 205 */ "expr ::= INTEGER", + /* 206 */ "expr ::= MINUS INTEGER", + /* 207 */ "expr ::= PLUS INTEGER", + /* 208 */ "expr ::= FLOAT", + /* 209 */ "expr ::= MINUS FLOAT", + /* 210 */ "expr ::= PLUS FLOAT", + /* 211 */ "expr ::= STRING", + /* 212 */ "expr ::= NOW", + /* 213 */ "expr ::= VARIABLE", + /* 214 */ "expr ::= BOOL", + /* 215 */ "expr ::= ID LP exprlist RP", + /* 216 */ "expr ::= ID LP STAR RP", + /* 217 */ "expr ::= expr IS NULL", + /* 218 
*/ "expr ::= expr IS NOT NULL", + /* 219 */ "expr ::= expr LT expr", + /* 220 */ "expr ::= expr GT expr", + /* 221 */ "expr ::= expr LE expr", + /* 222 */ "expr ::= expr GE expr", + /* 223 */ "expr ::= expr NE expr", + /* 224 */ "expr ::= expr EQ expr", + /* 225 */ "expr ::= expr BETWEEN expr AND expr", + /* 226 */ "expr ::= expr AND expr", + /* 227 */ "expr ::= expr OR expr", + /* 228 */ "expr ::= expr PLUS expr", + /* 229 */ "expr ::= expr MINUS expr", + /* 230 */ "expr ::= expr STAR expr", + /* 231 */ "expr ::= expr SLASH expr", + /* 232 */ "expr ::= expr REM expr", + /* 233 */ "expr ::= expr LIKE expr", + /* 234 */ "expr ::= expr IN LP exprlist RP", + /* 235 */ "exprlist ::= exprlist COMMA expritem", + /* 236 */ "exprlist ::= expritem", + /* 237 */ "expritem ::= expr", + /* 238 */ "expritem ::=", + /* 239 */ "cmd ::= RESET QUERY CACHE", + /* 240 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 241 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 242 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 243 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 244 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 245 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 246 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 247 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 248 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 249 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 250 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 251 */ "cmd ::= KILL CONNECTION INTEGER", + /* 252 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 253 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1359,48 +1375,49 @@ static void yy_destructor( case 204: /* keep */ case 205: /* tagitemlist */ case 226: /* columnlist */ - case 234: /* fill_opt */ - case 236: /* groupby_opt */ - case 237: /* orderby_opt */ - case 248: /* sortlist */ - case 252: /* grouplist */ + case 227: /* tagNamelist */ + case 235: /* fill_opt */ + case 237: /* groupby_opt */ + case 238: /* orderby_opt */ + case 249: /* sortlist */ + case 253: /* grouplist */ { -taosArrayDestroy((yypminor->yy141)); +taosArrayDestroy((yypminor->yy349)); } break; case 224: /* create_table_list */ { -destroyCreateTableSql((yypminor->yy8)); +destroyCreateTableSql((yypminor->yy278)); } break; - case 227: /* select */ + case 228: /* select */ { -doDestroyQuerySql((yypminor->yy234)); +doDestroyQuerySql((yypminor->yy216)); } break; - case 230: /* selcollist */ - case 242: /* sclp */ - case 253: /* exprlist */ + case 231: /* selcollist */ + case 243: /* sclp */ + case 254: /* exprlist */ { -tSqlExprListDestroy((yypminor->yy9)); +tSqlExprListDestroy((yypminor->yy258)); } break; - case 232: /* where_opt */ - case 238: /* having_opt */ - case 244: /* expr */ - case 254: /* expritem */ + case 233: /* where_opt */ + case 239: /* having_opt */ + case 245: /* expr */ + case 255: /* expritem */ { -tSqlExprDestroy((yypminor->yy220)); +tSqlExprDestroy((yypminor->yy114)); } break; - case 241: /* union */ + case 242: /* union */ { -destroyAllSelectClause((yypminor->yy63)); +destroyAllSelectClause((yypminor->yy417)); } break; - case 249: /* sortitem */ + case 250: /* sortitem */ { -tVariantDestroy(&(yypminor->yy326)); +tVariantDestroy(&(yypminor->yy426)); } break; /********* End destructor definitions *****************************************/ @@ -1823,127 +1840,131 @@ static const struct { { 222, -6 }, /* (126) 
create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { 223, -10 }, /* (127) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { 225, -10 }, /* (128) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 222, -5 }, /* (129) create_table_args ::= ifnotexists ids cpxName AS select */ - { 226, -3 }, /* (130) columnlist ::= columnlist COMMA column */ - { 226, -1 }, /* (131) columnlist ::= column */ - { 228, -2 }, /* (132) column ::= ids typename */ - { 205, -3 }, /* (133) tagitemlist ::= tagitemlist COMMA tagitem */ - { 205, -1 }, /* (134) tagitemlist ::= tagitem */ - { 229, -1 }, /* (135) tagitem ::= INTEGER */ - { 229, -1 }, /* (136) tagitem ::= FLOAT */ - { 229, -1 }, /* (137) tagitem ::= STRING */ - { 229, -1 }, /* (138) tagitem ::= BOOL */ - { 229, -1 }, /* (139) tagitem ::= NULL */ - { 229, -2 }, /* (140) tagitem ::= MINUS INTEGER */ - { 229, -2 }, /* (141) tagitem ::= MINUS FLOAT */ - { 229, -2 }, /* (142) tagitem ::= PLUS INTEGER */ - { 229, -2 }, /* (143) tagitem ::= PLUS FLOAT */ - { 227, -12 }, /* (144) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 241, -1 }, /* (145) union ::= select */ - { 241, -3 }, /* (146) union ::= LP union RP */ - { 241, -4 }, /* (147) union ::= union UNION ALL select */ - { 241, -6 }, /* (148) union ::= union UNION ALL LP select RP */ - { 186, -1 }, /* (149) cmd ::= union */ - { 227, -2 }, /* (150) select ::= SELECT selcollist */ - { 242, -2 }, /* (151) sclp ::= selcollist COMMA */ - { 242, 0 }, /* (152) sclp ::= */ - { 230, -4 }, /* (153) selcollist ::= sclp distinct expr as */ - { 230, -2 }, /* (154) selcollist ::= sclp STAR */ - { 245, -2 }, /* (155) as ::= AS ids */ - { 245, -1 }, /* (156) as ::= ids */ - { 245, 0 }, /* (157) as ::= */ - { 243, -1 }, /* (158) distinct ::= DISTINCT */ - { 243, 0 }, /* (159) distinct ::= */ - { 231, -2 }, /* (160) from ::= FROM tablelist */ - { 246, -2 }, /* (161) tablelist ::= ids cpxName */ - { 246, -3 }, /* (162) tablelist ::= ids cpxName ids */ - { 246, -4 }, /* (163) tablelist ::= tablelist COMMA ids cpxName */ - { 246, -5 }, /* (164) tablelist ::= tablelist COMMA ids cpxName ids */ - { 247, -1 }, /* (165) tmvar ::= VARIABLE */ - { 233, -4 }, /* (166) interval_opt ::= INTERVAL LP tmvar RP */ - { 233, -6 }, /* (167) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 233, 0 }, /* (168) interval_opt ::= */ - { 234, 0 }, /* (169) fill_opt ::= */ - { 234, -6 }, /* (170) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 234, -4 }, /* (171) fill_opt ::= FILL LP ID RP */ - { 235, -4 }, /* (172) sliding_opt ::= SLIDING LP tmvar RP */ - { 235, 0 }, /* (173) sliding_opt ::= */ - { 237, 0 }, /* (174) orderby_opt ::= */ - { 237, -3 }, /* (175) orderby_opt ::= ORDER BY sortlist */ - { 248, -4 }, /* (176) sortlist ::= sortlist COMMA item sortorder */ - { 248, -2 }, /* (177) sortlist ::= item sortorder */ - { 250, -2 }, /* (178) item ::= ids cpxName */ - { 251, -1 }, /* (179) sortorder ::= ASC */ - { 251, -1 }, /* (180) sortorder ::= DESC */ - { 251, 0 }, /* (181) sortorder ::= */ - { 236, 0 }, /* (182) groupby_opt ::= */ - { 236, -3 }, /* (183) groupby_opt ::= GROUP BY grouplist */ - { 252, -3 }, /* (184) grouplist ::= grouplist COMMA item */ - { 252, -1 }, /* (185) grouplist ::= item */ - { 238, 0 }, /* (186) having_opt ::= */ - { 238, -2 }, /* (187) having_opt ::= HAVING expr */ - { 240, 0 }, /* (188) limit_opt ::= */ - { 240, -2 }, /* 
(189) limit_opt ::= LIMIT signed */ - { 240, -4 }, /* (190) limit_opt ::= LIMIT signed OFFSET signed */ - { 240, -4 }, /* (191) limit_opt ::= LIMIT signed COMMA signed */ - { 239, 0 }, /* (192) slimit_opt ::= */ - { 239, -2 }, /* (193) slimit_opt ::= SLIMIT signed */ - { 239, -4 }, /* (194) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 239, -4 }, /* (195) slimit_opt ::= SLIMIT signed COMMA signed */ - { 232, 0 }, /* (196) where_opt ::= */ - { 232, -2 }, /* (197) where_opt ::= WHERE expr */ - { 244, -3 }, /* (198) expr ::= LP expr RP */ - { 244, -1 }, /* (199) expr ::= ID */ - { 244, -3 }, /* (200) expr ::= ID DOT ID */ - { 244, -3 }, /* (201) expr ::= ID DOT STAR */ - { 244, -1 }, /* (202) expr ::= INTEGER */ - { 244, -2 }, /* (203) expr ::= MINUS INTEGER */ - { 244, -2 }, /* (204) expr ::= PLUS INTEGER */ - { 244, -1 }, /* (205) expr ::= FLOAT */ - { 244, -2 }, /* (206) expr ::= MINUS FLOAT */ - { 244, -2 }, /* (207) expr ::= PLUS FLOAT */ - { 244, -1 }, /* (208) expr ::= STRING */ - { 244, -1 }, /* (209) expr ::= NOW */ - { 244, -1 }, /* (210) expr ::= VARIABLE */ - { 244, -1 }, /* (211) expr ::= BOOL */ - { 244, -4 }, /* (212) expr ::= ID LP exprlist RP */ - { 244, -4 }, /* (213) expr ::= ID LP STAR RP */ - { 244, -3 }, /* (214) expr ::= expr IS NULL */ - { 244, -4 }, /* (215) expr ::= expr IS NOT NULL */ - { 244, -3 }, /* (216) expr ::= expr LT expr */ - { 244, -3 }, /* (217) expr ::= expr GT expr */ - { 244, -3 }, /* (218) expr ::= expr LE expr */ - { 244, -3 }, /* (219) expr ::= expr GE expr */ - { 244, -3 }, /* (220) expr ::= expr NE expr */ - { 244, -3 }, /* (221) expr ::= expr EQ expr */ - { 244, -3 }, /* (222) expr ::= expr AND expr */ - { 244, -3 }, /* (223) expr ::= expr OR expr */ - { 244, -3 }, /* (224) expr ::= expr PLUS expr */ - { 244, -3 }, /* (225) expr ::= expr MINUS expr */ - { 244, -3 }, /* (226) expr ::= expr STAR expr */ - { 244, -3 }, /* (227) expr ::= expr SLASH expr */ - { 244, -3 }, /* (228) expr ::= expr REM expr */ - { 244, -3 }, /* (229) expr ::= expr LIKE expr */ - { 244, -5 }, /* (230) expr ::= expr IN LP exprlist RP */ - { 253, -3 }, /* (231) exprlist ::= exprlist COMMA expritem */ - { 253, -1 }, /* (232) exprlist ::= expritem */ - { 254, -1 }, /* (233) expritem ::= expr */ - { 254, 0 }, /* (234) expritem ::= */ - { 186, -3 }, /* (235) cmd ::= RESET QUERY CACHE */ - { 186, -7 }, /* (236) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 186, -7 }, /* (237) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 186, -7 }, /* (238) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 186, -7 }, /* (239) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 186, -8 }, /* (240) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 186, -9 }, /* (241) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 186, -7 }, /* (242) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 186, -7 }, /* (243) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 186, -7 }, /* (244) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 186, -7 }, /* (245) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 186, -8 }, /* (246) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 186, -3 }, /* (247) cmd ::= KILL CONNECTION INTEGER */ - { 186, -5 }, /* (248) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 186, -5 }, /* (249) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 225, -13 }, /* (129) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + { 227, -3 }, /* (130) 
tagNamelist ::= tagNamelist COMMA ids */ + { 227, -1 }, /* (131) tagNamelist ::= ids */ + { 222, -5 }, /* (132) create_table_args ::= ifnotexists ids cpxName AS select */ + { 226, -3 }, /* (133) columnlist ::= columnlist COMMA column */ + { 226, -1 }, /* (134) columnlist ::= column */ + { 229, -2 }, /* (135) column ::= ids typename */ + { 205, -3 }, /* (136) tagitemlist ::= tagitemlist COMMA tagitem */ + { 205, -1 }, /* (137) tagitemlist ::= tagitem */ + { 230, -1 }, /* (138) tagitem ::= INTEGER */ + { 230, -1 }, /* (139) tagitem ::= FLOAT */ + { 230, -1 }, /* (140) tagitem ::= STRING */ + { 230, -1 }, /* (141) tagitem ::= BOOL */ + { 230, -1 }, /* (142) tagitem ::= NULL */ + { 230, -2 }, /* (143) tagitem ::= MINUS INTEGER */ + { 230, -2 }, /* (144) tagitem ::= MINUS FLOAT */ + { 230, -2 }, /* (145) tagitem ::= PLUS INTEGER */ + { 230, -2 }, /* (146) tagitem ::= PLUS FLOAT */ + { 228, -12 }, /* (147) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 242, -1 }, /* (148) union ::= select */ + { 242, -3 }, /* (149) union ::= LP union RP */ + { 242, -4 }, /* (150) union ::= union UNION ALL select */ + { 242, -6 }, /* (151) union ::= union UNION ALL LP select RP */ + { 186, -1 }, /* (152) cmd ::= union */ + { 228, -2 }, /* (153) select ::= SELECT selcollist */ + { 243, -2 }, /* (154) sclp ::= selcollist COMMA */ + { 243, 0 }, /* (155) sclp ::= */ + { 231, -4 }, /* (156) selcollist ::= sclp distinct expr as */ + { 231, -2 }, /* (157) selcollist ::= sclp STAR */ + { 246, -2 }, /* (158) as ::= AS ids */ + { 246, -1 }, /* (159) as ::= ids */ + { 246, 0 }, /* (160) as ::= */ + { 244, -1 }, /* (161) distinct ::= DISTINCT */ + { 244, 0 }, /* (162) distinct ::= */ + { 232, -2 }, /* (163) from ::= FROM tablelist */ + { 247, -2 }, /* (164) tablelist ::= ids cpxName */ + { 247, -3 }, /* (165) tablelist ::= ids cpxName ids */ + { 247, -4 }, /* (166) tablelist ::= tablelist COMMA ids cpxName */ + { 247, -5 }, /* (167) tablelist ::= tablelist COMMA ids cpxName ids */ + { 248, -1 }, /* (168) tmvar ::= VARIABLE */ + { 234, -4 }, /* (169) interval_opt ::= INTERVAL LP tmvar RP */ + { 234, -6 }, /* (170) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 234, 0 }, /* (171) interval_opt ::= */ + { 235, 0 }, /* (172) fill_opt ::= */ + { 235, -6 }, /* (173) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 235, -4 }, /* (174) fill_opt ::= FILL LP ID RP */ + { 236, -4 }, /* (175) sliding_opt ::= SLIDING LP tmvar RP */ + { 236, 0 }, /* (176) sliding_opt ::= */ + { 238, 0 }, /* (177) orderby_opt ::= */ + { 238, -3 }, /* (178) orderby_opt ::= ORDER BY sortlist */ + { 249, -4 }, /* (179) sortlist ::= sortlist COMMA item sortorder */ + { 249, -2 }, /* (180) sortlist ::= item sortorder */ + { 251, -2 }, /* (181) item ::= ids cpxName */ + { 252, -1 }, /* (182) sortorder ::= ASC */ + { 252, -1 }, /* (183) sortorder ::= DESC */ + { 252, 0 }, /* (184) sortorder ::= */ + { 237, 0 }, /* (185) groupby_opt ::= */ + { 237, -3 }, /* (186) groupby_opt ::= GROUP BY grouplist */ + { 253, -3 }, /* (187) grouplist ::= grouplist COMMA item */ + { 253, -1 }, /* (188) grouplist ::= item */ + { 239, 0 }, /* (189) having_opt ::= */ + { 239, -2 }, /* (190) having_opt ::= HAVING expr */ + { 241, 0 }, /* (191) limit_opt ::= */ + { 241, -2 }, /* (192) limit_opt ::= LIMIT signed */ + { 241, -4 }, /* (193) limit_opt ::= LIMIT signed OFFSET signed */ + { 241, -4 }, /* (194) limit_opt ::= LIMIT signed COMMA signed */ + { 240, 0 }, /* (195) slimit_opt 
::= */ + { 240, -2 }, /* (196) slimit_opt ::= SLIMIT signed */ + { 240, -4 }, /* (197) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 240, -4 }, /* (198) slimit_opt ::= SLIMIT signed COMMA signed */ + { 233, 0 }, /* (199) where_opt ::= */ + { 233, -2 }, /* (200) where_opt ::= WHERE expr */ + { 245, -3 }, /* (201) expr ::= LP expr RP */ + { 245, -1 }, /* (202) expr ::= ID */ + { 245, -3 }, /* (203) expr ::= ID DOT ID */ + { 245, -3 }, /* (204) expr ::= ID DOT STAR */ + { 245, -1 }, /* (205) expr ::= INTEGER */ + { 245, -2 }, /* (206) expr ::= MINUS INTEGER */ + { 245, -2 }, /* (207) expr ::= PLUS INTEGER */ + { 245, -1 }, /* (208) expr ::= FLOAT */ + { 245, -2 }, /* (209) expr ::= MINUS FLOAT */ + { 245, -2 }, /* (210) expr ::= PLUS FLOAT */ + { 245, -1 }, /* (211) expr ::= STRING */ + { 245, -1 }, /* (212) expr ::= NOW */ + { 245, -1 }, /* (213) expr ::= VARIABLE */ + { 245, -1 }, /* (214) expr ::= BOOL */ + { 245, -4 }, /* (215) expr ::= ID LP exprlist RP */ + { 245, -4 }, /* (216) expr ::= ID LP STAR RP */ + { 245, -3 }, /* (217) expr ::= expr IS NULL */ + { 245, -4 }, /* (218) expr ::= expr IS NOT NULL */ + { 245, -3 }, /* (219) expr ::= expr LT expr */ + { 245, -3 }, /* (220) expr ::= expr GT expr */ + { 245, -3 }, /* (221) expr ::= expr LE expr */ + { 245, -3 }, /* (222) expr ::= expr GE expr */ + { 245, -3 }, /* (223) expr ::= expr NE expr */ + { 245, -3 }, /* (224) expr ::= expr EQ expr */ + { 245, -5 }, /* (225) expr ::= expr BETWEEN expr AND expr */ + { 245, -3 }, /* (226) expr ::= expr AND expr */ + { 245, -3 }, /* (227) expr ::= expr OR expr */ + { 245, -3 }, /* (228) expr ::= expr PLUS expr */ + { 245, -3 }, /* (229) expr ::= expr MINUS expr */ + { 245, -3 }, /* (230) expr ::= expr STAR expr */ + { 245, -3 }, /* (231) expr ::= expr SLASH expr */ + { 245, -3 }, /* (232) expr ::= expr REM expr */ + { 245, -3 }, /* (233) expr ::= expr LIKE expr */ + { 245, -5 }, /* (234) expr ::= expr IN LP exprlist RP */ + { 254, -3 }, /* (235) exprlist ::= exprlist COMMA expritem */ + { 254, -1 }, /* (236) exprlist ::= expritem */ + { 255, -1 }, /* (237) expritem ::= expr */ + { 255, 0 }, /* (238) expritem ::= */ + { 186, -3 }, /* (239) cmd ::= RESET QUERY CACHE */ + { 186, -7 }, /* (240) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 186, -7 }, /* (241) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 186, -7 }, /* (242) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 186, -7 }, /* (243) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 186, -8 }, /* (244) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 186, -9 }, /* (245) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 186, -7 }, /* (246) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + { 186, -7 }, /* (247) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + { 186, -7 }, /* (248) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + { 186, -7 }, /* (249) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + { 186, -8 }, /* (250) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + { 186, -3 }, /* (251) cmd ::= KILL CONNECTION INTEGER */ + { 186, -5 }, /* (252) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 186, -5 }, /* (253) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2183,13 +2204,13 @@ static void yy_reduce( { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 41: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SStrToken t = {0}; 
setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &t);} +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy222, &t);} break; case 42: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy1);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy299);} break; case 43: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy1);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy299);} break; case 44: /* ids ::= ID */ case 45: /* ids ::= STRING */ yytestcase(yyruleno==45); @@ -2201,7 +2222,7 @@ static void yy_reduce( break; case 47: /* ifexists ::= */ case 49: /* ifnotexists ::= */ yytestcase(yyruleno==49); - case 159: /* distinct ::= */ yytestcase(yyruleno==159); + case 162: /* distinct ::= */ yytestcase(yyruleno==162); { yymsp[1].minor.yy0.n = 0;} break; case 48: /* ifnotexists ::= IF NOT EXISTS */ @@ -2211,10 +2232,10 @@ static void yy_reduce( { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 51: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy1);} +{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy299);} break; case 52: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &yymsp[-2].minor.yy0);} +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy222, &yymsp[-2].minor.yy0);} break; case 53: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} @@ -2243,20 +2264,20 @@ static void yy_reduce( break; case 72: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy1.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy1.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy1.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy1.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy1.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy1.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy1.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy1.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy1.stat = yymsp[0].minor.yy0; -} - yymsp[-8].minor.yy1 = yylhsminor.yy1; + yylhsminor.yy299.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy299.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy299.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy299.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy299.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy299.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 
10):-1; + yylhsminor.yy299.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy299.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy299.stat = yymsp[0].minor.yy0; +} + yymsp[-8].minor.yy299 = yylhsminor.yy299; break; case 73: /* keep ::= KEEP tagitemlist */ -{ yymsp[-1].minor.yy141 = yymsp[0].minor.yy141; } +{ yymsp[-1].minor.yy349 = yymsp[0].minor.yy349; } break; case 74: /* cache ::= CACHE INTEGER */ case 75: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==75); @@ -2275,596 +2296,616 @@ static void yy_reduce( { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 88: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy322);} +{setDefaultCreateDbOption(&yymsp[1].minor.yy222);} break; case 89: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 90: /* db_optr ::= db_optr replica */ case 105: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==105); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 91: /* db_optr ::= db_optr quorum */ case 106: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==106); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 92: /* db_optr ::= db_optr days */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 93: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 94: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 95: /* db_optr ::= db_optr blocks */ case 108: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==108); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; 
yylhsminor.yy222.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 96: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 97: /* db_optr ::= db_optr wal */ case 110: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==110); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 98: /* db_optr ::= db_optr fsync */ case 111: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==111); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 99: /* db_optr ::= db_optr comp */ case 109: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==109); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 100: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 101: /* db_optr ::= db_optr keep */ case 107: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==107); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.keep = yymsp[0].minor.yy141; } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.keep = yymsp[0].minor.yy349; } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 102: /* db_optr ::= db_optr update */ case 112: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==112); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 103: /* db_optr ::= db_optr cachelast */ case 113: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==113); -{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy322 = yylhsminor.yy322; +{ yylhsminor.yy222 = yymsp[-1].minor.yy222; yylhsminor.yy222.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy222 = yylhsminor.yy222; break; case 104: /* alter_db_optr ::= */ -{ 
setDefaultCreateDbOption(&yymsp[1].minor.yy322);} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy222);} break; case 114: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSqlSetColumnType (&yylhsminor.yy403, &yymsp[0].minor.yy0); + tSqlSetColumnType (&yylhsminor.yy215, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy403 = yylhsminor.yy403; + yymsp[0].minor.yy215 = yylhsminor.yy215; break; case 115: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy429 <= 0) { + if (yymsp[-1].minor.yy221 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSqlSetColumnType(&yylhsminor.yy403, &yymsp[-3].minor.yy0); + tSqlSetColumnType(&yylhsminor.yy215, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy429; // negative value of name length - tSqlSetColumnType(&yylhsminor.yy403, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy221; // negative value of name length + tSqlSetColumnType(&yylhsminor.yy215, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy403 = yylhsminor.yy403; + yymsp[-3].minor.yy215 = yylhsminor.yy215; break; case 116: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSqlSetColumnType (&yylhsminor.yy403, &yymsp[-1].minor.yy0); + tSqlSetColumnType (&yylhsminor.yy215, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy403 = yylhsminor.yy403; + yymsp[-1].minor.yy215 = yylhsminor.yy215; break; case 117: /* signed ::= INTEGER */ -{ yylhsminor.yy429 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy429 = yylhsminor.yy429; +{ yylhsminor.yy221 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy221 = yylhsminor.yy221; break; case 118: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy429 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy221 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 119: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy429 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy221 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; case 123: /* cmd ::= CREATE TABLE create_table_list */ -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy8;} +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy278;} break; case 124: /* create_table_list ::= create_from_stable */ { SCreateTableSQL* pCreateTable = calloc(1, sizeof(SCreateTableSQL)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy306); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy32); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy8 = pCreateTable; + yylhsminor.yy278 = pCreateTable; } - yymsp[0].minor.yy8 = yylhsminor.yy8; + yymsp[0].minor.yy278 = yylhsminor.yy278; break; case 125: /* create_table_list ::= create_table_list create_from_stable */ { - taosArrayPush(yymsp[-1].minor.yy8->childTableInfo, &yymsp[0].minor.yy306); - yylhsminor.yy8 = yymsp[-1].minor.yy8; + taosArrayPush(yymsp[-1].minor.yy278->childTableInfo, &yymsp[0].minor.yy32); + yylhsminor.yy278 = yymsp[-1].minor.yy278; } - yymsp[-1].minor.yy8 = yylhsminor.yy8; + yymsp[-1].minor.yy278 = yylhsminor.yy278; break; case 126: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yylhsminor.yy8 = tSetCreateSqlElems(yymsp[-1].minor.yy141, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy8, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy278 = 
tSetCreateSqlElems(yymsp[-1].minor.yy349, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy278, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy8 = yylhsminor.yy8; + yymsp[-5].minor.yy278 = yylhsminor.yy278; break; case 127: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yylhsminor.yy8 = tSetCreateSqlElems(yymsp[-5].minor.yy141, yymsp[-1].minor.yy141, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy8, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy278 = tSetCreateSqlElems(yymsp[-5].minor.yy349, yymsp[-1].minor.yy349, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy278, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy8 = yylhsminor.yy8; + yymsp[-9].minor.yy278 = yylhsminor.yy278; break; case 128: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy306 = createNewChildTableInfo(&yymsp[-5].minor.yy0, yymsp[-1].minor.yy141, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy32 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy349, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy306 = yylhsminor.yy306; + yymsp[-9].minor.yy32 = yylhsminor.yy32; break; - case 129: /* create_table_args ::= ifnotexists ids cpxName AS select */ + case 129: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { - yylhsminor.yy8 = tSetCreateSqlElems(NULL, NULL, yymsp[0].minor.yy234, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy8, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; + yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; + yylhsminor.yy32 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy349, yymsp[-1].minor.yy349, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); +} + yymsp[-12].minor.yy32 = yylhsminor.yy32; + break; + case 130: /* tagNamelist ::= tagNamelist COMMA ids */ +{taosArrayPush(yymsp[-2].minor.yy349, &yymsp[0].minor.yy0); yylhsminor.yy349 = yymsp[-2].minor.yy349; } + yymsp[-2].minor.yy349 = yylhsminor.yy349; + break; + case 131: /* tagNamelist ::= ids */ +{yylhsminor.yy349 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy349, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy349 = yylhsminor.yy349; + break; + case 132: /* create_table_args ::= ifnotexists ids cpxName AS select */ +{ + yylhsminor.yy278 = tSetCreateSqlElems(NULL, NULL, yymsp[0].minor.yy216, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy278, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy8 = yylhsminor.yy8; + yymsp[-4].minor.yy278 = yylhsminor.yy278; break; - case 130: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy141, &yymsp[0].minor.yy403); yylhsminor.yy141 = yymsp[-2].minor.yy141; } - yymsp[-2].minor.yy141 = yylhsminor.yy141; + case 133: /* columnlist ::= columnlist COMMA column */ +{taosArrayPush(yymsp[-2].minor.yy349, &yymsp[0].minor.yy215); yylhsminor.yy349 = yymsp[-2].minor.yy349; } + yymsp[-2].minor.yy349 = 
yylhsminor.yy349; break; - case 131: /* columnlist ::= column */ -{yylhsminor.yy141 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy141, &yymsp[0].minor.yy403);} - yymsp[0].minor.yy141 = yylhsminor.yy141; + case 134: /* columnlist ::= column */ +{yylhsminor.yy349 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy349, &yymsp[0].minor.yy215);} + yymsp[0].minor.yy349 = yylhsminor.yy349; break; - case 132: /* column ::= ids typename */ + case 135: /* column ::= ids typename */ { - tSqlSetColumnInfo(&yylhsminor.yy403, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy403); + tSqlSetColumnInfo(&yylhsminor.yy215, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy215); } - yymsp[-1].minor.yy403 = yylhsminor.yy403; + yymsp[-1].minor.yy215 = yylhsminor.yy215; break; - case 133: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy141 = tVariantListAppend(yymsp[-2].minor.yy141, &yymsp[0].minor.yy326, -1); } - yymsp[-2].minor.yy141 = yylhsminor.yy141; + case 136: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yylhsminor.yy349 = tVariantListAppend(yymsp[-2].minor.yy349, &yymsp[0].minor.yy426, -1); } + yymsp[-2].minor.yy349 = yylhsminor.yy349; break; - case 134: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy141 = tVariantListAppend(NULL, &yymsp[0].minor.yy326, -1); } - yymsp[0].minor.yy141 = yylhsminor.yy141; + case 137: /* tagitemlist ::= tagitem */ +{ yylhsminor.yy349 = tVariantListAppend(NULL, &yymsp[0].minor.yy426, -1); } + yymsp[0].minor.yy349 = yylhsminor.yy349; break; - case 135: /* tagitem ::= INTEGER */ - case 136: /* tagitem ::= FLOAT */ yytestcase(yyruleno==136); - case 137: /* tagitem ::= STRING */ yytestcase(yyruleno==137); - case 138: /* tagitem ::= BOOL */ yytestcase(yyruleno==138); -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy326, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy326 = yylhsminor.yy326; + case 138: /* tagitem ::= INTEGER */ + case 139: /* tagitem ::= FLOAT */ yytestcase(yyruleno==139); + case 140: /* tagitem ::= STRING */ yytestcase(yyruleno==140); + case 141: /* tagitem ::= BOOL */ yytestcase(yyruleno==141); +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy426, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy426 = yylhsminor.yy426; break; - case 139: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy326, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy326 = yylhsminor.yy326; + case 142: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy426, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy426 = yylhsminor.yy426; break; - case 140: /* tagitem ::= MINUS INTEGER */ - case 141: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==141); - case 142: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==142); - case 143: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==143); + case 143: /* tagitem ::= MINUS INTEGER */ + case 144: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==144); + case 145: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==145); + case 146: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==146); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy326, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy426, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy326 = yylhsminor.yy326; + yymsp[-1].minor.yy426 = yylhsminor.yy426; break; - case 144: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt 
groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 147: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy234 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy9, yymsp[-9].minor.yy141, yymsp[-8].minor.yy220, yymsp[-4].minor.yy141, yymsp[-3].minor.yy141, &yymsp[-7].minor.yy340, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy141, &yymsp[0].minor.yy24, &yymsp[-1].minor.yy24); + yylhsminor.yy216 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy258, yymsp[-9].minor.yy349, yymsp[-8].minor.yy114, yymsp[-4].minor.yy349, yymsp[-3].minor.yy349, &yymsp[-7].minor.yy264, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy349, &yymsp[0].minor.yy454, &yymsp[-1].minor.yy454); } - yymsp[-11].minor.yy234 = yylhsminor.yy234; + yymsp[-11].minor.yy216 = yylhsminor.yy216; break; - case 145: /* union ::= select */ -{ yylhsminor.yy63 = setSubclause(NULL, yymsp[0].minor.yy234); } - yymsp[0].minor.yy63 = yylhsminor.yy63; + case 148: /* union ::= select */ +{ yylhsminor.yy417 = setSubclause(NULL, yymsp[0].minor.yy216); } + yymsp[0].minor.yy417 = yylhsminor.yy417; break; - case 146: /* union ::= LP union RP */ -{ yymsp[-2].minor.yy63 = yymsp[-1].minor.yy63; } + case 149: /* union ::= LP union RP */ +{ yymsp[-2].minor.yy417 = yymsp[-1].minor.yy417; } break; - case 147: /* union ::= union UNION ALL select */ -{ yylhsminor.yy63 = appendSelectClause(yymsp[-3].minor.yy63, yymsp[0].minor.yy234); } - yymsp[-3].minor.yy63 = yylhsminor.yy63; + case 150: /* union ::= union UNION ALL select */ +{ yylhsminor.yy417 = appendSelectClause(yymsp[-3].minor.yy417, yymsp[0].minor.yy216); } + yymsp[-3].minor.yy417 = yylhsminor.yy417; break; - case 148: /* union ::= union UNION ALL LP select RP */ -{ yylhsminor.yy63 = appendSelectClause(yymsp[-5].minor.yy63, yymsp[-1].minor.yy234); } - yymsp[-5].minor.yy63 = yylhsminor.yy63; + case 151: /* union ::= union UNION ALL LP select RP */ +{ yylhsminor.yy417 = appendSelectClause(yymsp[-5].minor.yy417, yymsp[-1].minor.yy216); } + yymsp[-5].minor.yy417 = yylhsminor.yy417; break; - case 149: /* cmd ::= union */ -{ setSqlInfo(pInfo, yymsp[0].minor.yy63, NULL, TSDB_SQL_SELECT); } + case 152: /* cmd ::= union */ +{ setSqlInfo(pInfo, yymsp[0].minor.yy417, NULL, TSDB_SQL_SELECT); } break; - case 150: /* select ::= SELECT selcollist */ + case 153: /* select ::= SELECT selcollist */ { - yylhsminor.yy234 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy9, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy216 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy258, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy234 = yylhsminor.yy234; + yymsp[-1].minor.yy216 = yylhsminor.yy216; break; - case 151: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy9 = yymsp[-1].minor.yy9;} - yymsp[-1].minor.yy9 = yylhsminor.yy9; + case 154: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy258 = yymsp[-1].minor.yy258;} + yymsp[-1].minor.yy258 = yylhsminor.yy258; break; - case 152: /* sclp ::= */ -{yymsp[1].minor.yy9 = 0;} + case 155: /* sclp ::= */ +{yymsp[1].minor.yy258 = 0;} break; - case 153: /* selcollist ::= sclp distinct expr as */ + case 156: /* selcollist ::= sclp distinct expr as */ { - yylhsminor.yy9 = tSqlExprListAppend(yymsp[-3].minor.yy9, yymsp[-1].minor.yy220, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy258 = tSqlExprListAppend(yymsp[-3].minor.yy258, yymsp[-1].minor.yy114, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-3].minor.yy9 = yylhsminor.yy9; + yymsp[-3].minor.yy258 = yylhsminor.yy258; break; - case 154: /* selcollist ::= sclp STAR */ + case 157: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSqlExprIdValueCreate(NULL, TK_ALL); - yylhsminor.yy9 = tSqlExprListAppend(yymsp[-1].minor.yy9, pNode, 0, 0); + yylhsminor.yy258 = tSqlExprListAppend(yymsp[-1].minor.yy258, pNode, 0, 0); } - yymsp[-1].minor.yy9 = yylhsminor.yy9; + yymsp[-1].minor.yy258 = yylhsminor.yy258; break; - case 155: /* as ::= AS ids */ + case 158: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 156: /* as ::= ids */ + case 159: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 157: /* as ::= */ + case 160: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 158: /* distinct ::= DISTINCT */ + case 161: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 160: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy141 = yymsp[0].minor.yy141;} + case 163: /* from ::= FROM tablelist */ +{yymsp[-1].minor.yy349 = yymsp[0].minor.yy349;} break; - case 161: /* tablelist ::= ids cpxName */ + case 164: /* tablelist ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy141 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy141 = tVariantListAppendToken(yylhsminor.yy141, &yymsp[-1].minor.yy0, -1); // table alias name + yylhsminor.yy349 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yylhsminor.yy349, &yymsp[-1].minor.yy0, -1); // table alias name } - yymsp[-1].minor.yy141 = yylhsminor.yy141; + yymsp[-1].minor.yy349 = yylhsminor.yy349; break; - case 162: /* tablelist ::= ids cpxName ids */ + case 165: /* tablelist ::= ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy141 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy141 = tVariantListAppendToken(yylhsminor.yy141, &yymsp[0].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yylhsminor.yy349, &yymsp[0].minor.yy0, -1); } - yymsp[-2].minor.yy141 = yylhsminor.yy141; + yymsp[-2].minor.yy349 = yylhsminor.yy349; break; - case 163: /* tablelist ::= tablelist COMMA ids cpxName */ + case 166: /* tablelist ::= tablelist COMMA ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy141 = tVariantListAppendToken(yymsp[-3].minor.yy141, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy141 = tVariantListAppendToken(yylhsminor.yy141, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yymsp[-3].minor.yy349, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yylhsminor.yy349, &yymsp[-1].minor.yy0, -1); } - yymsp[-3].minor.yy141 = yylhsminor.yy141; + yymsp[-3].minor.yy349 = yylhsminor.yy349; break; - case 164: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 167: /* tablelist ::= tablelist COMMA ids cpxName ids */ { 
toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy141 = tVariantListAppendToken(yymsp[-4].minor.yy141, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy141 = tVariantListAppendToken(yylhsminor.yy141, &yymsp[0].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yymsp[-4].minor.yy349, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy349 = tVariantListAppendToken(yylhsminor.yy349, &yymsp[0].minor.yy0, -1); } - yymsp[-4].minor.yy141 = yylhsminor.yy141; + yymsp[-4].minor.yy349 = yylhsminor.yy349; break; - case 165: /* tmvar ::= VARIABLE */ + case 168: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 166: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy340.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy340.offset.n = 0; yymsp[-3].minor.yy340.offset.z = NULL; yymsp[-3].minor.yy340.offset.type = 0;} + case 169: /* interval_opt ::= INTERVAL LP tmvar RP */ +{yymsp[-3].minor.yy264.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy264.offset.n = 0; yymsp[-3].minor.yy264.offset.z = NULL; yymsp[-3].minor.yy264.offset.type = 0;} break; - case 167: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy340.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy340.offset = yymsp[-1].minor.yy0;} + case 170: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ +{yymsp[-5].minor.yy264.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy264.offset = yymsp[-1].minor.yy0;} break; - case 168: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy340, 0, sizeof(yymsp[1].minor.yy340));} + case 171: /* interval_opt ::= */ +{memset(&yymsp[1].minor.yy264, 0, sizeof(yymsp[1].minor.yy264));} break; - case 169: /* fill_opt ::= */ -{yymsp[1].minor.yy141 = 0; } + case 172: /* fill_opt ::= */ +{yymsp[1].minor.yy349 = 0; } break; - case 170: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 173: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy141, &A, -1, 0); - yymsp[-5].minor.yy141 = yymsp[-1].minor.yy141; + tVariantListInsert(yymsp[-1].minor.yy349, &A, -1, 0); + yymsp[-5].minor.yy349 = yymsp[-1].minor.yy349; } break; - case 171: /* fill_opt ::= FILL LP ID RP */ + case 174: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy141 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy349 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 172: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 175: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 173: /* sliding_opt ::= */ + case 176: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 174: /* orderby_opt ::= */ -{yymsp[1].minor.yy141 = 0;} + case 177: /* orderby_opt ::= */ +{yymsp[1].minor.yy349 = 0;} break; - case 175: /* orderby_opt ::= ORDER BY sortlist */ -{yymsp[-2].minor.yy141 = yymsp[0].minor.yy141;} + case 178: /* orderby_opt ::= ORDER BY sortlist */ +{yymsp[-2].minor.yy349 = yymsp[0].minor.yy349;} break; - case 176: /* sortlist ::= sortlist COMMA item sortorder */ + case 179: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy141 = tVariantListAppend(yymsp[-3].minor.yy141, &yymsp[-1].minor.yy326, 
yymsp[0].minor.yy502); + yylhsminor.yy349 = tVariantListAppend(yymsp[-3].minor.yy349, &yymsp[-1].minor.yy426, yymsp[0].minor.yy348); } - yymsp[-3].minor.yy141 = yylhsminor.yy141; + yymsp[-3].minor.yy349 = yylhsminor.yy349; break; - case 177: /* sortlist ::= item sortorder */ + case 180: /* sortlist ::= item sortorder */ { - yylhsminor.yy141 = tVariantListAppend(NULL, &yymsp[-1].minor.yy326, yymsp[0].minor.yy502); + yylhsminor.yy349 = tVariantListAppend(NULL, &yymsp[-1].minor.yy426, yymsp[0].minor.yy348); } - yymsp[-1].minor.yy141 = yylhsminor.yy141; + yymsp[-1].minor.yy349 = yylhsminor.yy349; break; - case 178: /* item ::= ids cpxName */ + case 181: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy326, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy426, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy326 = yylhsminor.yy326; + yymsp[-1].minor.yy426 = yylhsminor.yy426; break; - case 179: /* sortorder ::= ASC */ -{ yymsp[0].minor.yy502 = TSDB_ORDER_ASC; } + case 182: /* sortorder ::= ASC */ +{ yymsp[0].minor.yy348 = TSDB_ORDER_ASC; } break; - case 180: /* sortorder ::= DESC */ -{ yymsp[0].minor.yy502 = TSDB_ORDER_DESC;} + case 183: /* sortorder ::= DESC */ +{ yymsp[0].minor.yy348 = TSDB_ORDER_DESC;} break; - case 181: /* sortorder ::= */ -{ yymsp[1].minor.yy502 = TSDB_ORDER_ASC; } + case 184: /* sortorder ::= */ +{ yymsp[1].minor.yy348 = TSDB_ORDER_ASC; } break; - case 182: /* groupby_opt ::= */ -{ yymsp[1].minor.yy141 = 0;} + case 185: /* groupby_opt ::= */ +{ yymsp[1].minor.yy349 = 0;} break; - case 183: /* groupby_opt ::= GROUP BY grouplist */ -{ yymsp[-2].minor.yy141 = yymsp[0].minor.yy141;} + case 186: /* groupby_opt ::= GROUP BY grouplist */ +{ yymsp[-2].minor.yy349 = yymsp[0].minor.yy349;} break; - case 184: /* grouplist ::= grouplist COMMA item */ + case 187: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy141 = tVariantListAppend(yymsp[-2].minor.yy141, &yymsp[0].minor.yy326, -1); + yylhsminor.yy349 = tVariantListAppend(yymsp[-2].minor.yy349, &yymsp[0].minor.yy426, -1); } - yymsp[-2].minor.yy141 = yylhsminor.yy141; + yymsp[-2].minor.yy349 = yylhsminor.yy349; break; - case 185: /* grouplist ::= item */ + case 188: /* grouplist ::= item */ { - yylhsminor.yy141 = tVariantListAppend(NULL, &yymsp[0].minor.yy326, -1); + yylhsminor.yy349 = tVariantListAppend(NULL, &yymsp[0].minor.yy426, -1); } - yymsp[0].minor.yy141 = yylhsminor.yy141; + yymsp[0].minor.yy349 = yylhsminor.yy349; + break; + case 189: /* having_opt ::= */ + case 199: /* where_opt ::= */ yytestcase(yyruleno==199); + case 238: /* expritem ::= */ yytestcase(yyruleno==238); +{yymsp[1].minor.yy114 = 0;} break; - case 186: /* having_opt ::= */ - case 196: /* where_opt ::= */ yytestcase(yyruleno==196); - case 234: /* expritem ::= */ yytestcase(yyruleno==234); -{yymsp[1].minor.yy220 = 0;} + case 190: /* having_opt ::= HAVING expr */ + case 200: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==200); +{yymsp[-1].minor.yy114 = yymsp[0].minor.yy114;} break; - case 187: /* having_opt ::= HAVING expr */ - case 197: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==197); -{yymsp[-1].minor.yy220 = yymsp[0].minor.yy220;} + case 191: /* limit_opt ::= */ + case 195: /* slimit_opt ::= */ yytestcase(yyruleno==195); +{yymsp[1].minor.yy454.limit = -1; yymsp[1].minor.yy454.offset = 0;} break; - case 188: /* limit_opt ::= */ - case 192: /* slimit_opt ::= */ yytestcase(yyruleno==192); -{yymsp[1].minor.yy24.limit = -1; 
yymsp[1].minor.yy24.offset = 0;} + case 192: /* limit_opt ::= LIMIT signed */ + case 196: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==196); +{yymsp[-1].minor.yy454.limit = yymsp[0].minor.yy221; yymsp[-1].minor.yy454.offset = 0;} break; - case 189: /* limit_opt ::= LIMIT signed */ - case 193: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==193); -{yymsp[-1].minor.yy24.limit = yymsp[0].minor.yy429; yymsp[-1].minor.yy24.offset = 0;} + case 193: /* limit_opt ::= LIMIT signed OFFSET signed */ +{ yymsp[-3].minor.yy454.limit = yymsp[-2].minor.yy221; yymsp[-3].minor.yy454.offset = yymsp[0].minor.yy221;} break; - case 190: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy24.limit = yymsp[-2].minor.yy429; yymsp[-3].minor.yy24.offset = yymsp[0].minor.yy429;} + case 194: /* limit_opt ::= LIMIT signed COMMA signed */ +{ yymsp[-3].minor.yy454.limit = yymsp[0].minor.yy221; yymsp[-3].minor.yy454.offset = yymsp[-2].minor.yy221;} break; - case 191: /* limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy24.limit = yymsp[0].minor.yy429; yymsp[-3].minor.yy24.offset = yymsp[-2].minor.yy429;} + case 197: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy454.limit = yymsp[-2].minor.yy221; yymsp[-3].minor.yy454.offset = yymsp[0].minor.yy221;} break; - case 194: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy24.limit = yymsp[-2].minor.yy429; yymsp[-3].minor.yy24.offset = yymsp[0].minor.yy429;} + case 198: /* slimit_opt ::= SLIMIT signed COMMA signed */ +{yymsp[-3].minor.yy454.limit = yymsp[0].minor.yy221; yymsp[-3].minor.yy454.offset = yymsp[-2].minor.yy221;} break; - case 195: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy24.limit = yymsp[0].minor.yy429; yymsp[-3].minor.yy24.offset = yymsp[-2].minor.yy429;} + case 201: /* expr ::= LP expr RP */ +{yylhsminor.yy114 = yymsp[-1].minor.yy114; yylhsminor.yy114->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy114->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 198: /* expr ::= LP expr RP */ -{yylhsminor.yy220 = yymsp[-1].minor.yy220; yylhsminor.yy220->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy220->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 202: /* expr ::= ID */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 199: /* expr ::= ID */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 203: /* expr ::= ID DOT ID */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 200: /* expr ::= ID DOT ID */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 204: /* expr ::= ID DOT STAR */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 201: /* expr ::= ID DOT STAR */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 205: /* expr ::= INTEGER */ +{ yylhsminor.yy114 
= tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 202: /* expr ::= INTEGER */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 206: /* expr ::= MINUS INTEGER */ + case 207: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==207); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy114 = yylhsminor.yy114; break; - case 203: /* expr ::= MINUS INTEGER */ - case 204: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==204); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy220 = yylhsminor.yy220; + case 208: /* expr ::= FLOAT */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 205: /* expr ::= FLOAT */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 209: /* expr ::= MINUS FLOAT */ + case 210: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==210); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy114 = yylhsminor.yy114; break; - case 206: /* expr ::= MINUS FLOAT */ - case 207: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==207); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy220 = yylhsminor.yy220; + case 211: /* expr ::= STRING */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 208: /* expr ::= STRING */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 212: /* expr ::= NOW */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 209: /* expr ::= NOW */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 213: /* expr ::= VARIABLE */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 210: /* expr ::= VARIABLE */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 214: /* expr ::= BOOL */ +{ yylhsminor.yy114 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 211: /* expr ::= BOOL */ -{ yylhsminor.yy220 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 215: /* expr ::= ID LP exprlist RP */ +{ yylhsminor.yy114 = tSqlExprCreateFunction(yymsp[-1].minor.yy258, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy114 = yylhsminor.yy114; break; - case 212: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy220 = tSqlExprCreateFunction(yymsp[-1].minor.yy9, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy220 = 
yylhsminor.yy220; + case 216: /* expr ::= ID LP STAR RP */ +{ yylhsminor.yy114 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy114 = yylhsminor.yy114; break; - case 213: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy220 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy220 = yylhsminor.yy220; + case 217: /* expr ::= expr IS NULL */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, NULL, TK_ISNULL);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 214: /* expr ::= expr IS NULL */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, NULL, TK_ISNULL);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 218: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-3].minor.yy114, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy114 = yylhsminor.yy114; break; - case 215: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-3].minor.yy220, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy220 = yylhsminor.yy220; + case 219: /* expr ::= expr LT expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_LT);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 216: /* expr ::= expr LT expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_LT);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 220: /* expr ::= expr GT expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_GT);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 217: /* expr ::= expr GT expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_GT);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 221: /* expr ::= expr LE expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_LE);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 218: /* expr ::= expr LE expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_LE);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 222: /* expr ::= expr GE expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_GE);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 219: /* expr ::= expr GE expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_GE);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 223: /* expr ::= expr NE expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_NE);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 220: /* expr ::= expr NE expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_NE);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 224: /* expr ::= expr EQ expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_EQ);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 221: /* expr ::= expr EQ expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_EQ);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 225: /* expr ::= expr BETWEEN expr AND expr */ +{ tSQLExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy114); yylhsminor.yy114 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy114, yymsp[-2].minor.yy114, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy114, TK_LE), TK_AND);} + yymsp[-4].minor.yy114 = 
yylhsminor.yy114; break; - case 222: /* expr ::= expr AND expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_AND);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 226: /* expr ::= expr AND expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_AND);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 223: /* expr ::= expr OR expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_OR); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 227: /* expr ::= expr OR expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_OR); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 224: /* expr ::= expr PLUS expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_PLUS); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 228: /* expr ::= expr PLUS expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_PLUS); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 225: /* expr ::= expr MINUS expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_MINUS); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 229: /* expr ::= expr MINUS expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_MINUS); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 226: /* expr ::= expr STAR expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_STAR); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 230: /* expr ::= expr STAR expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_STAR); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 227: /* expr ::= expr SLASH expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_DIVIDE);} - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 231: /* expr ::= expr SLASH expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_DIVIDE);} + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 228: /* expr ::= expr REM expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_REM); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 232: /* expr ::= expr REM expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_REM); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 229: /* expr ::= expr LIKE expr */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-2].minor.yy220, yymsp[0].minor.yy220, TK_LIKE); } - yymsp[-2].minor.yy220 = yylhsminor.yy220; + case 233: /* expr ::= expr LIKE expr */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-2].minor.yy114, yymsp[0].minor.yy114, TK_LIKE); } + yymsp[-2].minor.yy114 = yylhsminor.yy114; break; - case 230: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy220 = tSqlExprCreate(yymsp[-4].minor.yy220, (tSQLExpr*)yymsp[-1].minor.yy9, TK_IN); } - yymsp[-4].minor.yy220 = yylhsminor.yy220; + case 234: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy114 = tSqlExprCreate(yymsp[-4].minor.yy114, (tSQLExpr*)yymsp[-1].minor.yy258, TK_IN); } + yymsp[-4].minor.yy114 = yylhsminor.yy114; break; - case 231: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy9 = tSqlExprListAppend(yymsp[-2].minor.yy9,yymsp[0].minor.yy220,0, 0);} - yymsp[-2].minor.yy9 = yylhsminor.yy9; + case 235: /* exprlist ::= exprlist 
COMMA expritem */ +{yylhsminor.yy258 = tSqlExprListAppend(yymsp[-2].minor.yy258,yymsp[0].minor.yy114,0, 0);} + yymsp[-2].minor.yy258 = yylhsminor.yy258; break; - case 232: /* exprlist ::= expritem */ -{yylhsminor.yy9 = tSqlExprListAppend(0,yymsp[0].minor.yy220,0, 0);} - yymsp[0].minor.yy9 = yylhsminor.yy9; + case 236: /* exprlist ::= expritem */ +{yylhsminor.yy258 = tSqlExprListAppend(0,yymsp[0].minor.yy114,0, 0);} + yymsp[0].minor.yy258 = yylhsminor.yy258; break; - case 233: /* expritem ::= expr */ -{yylhsminor.yy220 = yymsp[0].minor.yy220;} - yymsp[0].minor.yy220 = yylhsminor.yy220; + case 237: /* expritem ::= expr */ +{yylhsminor.yy114 = yymsp[0].minor.yy114;} + yymsp[0].minor.yy114 = yylhsminor.yy114; break; - case 235: /* cmd ::= RESET QUERY CACHE */ + case 239: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 236: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 240: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy141, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy349, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 237: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 241: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2875,14 +2916,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 238: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 242: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy141, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy349, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 239: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 243: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2893,7 +2934,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 240: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 244: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2907,26 +2948,26 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 241: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 245: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy326, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy426, -1); SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 242: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 246: /* cmd ::= ALTER STABLE ids 
cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy141, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy349, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 243: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 247: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2937,14 +2978,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 244: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 248: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy141, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy349, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 245: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 249: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2955,7 +2996,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 246: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 250: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2969,13 +3010,13 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 247: /* cmd ::= KILL CONNECTION INTEGER */ + case 251: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 248: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 252: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 249: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 253: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index c3798b869eb6c02008043346d70b52592239cad0..1856223391ae719ef98492160da9810826b983a9 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp index f2b457e7dd686af16ae4a1192fe52a6f8090274f..104bfb3c06a9613bafcd4e0b3f39af4f9d102b04 100644 --- a/src/query/tests/percentileTest.cpp +++ b/src/query/tests/percentileTest.cpp @@ -48,7 +48,7 @@ tMemBucket *createUnsignedDataBucket(int32_t start, int32_t end, int32_t type) { uint64_t k = i; int32_t ret = tMemBucketPut(pBucket, &k, 1); if (ret != 0) { - printf("value out of range:%f", k); + 
printf("value out of range:%" PRId64, k); } } @@ -245,7 +245,7 @@ void unsignedDataTest() { } // namespace TEST(testCase, percentileTest) { - // qsortTest(); +// qsortTest(); intDataTest(); bigintDataTest(); doubleDataTest(); diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp index a8500364dceaef97d215b72ef805163e726bdaf9..3406d8309023118d16cfc4dfbdab58defc6f005a 100644 --- a/src/query/tests/unitTest.cpp +++ b/src/query/tests/unitTest.cpp @@ -227,10 +227,10 @@ TEST(testCase, db_table_name) { EXPECT_EQ(testValidateName(t60_1), TSDB_CODE_TSC_INVALID_SQL); char t61[] = "' ABC '"; - EXPECT_EQ(testValidateName(t61), TSDB_CODE_SUCCESS); + EXPECT_EQ(testValidateName(t61), TSDB_CODE_TSC_INVALID_SQL); char t61_1[] = "' ABC '"; - EXPECT_EQ(testValidateName(t61_1), TSDB_CODE_SUCCESS); + EXPECT_EQ(testValidateName(t61_1), TSDB_CODE_TSC_INVALID_SQL); char t62[] = " ABC . def "; EXPECT_EQ(testValidateName(t62), TSDB_CODE_TSC_INVALID_SQL); @@ -249,13 +249,13 @@ TEST(testCase, db_table_name) { EXPECT_EQ(testValidateName(t65), TSDB_CODE_TSC_INVALID_SQL); char t66[] = "' ABC '.' DEF '"; - EXPECT_EQ(testValidateName(t66), TSDB_CODE_SUCCESS); + EXPECT_EQ(testValidateName(t66), TSDB_CODE_TSC_INVALID_SQL); char t67[] = "abc . ' DEF '"; EXPECT_EQ(testValidateName(t67), TSDB_CODE_TSC_INVALID_SQL); char t68[] = "' abc '.' DEF '"; - EXPECT_EQ(testValidateName(t68), TSDB_CODE_SUCCESS); + EXPECT_EQ(testValidateName(t68), TSDB_CODE_TSC_INVALID_SQL); // do not use key words char t69[] = "table.'DEF'"; @@ -265,7 +265,7 @@ TEST(testCase, db_table_name) { EXPECT_EQ(testValidateName(t70), TSDB_CODE_TSC_INVALID_SQL); char t71[] = "'_abXYZ1234 '.' deFF '"; - EXPECT_EQ(testValidateName(t71), TSDB_CODE_SUCCESS); + EXPECT_EQ(testValidateName(t71), TSDB_CODE_TSC_INVALID_SQL); char t72[] = "'_abDEF&^%1234'.' DIef'"; EXPECT_EQ(testValidateName(t72), TSDB_CODE_TSC_INVALID_SQL); diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt index 02a1e7c2d81d13194ca819c703657df5e1f3fb3b..f94b4aeb6d21277b6b845587cd35a2c98e0bc0b0 100644 --- a/src/rpc/CMakeLists.txt +++ b/src/rpc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 17b80a0afb6f175a73dce5cb361674030561c96e..cae227cbdb7363bc0b26c80f1cc9edcbf1574d71 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -246,8 +246,12 @@ void *rpcOpen(const SRpcInit *pInit) { if(pInit->label) tstrncpy(pRpc->label, pInit->label, sizeof(pRpc->label)); pRpc->connType = pInit->connType; + if (pRpc->connType == TAOS_CONN_CLIENT) { + pRpc->numOfThreads = pInit->numOfThreads; + } else { + pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS:pInit->numOfThreads; + } pRpc->idleTime = pInit->idleTime; - pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? 
TSDB_MAX_RPC_THREADS:pInit->numOfThreads; pRpc->localPort = pInit->localPort; pRpc->afp = pInit->afp; pRpc->sessions = pInit->sessions+1; @@ -1277,7 +1281,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { SRpcConn *pConn = rpcSetupConnToServer(pContext); if (pConn == NULL) { pContext->code = terrno; - taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl); + taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl); return; } diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index e9231058600ce2135f669dd42a6c52cca81424cd..c10cea6c9dd8c53ab8608c8a736795f2318059d8 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index cc86bf704c24223bcc0ff90e3633efa8d065ac96..82d0bbf520843f5418d1004f2fe7c1be756b7b6f 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 0f7fb77da87ff7fd336d32c36c6fa846fbafeca1..956ccdc0730e345d2e0ece3bcf45e4d0e9b3c14d 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -741,11 +741,14 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { if (pTemp->role != TAOS_SYNC_ROLE_MASTER) continue; if (masterIndex < 0) { masterIndex = index; + sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, index); } else { // multiple masters, it shall not happen if (masterIndex == pNode->selfIndex) { sError("%s, peer is master, work as slave instead", pTemp->id); nodeRole = TAOS_SYNC_ROLE_SLAVE; (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); + } else { + sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, index); } } } @@ -833,7 +836,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new } if (oldPeerRole != newPeerRole || nodeRole != oldSelfRole) { - sDebug("vgId:%d, roles changed, broadcast status", pNode->vgId); + sDebug("vgId:%d, roles changed, broadcast status, replica:%d", pNode->vgId, pNode->replica); syncBroadcastStatus(pNode); } @@ -860,8 +863,12 @@ static void syncRestartPeer(SSyncPeer *pPeer) { void syncRestartConnection(SSyncPeer *pPeer) { if (pPeer->ip == 0) return; + if (syncAcquirePeer(pPeer->rid) == NULL) return; + syncRestartPeer(pPeer); syncCheckRole(pPeer, NULL, TAOS_SYNC_ROLE_OFFLINE); + + syncReleasePeer(pPeer); } static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index 76404c751e8a9bec7bcd37ccc1013655e7a38232..c0d66316cd5b802ddcaddf1015d4ceca1aa3b2c5 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -131,6 +131,11 @@ static int32_t syncProcessBufferedFwd(SSyncPeer *pPeer) { SRecvBuffer *pRecv = pNode->pRecv; int32_t forwards = 0; + if (pRecv == NULL) { + sError("%s, recv buffer is null, restart connect", pPeer->id); + return -1; + } + sDebug("%s, number of buffered forwards:%d", pPeer->id, pRecv->forwards); char *offset = pRecv->buffer; @@ -179,6 +184,7 @@ int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) { static void syncCloseRecvBuffer(SSyncNode *pNode) { if (pNode->pRecv) { + sDebug("vgId:%d, recv buffer:%p is 
freed", pNode->vgId, pNode->pRecv); tfree(pNode->pRecv->buffer); } @@ -203,6 +209,7 @@ static int32_t syncOpenRecvBuffer(SSyncNode *pNode) { pNode->pRecv = pRecv; + sDebug("vgId:%d, recv buffer:%p is created", pNode->vgId, pNode->pRecv); return 0; } @@ -269,7 +276,8 @@ void *syncRestoreData(void *param) { atomic_add_fetch_32(&tsSyncNum, 1); sInfo("%s, start to restore data, sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); - (*pNode->notifyRoleFp)(pNode->vgId, TAOS_SYNC_ROLE_SYNCING); + nodeRole = TAOS_SYNC_ROLE_SYNCING; + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); if (syncOpenRecvBuffer(pNode) < 0) { sError("%s, failed to allocate recv buffer, restart connection", pPeer->id); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index ec4bbb33a5988656df9a665e67d60054336e7c7f..be4073760dfa551878301bc3bd4f2405b12b77cf 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -170,14 +170,14 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead) { return sizeof(SWalHead) + pHead->len; } -static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) { +static int64_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) { int32_t sfd = open(name, O_RDONLY | O_BINARY); if (sfd < 0) { sError("%s, failed to open wal:%s for retrieve since:%s", pPeer->id, name, tstrerror(errno)); return -1; } - int32_t code = (int32_t)taosLSeek(sfd, offset, SEEK_SET); + int64_t code = taosLSeek(sfd, offset, SEEK_SET); if (code < 0) { sError("%s, failed to seek %" PRId64 " in wal:%s for retrieve since:%s", pPeer->id, offset, name, tstrerror(errno)); close(sfd); @@ -187,7 +187,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi sDebug("%s, retrieve last wal:%s, offset:%" PRId64 " fver:%" PRIu64, pPeer->id, name, offset, fversion); SWalHead *pHead = malloc(SYNC_MAX_SIZE); - int32_t bytes = 0; + int64_t bytes = 0; while (1) { code = syncReadOneWalRecord(sfd, pHead); @@ -198,13 +198,13 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi if (code == 0) { code = bytes; - sDebug("%s, read to the end of wal, bytes:%d", pPeer->id, bytes); + sDebug("%s, read to the end of wal, bytes:%" PRId64, pPeer->id, bytes); break; } - sDebug("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version); + sTrace("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version); - int32_t wsize = code; + int32_t wsize = (int32_t)code; int32_t ret = taosWriteMsg(pPeer->syncFd, pHead, wsize); if (ret != wsize) { code = -1; @@ -228,7 +228,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi return code; } -static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { +static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { SSyncNode *pNode = pPeer->pSyncNode; int32_t once = 0; // last WAL has once ever been processed int64_t offset = 0; @@ -243,9 +243,9 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) if (syncAreFilesModified(pNode, pPeer)) return -1; if (syncGetWalVersion(pNode, pPeer) < 0) return -1; - int32_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset); + int64_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset); if (bytes < 0) { - sDebug("%s, failed to retrieve last wal", pPeer->id); + sDebug("%s, failed to retrieve last wal, bytes:%" PRId64, pPeer->id, bytes); return bytes; } @@ -263,7 +263,7 @@ 
static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) // if all data up to fversion is read out, it is over if (pPeer->sversion >= fversion && fversion > 0) { - sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%d sver:%" PRIu64, pPeer->id, fversion, bytes, + sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%" PRId64 " sver:%" PRIu64, pPeer->id, fversion, bytes, pPeer->sversion); return 0; } @@ -277,19 +277,19 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) // if bytes > 0, file is updated, or fversion is not reached but file still open, read again once = 1; offset += bytes; - sDebug("%s, continue retrieve last wal, bytes:%d offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id, + sDebug("%s, continue retrieve last wal, bytes:%" PRId64 " offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id, bytes, offset, pPeer->sversion, fversion); } return -1; } -static int32_t syncRetrieveWal(SSyncPeer *pPeer) { +static int64_t syncRetrieveWal(SSyncPeer *pPeer) { SSyncNode * pNode = pPeer->pSyncNode; char fname[TSDB_FILENAME_LEN * 3]; char wname[TSDB_FILENAME_LEN * 2]; int32_t size; - int32_t code = -1; + int64_t code = -1; int64_t index = 0; while (1) { @@ -297,7 +297,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { wname[0] = 0; code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index); if (code < 0) { - sError("%s, failed to get wal info since:%s, code:0x%x", pPeer->id, strerror(errno), code); + sError("%s, failed to get wal info since:%s, code:0x%" PRIx64, pPeer->id, strerror(errno), code); break; } @@ -309,6 +309,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { if (code == 0) { // last wal code = syncProcessLastWal(pPeer, wname, index); + sInfo("%s, last wal processed, code:%" PRId64, pPeer->id, code); break; } @@ -319,7 +320,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { struct stat fstat; if (stat(fname, &fstat) < 0) { code = -1; - sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } @@ -329,14 +330,14 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { int32_t sfd = open(fname, O_RDONLY | O_BINARY); if (sfd < 0) { code = -1; - sError("%s, failed to open wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sError("%s, failed to open wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } code = (int32_t)taosSendFile(pPeer->syncFd, sfd, NULL, size); close(sfd); if (code < 0) { - sError("%s, failed to send wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sError("%s, failed to send wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } @@ -357,7 +358,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { code = -1; } } else { - sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code); + sError("%s, failed to send wal since %s, code:0x%" PRIx64, pPeer->id, strerror(errno), code); } return code; @@ -404,9 +405,9 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { if (pPeer->sversion == 0) pPeer->sversion = 1; sInfo("%s, start to retrieve wals", pPeer->id); - int32_t code = syncRetrieveWal(pPeer); - if (code != 0) { - sError("%s, failed to retrieve wals, code:0x%x", pPeer->id, code); + 
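Several sError/sDebug calls in this hunk switch from %d and 0x%x to the <inttypes.h> macros once code and bytes become 64-bit; passing an int64_t where the format string says int is undefined behavior on most ABIs. The portable pattern:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t code = -1;
  printf("code:%" PRId64 "\n", code);                /* decimal, as in sDebug */
  printf("code:0x%" PRIx64 "\n", (uint64_t)code);    /* hex, as in sError    */
  return 0;
}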
int64_t code = syncRetrieveWal(pPeer); + if (code < 0) { + sError("%s, failed to retrieve wals, code:0x%" PRIx64, pPeer->id, code); return -1; } diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index ab2e6c307bd7f69f1faf2fc578b3dcf8d43c4c84..f2b05ab2263c0d80bc870981f86933922de639e4 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/tfs/src/tdisk.c b/src/tfs/src/tdisk.c index 7cdaf7fd099db85c988077296ade2da7440be8a1..37798d3a886b443a20703747ad9a99e26c1502b7 100644 --- a/src/tfs/src/tdisk.c +++ b/src/tfs/src/tdisk.c @@ -52,7 +52,7 @@ int tfsUpdateDiskInfo(SDisk *pDisk) { } pDisk->dmeta.size = diskSize.tsize; - pDisk->dmeta.free = diskSize.tsize - diskSize.avail; + pDisk->dmeta.free = diskSize.avail; return code; -} \ No newline at end of file +} diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 21e8e8379586c4258fd65ddb74f5154bfc415d15..31d52aae7d4a809044ab01a7b561801d1ad0c2eb 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/tsdb/inc/tsdbMemTable.h b/src/tsdb/inc/tsdbMemTable.h index 3b3f1dd1f6a9307bbe3954374b005a23a9f15ab0..bd64ed4a5238b3b8b60716d8732d59d27218c639 100644 --- a/src/tsdb/inc/tsdbMemTable.h +++ b/src/tsdb/inc/tsdbMemTable.h @@ -37,6 +37,7 @@ typedef struct { TSKEY keyLast; int64_t numOfRows; SSkipList* pData; + T_REF_DECLARE() } STableData; typedef struct { @@ -76,7 +77,7 @@ typedef struct { int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); -int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem); +int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem, SArray* pATable); void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem); void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes); int tsdbAsyncCommit(STsdbRepo* pRepo); diff --git a/src/tsdb/inc/tsdbReadImpl.h b/src/tsdb/inc/tsdbReadImpl.h index 0efbcc55bbbdc70b3fb705370bd9330d716670fb..e7840d9723f7935c69f70f1c236a5ed49c82c146 100644 --- a/src/tsdb/inc/tsdbReadImpl.h +++ b/src/tsdb/inc/tsdbReadImpl.h @@ -49,19 +49,32 @@ typedef struct { } SBlockInfo; typedef struct { - int16_t colId; - int32_t len; - int32_t type : 8; - int32_t offset : 24; - int64_t sum; - int64_t max; - int64_t min; - int16_t maxIndex; - int16_t minIndex; - int16_t numOfNull; - char padding[2]; + int16_t colId; + int32_t len; + uint32_t type : 8; + uint32_t offset : 24; + int64_t sum; + int64_t max; + int64_t min; + int16_t maxIndex; + int16_t minIndex; + int16_t numOfNull; + uint8_t offsetH; + char padding[1]; } SBlockCol; +// Code here just for back-ward compatibility +static FORCE_INLINE void tsdbSetBlockColOffset(SBlockCol *pBlockCol, uint32_t offset) { + pBlockCol->offset = offset & ((((uint32_t)1) << 24) - 1); + pBlockCol->offsetH = (uint8_t)(offset >> 24); +} + +static FORCE_INLINE uint32_t tsdbGetBlockColOffset(SBlockCol *pBlockCol) { + uint32_t offset1 = pBlockCol->offset; + uint32_t offset2 = pBlockCol->offsetH; + return (offset1 | (offset2 << 24)); +} + typedef struct { int32_t delimiter; // For recovery usage int32_t numOfCols; // For recovery usage diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 
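The SBlockCol change is worth pausing on: the on-disk column offset was a 24-bit bit-field, so an offset past 16 MB could not be represented. The patch keeps the low 24 bits where old files expect them and parks the new high byte in offsetH (old files carry offsetH == 0), with the two inline helpers doing the split and join. A trimmed, self-checking stand-in:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t offset : 24;   /* low 24 bits, as before */
  uint8_t  offsetH;       /* new high byte          */
} BlockColLite;

static void setOffset(BlockColLite *c, uint32_t offset) {
  c->offset  = offset & ((((uint32_t)1) << 24) - 1);
  c->offsetH = (uint8_t)(offset >> 24);
}

static uint32_t getOffset(const BlockColLite *c) {
  return c->offset | ((uint32_t)c->offsetH << 24);
}

int main(void) {
  BlockColLite c;
  setOffset(&c, 0x12345678u);       /* needs more than 24 bits */
  assert(getOffset(&c) == 0x12345678u);
  printf("round-trip ok: 0x%x\n", getOffset(&c));
  return 0;
}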
a777b11186f064436137d267d73f9a70b5aa8701..a4cc316725d60510441c4badb77ce9a2805c08ca 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -781,11 +781,11 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo ASSERT(nColsNotAllNull >= 0 && nColsNotAllNull <= pDataCols->numOfCols); // Compress the data if neccessary - int tcol = 0; // counter of not all NULL and written columns - int32_t toffset = 0; - int32_t tsize = TSDB_BLOCK_STATIS_SIZE(nColsNotAllNull); - int32_t lsize = tsize; - int32_t keyLen = 0; + int tcol = 0; // counter of not all NULL and written columns + uint32_t toffset = 0; + int32_t tsize = TSDB_BLOCK_STATIS_SIZE(nColsNotAllNull); + int32_t lsize = tsize; + int32_t keyLen = 0; for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { // All not NULL columns finish if (ncol != 0 && tcol >= nColsNotAllNull) break; @@ -829,7 +829,7 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo tsdbUpdateDFileMagic(pDFile, POINTER_SHIFT(tptr, flen - sizeof(TSCKSUM))); if (ncol != 0) { - pBlockCol->offset = toffset; + tsdbSetBlockColOffset(pBlockCol, toffset); pBlockCol->len = flen; tcol++; } else { diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index e6815083371693a5552372bcc792e14213ae3456..cbff4fbeaa6907c83b5836d6039cbfc23a62168b 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -227,6 +227,7 @@ void *tsdbFreeFS(STsdbFS *pfs) { pfs->metaCache = NULL; pfs->cstatus = tsdbFreeFSStatus(pfs->cstatus); pthread_rwlock_destroy(&(pfs->lock)); + free(pfs); } return NULL; @@ -395,7 +396,7 @@ static int tsdbSaveFSStatus(SFSStatus *pStatus, int vid) { } (void)close(fd); - (void)rename(tfname, cfname); + (void)taosRename(tfname, cfname); taosTZfree(pBuf); return 0; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 69a35b3d7832a31528657df71d5c169f4a524974..8969f61596b33f37cabe934fdd56819b71575315 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -597,7 +597,7 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) { // Get the data in row ASSERT(pTable->lastRow == NULL); STSchema *pSchema = tsdbGetTableSchema(pTable); - pTable->lastRow = taosTMalloc(schemaTLen(pSchema)); + pTable->lastRow = taosTMalloc(dataRowMaxBytesFromSchema(pSchema)); if (pTable->lastRow == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyReadH(&readh); diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 73a127079920bcdf0cdc2d3e244998310c4d8dbc..6818f2ed14c278bd5d203d0570f52772c159dcde 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -124,17 +124,66 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { return 0; } -int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) { +int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem, SArray *pATable) { + SMemTable *tmem; + + // Get snap object if (tsdbLockRepo(pRepo) < 0) return -1; - *pMem = pRepo->mem; + tmem = pRepo->mem; *pIMem = pRepo->imem; - tsdbRefMemTable(pRepo, *pMem); + tsdbRefMemTable(pRepo, tmem); tsdbRefMemTable(pRepo, *pIMem); if (tsdbUnlockRepo(pRepo) < 0) return -1; - if (*pMem != NULL) taosRLockLatch(&((*pMem)->latch)); + // Copy mem objects and ref needed STableData + if (tmem) { + taosRLockLatch(&(tmem->latch)); + + *pMem = (SMemTable *)calloc(1, sizeof(**pMem)); + if (*pMem == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + taosRUnLockLatch(&(tmem->latch)); + tsdbUnRefMemTable(pRepo, tmem); + 
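tsdbSaveFSStatus() already wrote the new status file under a temp name and renamed it into place, so readers see either the whole old file or the whole new one; the patch only reroutes the call through taosRename, presumably so the replace also behaves on platforms where plain rename() refuses to overwrite an existing target. A portable sketch of the write-then-rename pattern (file names hypothetical; production code would also fsync before the rename):

#include <stdio.h>
#include <stdlib.h>

static int saveAtomically(const char *path, const char *data) {
  char tmp[256];
  snprintf(tmp, sizeof(tmp), "%s.t", path);

  FILE *fp = fopen(tmp, "w");
  if (fp == NULL) return -1;
  if (fputs(data, fp) == EOF) { fclose(fp); remove(tmp); return -1; }
  if (fclose(fp) != 0) { remove(tmp); return -1; }

  return rename(tmp, path);   /* atomic replace on POSIX */
}

int main(void) {
  return saveAtomically("current.json", "{\"fver\":1}\n") == 0 ? 0 : 1;
}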
tsdbUnRefMemTable(pRepo, *pIMem); + *pMem = NULL; + *pIMem = NULL; + return -1; + } + + (*pMem)->tData = (STableData **)calloc(tmem->maxTables, sizeof(STableData *)); + if ((*pMem)->tData == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + taosRUnLockLatch(&(tmem->latch)); + free(*pMem); + tsdbUnRefMemTable(pRepo, tmem); + tsdbUnRefMemTable(pRepo, *pIMem); + *pMem = NULL; + *pIMem = NULL; + return -1; + } + + (*pMem)->keyFirst = tmem->keyFirst; + (*pMem)->keyLast = tmem->keyLast; + (*pMem)->numOfRows = tmem->numOfRows; + (*pMem)->maxTables = tmem->maxTables; + + for (size_t i = 0; i < taosArrayGetSize(pATable); i++) { + STable * pTable = *(STable **)taosArrayGet(pATable, i); + int32_t tid = TABLE_TID(pTable); + STableData *pTableData = (tid < tmem->maxTables) ? tmem->tData[tid] : NULL; + + if ((pTableData == NULL) || (TABLE_UID(pTable) != pTableData->uid)) continue; + + (*pMem)->tData[tid] = tmem->tData[tid]; + T_REF_INC(tmem->tData[tid]); + } + + taosRUnLockLatch(&(tmem->latch)); + } + + tsdbUnRefMemTable(pRepo, tmem); tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem); return 0; @@ -144,8 +193,14 @@ void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); if (pMem != NULL) { - taosRUnLockLatch(&(pMem->latch)); - tsdbUnRefMemTable(pRepo, pMem); + for (size_t i = 0; i < pMem->maxTables; i++) { + STableData *pTableData = pMem->tData[i]; + if (pTableData) { + tsdbFreeTableData(pTableData); + } + } + free(pMem->tData); + free(pMem); } if (pIMem != NULL) { @@ -436,7 +491,7 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) { STableData *pTableData = (STableData *)calloc(1, sizeof(*pTableData)); if (pTableData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + return NULL; } pTableData->uid = TABLE_UID(pTable); @@ -449,20 +504,22 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) { tkeyComparFn, pCfg->update ? 
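The snapshot rework hinges on STableData gaining a reference count (the T_REF_DECLARE() added to the struct): a query snapshot pins only the tables it actually reads, and whoever drops the last reference frees the skip list. Single-threaded sketch of the ownership protocol; the real T_REF macros are atomic:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int   ref;
  char *payload;   /* stands in for the skip list */
} TableData;

static TableData *tableDataNew(void) {
  TableData *t = calloc(1, sizeof(*t));
  if (t == NULL) return NULL;
  t->payload = malloc(16);
  if (t->payload == NULL) { free(t); return NULL; }
  t->ref = 1;                       /* creator holds the first reference */
  return t;
}

static void tableDataRef(TableData *t)   { t->ref++; }

static void tableDataUnref(TableData *t) {
  if (--t->ref == 0) {              /* last holder frees everything */
    free(t->payload);
    free(t);
  }
}

int main(void) {
  TableData *t = tableDataNew();
  tableDataRef(t);      /* snapshot pins the table                    */
  tableDataUnref(t);    /* memtable drops it: snapshot keeps it alive */
  tableDataUnref(t);    /* snapshot done: freed here                  */
  printf("done\n");
  return 0;
}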
SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey); if (pTableData->pData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + free(pTableData); + return NULL; } - return pTableData; + T_REF_INC(pTableData); -_err: - tsdbFreeTableData(pTableData); - return NULL; + return pTableData; } static void tsdbFreeTableData(STableData *pTableData) { if (pTableData) { - tSkipListDestroy(pTableData->pData); - free(pTableData); + int32_t ref = T_REF_DEC(pTableData); + if (ref == 0) { + tSkipListDestroy(pTableData->pData); + free(pTableData); + } } } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 9b407dae484a5540fa4131e61428613c256f1cdf..5e2e0fce1d45dc8ffceb8c92a2475df5b4da0ad3 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -50,7 +50,8 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { STsdbMeta *pMeta = pRepo->tsdbMeta; STable * super = NULL; STable * table = NULL; - int newSuper = 0; + bool newSuper = false; + bool superChanged = false; int tid = pCfg->tableId.tid; STable * pTable = NULL; @@ -85,7 +86,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { if (pCfg->type == TSDB_CHILD_TABLE) { super = tsdbGetTableByUid(pMeta, pCfg->superUid); if (super == NULL) { // super table not exists, try to create it - newSuper = 1; + newSuper = true; super = tsdbCreateTableFromCfg(pCfg, true); if (super == NULL) goto _err; } else { @@ -93,6 +94,17 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO; goto _err; } + + if (schemaVersion(pCfg->tagSchema) > schemaVersion(super->tagSchema)) { + // tag schema out of date, need to update super table tag version + STSchema *pOldSchema = super->tagSchema; + TSDB_WLOCK_TABLE(super); + super->tagSchema = tdDupSchema(pCfg->tagSchema); + TSDB_WUNLOCK_TABLE(super); + tdFreeSchema(pOldSchema); + + superChanged = true; + } } } @@ -117,7 +129,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { // TODO: refactor duplicate codes int tlen = 0; void *pBuf = NULL; - if (newSuper) { + if (newSuper || superChanged) { tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super); pBuf = tsdbAllocBytes(pRepo, tlen); if (pBuf == NULL) goto _err; @@ -562,12 +574,13 @@ void tsdbRefTable(STable *pTable) { } void tsdbUnRefTable(STable *pTable) { - int32_t ref = T_REF_DEC(pTable); - tsdbDebug("unref table %s uid:%"PRIu64" tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref); + uint64_t uid = TABLE_UID(pTable); + int32_t tid = TABLE_TID(pTable); + int32_t ref = T_REF_DEC(pTable); - if (ref == 0) { - // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); + tsdbDebug("unref table, uid:%" PRIu64 " tid:%d, refCount:%d", uid, tid, ref); + if (ref == 0) { if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) { tsdbUnRefTable(pTable->pSuper); } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 498122f0fe08cfe20bd4a91dafe220163b9b4334..5ac2ab2ca6283f6588a9cf0ffee98b68cf4426bd 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -187,13 +187,15 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS return pLocalIdList; } -static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { +static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle, SArray* psTable) { assert(pQueryHandle != NULL && pQueryHandle->pMemRef != NULL); SMemRef* pMemRef = pQueryHandle->pMemRef; if (pQueryHandle->pMemRef->ref++ == 0) 
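The tag-schema upgrade in tsdbCreateTable() follows a classic swap-under-write-lock shape: duplicate the newer schema, exchange the super table's pointer while holding TSDB_WLOCK_TABLE, then free the old copy after unlocking. Sketch with a pthread rwlock and a plain string standing in for STSchema:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  pthread_rwlock_t lock;
  char            *tagSchema;
} SuperTable;

static void upgradeTagSchema(SuperTable *st, const char *newer) {
  char *dup = strdup(newer);            /* tdDupSchema analogue       */
  pthread_rwlock_wrlock(&st->lock);     /* TSDB_WLOCK_TABLE           */
  char *old = st->tagSchema;
  st->tagSchema = dup;
  pthread_rwlock_unlock(&st->lock);     /* TSDB_WUNLOCK_TABLE         */
  free(old);                            /* tdFreeSchema, after unlock */
}

int main(void) {
  SuperTable st;
  pthread_rwlock_init(&st.lock, NULL);
  st.tagSchema = strdup("tag schema v1");
  upgradeTagSchema(&st, "tag schema v2");
  printf("now: %s\n", st.tagSchema);
  free(st.tagSchema);
  pthread_rwlock_destroy(&st.lock);
  return 0;
}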
{ - tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem)); + tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem), psTable); } + + taosArrayDestroy(psTable); } static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { @@ -242,7 +244,7 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) { return rows; } -static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta) { +static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta, SArray** psTable) { size_t sizeOfGroup = taosArrayGetSize(pGroupList->pGroupList); assert(sizeOfGroup >= 1 && pMeta != NULL); @@ -252,6 +254,12 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa return NULL; } + SArray* pTable = taosArrayInit(4, sizeof(STable*)); + if (pTable == NULL) { + taosArrayDestroy(pTableCheckInfo); + return NULL; + } + // todo apply the lastkey of table check to avoid to load header file for (int32_t i = 0; i < sizeOfGroup; ++i) { SArray* group = *(SArray**) taosArrayGet(pGroupList->pGroupList, i); @@ -284,6 +292,17 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa } taosArraySort(pTableCheckInfo, tsdbCheckInfoCompar); + + size_t gsize = taosArrayGetSize(pTableCheckInfo); + + for (int32_t i = 0; i < gsize; ++i) { + STableCheckInfo* pInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, i); + + taosArrayPush(pTable, &pInfo->pTableObj); + } + + *psTable = pTable; + return pTableCheckInfo; } @@ -307,21 +326,26 @@ static void resetCheckInfo(STsdbQueryHandle* pQueryHandle) { } } -static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey) { +static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey, SArray** psTable) { size_t si = taosArrayGetSize(pTableCheckInfo); SArray* pNew = taosArrayInit(si, sizeof(STableCheckInfo)); if (pNew == NULL) { return NULL; } + SArray* pTable = taosArrayInit(si, sizeof(STable*)); + for (int32_t j = 0; j < si; ++j) { STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pTableCheckInfo, j); STableCheckInfo info = { .lastKey = skey, .pTableObj = pCheckInfo->pTableObj}; info.tableId = pCheckInfo->tableId; taosArrayPush(pNew, &info); + taosArrayPush(pTable, &pCheckInfo->pTableObj); } + *psTable = pTable; + // it is ordered already, no need to sort again. taosArraySort(pNew, tsdbCheckInfoCompar); return pNew; @@ -337,7 +361,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - pQueryHandle->cur.fid = -1; + pQueryHandle->cur.fid = INT32_MIN; pQueryHandle->cur.win = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = true; pQueryHandle->activeIndex = 0; // current active table index @@ -352,10 +376,8 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC goto out_of_memory; } - tsdbMayTakeMemSnapshot(pQueryHandle); - - // In case of data block info retrieval, the pCond->numOfCols is 0. 
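Note the gate in tsdbMayTakeMemSnapshot(): the post-increment test ref++ == 0 means only the first user of the handle pays for the snapshot, and everyone after that shares it. The idiom in isolation:

#include <stdio.h>

typedef struct { int ref; int haveSnapshot; } MemRef;

static void mayTakeSnapshot(MemRef *r) {
  if (r->ref++ == 0) {                 /* 0 -> 1: we are first */
    r->haveSnapshot = 1;
    printf("snapshot taken\n");
  } else {
    printf("reusing snapshot, ref now %d\n", r->ref);
  }
}

int main(void) {
  MemRef r = {0, 0};
  mayTakeSnapshot(&r);   /* takes it  */
  mayTakeSnapshot(&r);   /* reuses it */
  return 0;
}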
- assert(pCond != NULL && pCond->numOfCols >= 0 && pMemRef != NULL); + //tsdbMayTakeMemSnapshot(pQueryHandle); + assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL); if (ASCENDING_TRAVERSE(pCond->order)) { assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); @@ -417,14 +439,18 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); + SArray* psTable = NULL; + // todo apply the lastkey of table check to avoid to load header file - pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta); + pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &psTable); if (pQueryHandle->pTableCheckInfo == NULL) { tsdbCleanupQueryHandle(pQueryHandle); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } + tsdbMayTakeMemSnapshot(pQueryHandle, psTable); + tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); return (TsdbQueryHandleT) pQueryHandle; } @@ -482,8 +508,9 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo); + SArray* pTable = NULL; STsdbMeta* pMeta = tsdbGetMeta(pQueryHandle->pTsdb); - pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta); + pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &pTable); if (pQueryHandle->pTableCheckInfo == NULL) { tsdbCleanupQueryHandle(pQueryHandle); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -566,16 +593,18 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) { pMem = pMemT->tData[pCheckInfo->tableId.tid]; if (pMem != NULL && pMem->uid == pCheckInfo->tableId.uid) { // check uid + TKEY tLastKey = keyToTkey(pCheckInfo->lastKey); pCheckInfo->iter = - tSkipListCreateIterFromVal(pMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pMem->pData, (const char*)&tLastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } if (pIMemT && pCheckInfo->tableId.tid < pIMemT->maxTables) { pIMem = pIMemT->tData[pCheckInfo->tableId.tid]; if (pIMem != NULL && pIMem->uid == pCheckInfo->tableId.uid) { // check uid + TKEY tLastKey = keyToTkey(pCheckInfo->lastKey); pCheckInfo->iiter = - tSkipListCreateIterFromVal(pIMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pIMem->pData, (const char*)&tLastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } @@ -740,7 +769,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { STsdbCfg *pCfg = &pHandle->pTsdb->config; size_t size = taosArrayGetSize(pHandle->pTableCheckInfo); assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1); - pHandle->cur.fid = -1; + pHandle->cur.fid = INT32_MIN; STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex); @@ -1205,7 +1234,12 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity if (pColInfo->info.colId == src->colId) { - if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { + if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + for (int32_t n = 0; n < num; n++) { + TKEY tkey = *(TKEY 
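The cur.fid sentinel moves from -1 to INT32_MIN throughout this file; presumably a real file id can legitimately come out negative (ids are derived from timestamps, and pre-1970 keys map below zero), so -1 was ambiguous while INT32_MIN is not. The shape of the check:

#include <limits.h>
#include <stdio.h>

#define FID_NONE INT32_MIN   /* "no data in files anymore" */

int main(void) {
  int fid = FID_NONE;
  if (fid == FID_NONE) printf("no file data\n");

  fid = -1;                  /* a value a real computation could produce */
  if (fid != FID_NONE) printf("fid %d still counts as file data\n", fid);
  return 0;
}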
*)((char*)src->pData + bytes * start + n * sizeof(TKEY)); + *(TSKEY *)(pData + n * sizeof(TSKEY)) = tdGetKey(tkey); + } + } else if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { memmove(pData, (char*)src->pData + bytes * start, bytes * num); } else { // handle the var-string char* dst = pData; @@ -1313,7 +1347,6 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_UBIGINT: - case TSDB_DATA_TYPE_TIMESTAMP: *(uint64_t *)pData = *(uint64_t *)value; break; case TSDB_DATA_TYPE_FLOAT: @@ -1322,6 +1355,9 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, case TSDB_DATA_TYPE_DOUBLE: SET_DOUBLE_PTR(pData, value); break; + case TSDB_DATA_TYPE_TIMESTAMP: + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + break; default: memcpy(pData, value, pColInfo->info.bytes); } @@ -1981,7 +2017,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist assert(pQueryHandle->pFileGroup == NULL); } - cur->fid = -1; // denote that there are no data in file anymore + cur->fid = INT32_MIN; // denote that there are no data in file anymore *exists = false; return code; } @@ -2467,12 +2503,18 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pMemRef); tfree(cond.colList); - pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey); + + SArray* psTable = NULL; + + pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey, &psTable); if (pSecQueryHandle->pTableCheckInfo == NULL) { terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; goto out_of_memory; } + + tsdbMayTakeMemSnapshot(pSecQueryHandle, psTable); + if (!tsdbNextDataBlock((void*)pSecQueryHandle)) { // no result in current query, free the corresponding result rows structure if (type == TSDB_PREV_ROW) { @@ -2662,7 +2704,7 @@ void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* p STable* pTable = NULL; // there are data in file - if (pHandle->cur.fid >= 0) { + if (pHandle->cur.fid != INT32_MIN) { STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot]; pTable = pBlockInfo->pTableCheckInfo->pTableObj; } else { @@ -2746,7 +2788,7 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { */ STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle; - if (pHandle->cur.fid < 0) { + if (pHandle->cur.fid == INT32_MIN) { return pHandle->pColumns; } else { STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot]; diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 312f1f9b20516ffd97d674f993ddc96d9fe725b4..572706d45e6a085f44ffd0a089dc28256255e33e 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -466,14 +466,14 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat continue; } - int16_t tcolId = 0; - int32_t toffset = TSDB_KEY_COL_OFFSET; - int32_t tlen = pBlock->keyLen; + int16_t tcolId = 0; + uint32_t toffset = TSDB_KEY_COL_OFFSET; + int32_t tlen = pBlock->keyLen; if (dcol != 0) { SBlockCol *pBlockCol = &(pBlockData->cols[ccol]); tcolId = pBlockCol->colId; - toffset = pBlockCol->offset; + toffset = tsdbGetBlockColOffset(pBlockCol); tlen = pBlockCol->len; } else { ASSERT(pDataCol->colId == 
tcolId); @@ -488,7 +488,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat if (tsdbCheckAndDecodeColumnData(pDataCol, POINTER_SHIFT(pBlockData, tsize + toffset), tlen, pBlock->algorithm, pBlock->numOfRows, pDataCols->maxPoints, TSDB_READ_COMP_BUF(pReadh), (int)taosTSizeof(TSDB_READ_COMP_BUF(pReadh))) < 0) { - tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %d", + tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tcolId, (int64_t)pBlock->offset, toffset); return -1; } @@ -627,7 +627,7 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc if (tsdbMakeRoom((void **)(&TSDB_READ_BUF(pReadh)), pBlockCol->len) < 0) return -1; if (tsdbMakeRoom((void **)(&TSDB_READ_COMP_BUF(pReadh)), tsize) < 0) return -1; - int64_t offset = pBlock->offset + TSDB_BLOCK_STATIS_SIZE(pBlock->numOfCols) + pBlockCol->offset; + int64_t offset = pBlock->offset + TSDB_BLOCK_STATIS_SIZE(pBlock->numOfCols) + tsdbGetBlockColOffset(pBlockCol); if (tsdbSeekDFile(pDFile, offset, SEEK_SET) < 0) { tsdbError("vgId:%d failed to load block column data while seek file %s to offset %" PRId64 " since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, tstrerror(terrno)); diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c index 88ab973f5ed85589996b2a90018036a51df48897..6b8483e4a66df75db529b05db9a29e800e01427a 100644 --- a/src/tsdb/src/tsdbSync.c +++ b/src/tsdb/src/tsdbSync.c @@ -549,7 +549,7 @@ static int32_t tsdbSyncSendDFileSet(SSyncH *pSynch, SDFileSet *pSet) { bool toSend = false; if (tsdbSendDFileSetInfo(pSynch, pSet) < 0) { - tsdbError("vgId:%d, failed to send fileset:%d info since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno)); + tsdbError("vgId:%d, failed to send fileset:%d info since %s", REPO_ID(pRepo), pSet ? 
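The tsdbSync.c fix is a pure defensive-logging change: tsdbSyncSendDFileSet() is apparently also reached with pSet == NULL (the end-of-filesets marker), so the error path must not dereference it. The pattern:

#include <stdio.h>

typedef struct { int fid; } DFileSet;

static void logSendFailure(const DFileSet *pSet) {
  /* guard the dereference exactly as the patch does */
  printf("failed to send fileset:%d info\n", pSet ? pSet->fid : -1);
}

int main(void) {
  DFileSet s = {42};
  logSendFailure(&s);      /* fileset:42 */
  logSendFailure(NULL);    /* fileset:-1, no crash */
  return 0;
}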
pSet->fid : -1, tstrerror(terrno)); return -1; } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 80e874ad92cebc267460c1e59e494fa52f004ced..a7f4f59e07021b659707c59a0c9b2ef916558d52 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 9923409885fad30cbadd9354349075708b1a7fda..fdb2595fd8d3b1659800ca3a5b88c32b6fe95e93 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -44,6 +44,7 @@ enum { TAOS_CFG_VTYPE_INT8, TAOS_CFG_VTYPE_INT16, TAOS_CFG_VTYPE_INT32, + TAOS_CFG_VTYPE_UINT16, TAOS_CFG_VTYPE_FLOAT, TAOS_CFG_VTYPE_STRING, TAOS_CFG_VTYPE_IPSTR, diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index b0d4ecd075d33024526a7121368440dd6ab1ce5e..4d18ef14e2a1985259b11ff65391616c9d4706b2 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -297,32 +297,14 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { __compar_fn_t comparFn = NULL; switch (type) { - case TSDB_DATA_TYPE_SMALLINT: { - comparFn = compareInt16Val; break; - } - - case TSDB_DATA_TYPE_INT: { - comparFn = compareInt32Val; break; - } - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_TIMESTAMP: { - comparFn = compareInt64Val; break; - } - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT:{ - comparFn = compareInt8Val; break; - } - - case TSDB_DATA_TYPE_FLOAT: { - comparFn = compareFloatVal; break; - } - - case TSDB_DATA_TYPE_DOUBLE: { - comparFn = compareDoubleVal; break; - } - + case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break; + case TSDB_DATA_TYPE_SMALLINT: comparFn = compareInt16Val; break; + case TSDB_DATA_TYPE_INT: comparFn = compareInt32Val; break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: comparFn = compareInt64Val; break; + case TSDB_DATA_TYPE_FLOAT: comparFn = compareFloatVal; break; + case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break; case TSDB_DATA_TYPE_BINARY: { if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ comparFn = compareStrPatternComp; @@ -341,10 +323,14 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else { comparFn = compareLenPrefixedWStr; } - break; } - + + case TSDB_DATA_TYPE_UTINYINT: comparFn = compareUint8Val; break; + case TSDB_DATA_TYPE_USMALLINT: comparFn = compareUint16Val;break; + case TSDB_DATA_TYPE_UINT: comparFn = compareUint32Val;break; + case TSDB_DATA_TYPE_UBIGINT: comparFn = compareUint64Val;break; + default: comparFn = compareInt32Val; break; @@ -406,8 +392,8 @@ __compar_fn_t getKeyComparFunc(int32_t keyType) { int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { switch (type) { case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2)); - case TSDB_DATA_TYPE_DOUBLE: DEFAULT_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); - case TSDB_DATA_TYPE_FLOAT: DEFAULT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); + case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); + case TSDB_DATA_TYPE_FLOAT: DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); case TSDB_DATA_TYPE_BIGINT: DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2)); case TSDB_DATA_TYPE_SMALLINT: DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2)); case TSDB_DATA_TYPE_TINYINT: diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 
7a92750f8f2e89a93557d367b0c4772a2faaa57d..c4bd57760222ac3da7d25510cc2f434fe0cf0cac 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -95,6 +95,23 @@ static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) { } } +static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) { + int32_t value = atoi(input_value); + uint16_t *option = (uint16_t *)cfg->ptr; + if (value < cfg->minValue || value > cfg->maxValue) { + uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { + *option = (uint16_t)value; + cfg->cfgStatus = TAOS_CFG_CSTATUS_FILE; + } else { + uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); int8_t *option = (int8_t *)cfg->ptr; @@ -239,6 +256,9 @@ static void taosReadConfigOption(const char *option, char *value, char *value2, case TAOS_CFG_VTYPE_INT32: taosReadInt32Config(cfg, value); break; + case TAOS_CFG_VTYPE_UINT16: + taosReadUInt16Config(cfg, value); + break; case TAOS_CFG_VTYPE_FLOAT: taosReadFloatConfig(cfg, value); break; @@ -422,6 +442,9 @@ void taosPrintGlobalCfg() { case TAOS_CFG_VTYPE_INT32: uInfo(" %s:%s%d%s", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); break; + case TAOS_CFG_VTYPE_UINT16: + uInfo(" %s:%s%d%s", cfg->option, blank, *((uint16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; case TAOS_CFG_VTYPE_FLOAT: uInfo(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); break; @@ -459,6 +482,9 @@ static void taosDumpCfg(SGlobalCfg *cfg) { case TAOS_CFG_VTYPE_INT32: printf(" %s:%s%d%s\n", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); break; + case TAOS_CFG_VTYPE_UINT16: + printf(" %s:%s%d%s\n", cfg->option, blank, *((uint16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; case TAOS_CFG_VTYPE_FLOAT: printf(" %s:%s%f%s\n", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); break; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 1eb6893cbc639be013104fe636f6af373a61b3c3..4a011b7cc7407acabe249b577280f7bbda58f79d 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -24,17 +24,416 @@ typedef struct { int32_t val; const char* str; } STaosError; - #include "os.h" #include "taoserror.h" - static threadlocal int32_t tsErrno; int32_t* taosGetErrno() { return &tsErrno; } +#ifdef TAOS_ERROR_C +#define TAOS_DEFINE_ERROR(name, msg) {.val = (name), .str=(msg)}, +#else +#define TAOS_DEFINE_ERROR(name, mod, code, msg) static const int32_t name = TAOS_DEF_ERROR_CODE(mod, code); +#endif + +#define TAOS_SYSTEM_ERROR(code) (0x80ff0000 | (code)) +#define TAOS_SUCCEEDED(err) ((err) >= 0) +#define TAOS_FAILED(err) ((err) < 0) + +#ifdef TAOS_ERROR_C +STaosError errors[] = { + {.val = 0, .str = "success"}, +#endif + +// rpc +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_ACTION_IN_PROGRESS, "Action in progress") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_REQUIRED, "Authentication required") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, "Authentication failure") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_REDIRECT, "Redirect") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NOT_READY, "System not ready") // peer is not ready to process data +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_ALREADY_PROCESSED, 
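The terror.c rewrite that follows is the X-macro idiom: one TAOS_DEFINE_ERROR list is expanded two ways, into integer constants when TAOS_ERROR_C is undefined and into STaosError table entries when it is defined. terror.c toggles the macro with #ifdef around a single in-file list; the miniature below shows the same idea with an explicit list macro so it fits in one translation unit:

#include <stdint.h>
#include <stdio.h>

#define ERROR_LIST(X)                         \
  X(DEMO_CODE_OK,  0x0000, "success")         \
  X(DEMO_CODE_OOM, 0x0100, "out of memory")

/* expansion 1: named constants */
#define AS_CONST(name, code, msg) static const int32_t name = (code);
ERROR_LIST(AS_CONST)

/* expansion 2: code -> message table */
typedef struct { int32_t val; const char *str; } DemoError;
#define AS_ENTRY(name, code, msg) {.val = (code), .str = (msg)},
static const DemoError errors[] = { ERROR_LIST(AS_ENTRY) };

int main(void) {
  for (size_t i = 0; i < sizeof(errors) / sizeof(errors[0]); i++)
    printf("0x%04x -> %s\n", (unsigned)errors[i].val, errors[i].str);
  printf("constants: 0x%04x 0x%04x\n",
         (unsigned)DEMO_CODE_OK, (unsigned)DEMO_CODE_OOM);
  return 0;
}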
"Message already processed") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED, "Last session not finished") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MISMATCHED_LINK_ID, "Mismatched meter id") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TOO_SLOW, "Processing of request timed out") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "Number of sessions reached limit") // too many sessions +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, "Unable to establish connection") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_APP_ERROR, "Unexpected generic error in RPC") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_UNEXPECTED_RESPONSE, "Unexpected response") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VALUE, "Invalid value") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TRAN_ID, "Invalid transaction id") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_SESSION_ID, "Invalid session id") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_MSG_TYPE, "Invalid message type") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_RESPONSE_TYPE, "Invalid response type") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, "Client and server's time is not synchronized") +TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, "Database not ready") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VERSION, "Invalid app version") + +//common & util +TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, "Operation not supported") +TAOS_DEFINE_ERROR(TSDB_CODE_COM_MEMORY_CORRUPTED, "Memory corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_COM_OUT_OF_MEMORY, "Out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_COM_INVALID_CFG_MSG, "Invalid config message") +TAOS_DEFINE_ERROR(TSDB_CODE_COM_FILE_CORRUPTED, "Data file corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ID_REMOVED, "Ref ID is removed") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_INVALID_ID, "Invalid Ref ID") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ALREADY_EXIST, "Ref is already there") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NOT_EXIST, "Ref is not there") + +//client +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_SQL, "Invalid SQL statement") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_QHANDLE, "Invalid qhandle") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TIME_STAMP, "Invalid combination of client/service time") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_VALUE, "Invalid value in client") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_VERSION, "Invalid client version") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_IE, "Invalid client ie") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_FQDN, "Invalid host name") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_USER_LENGTH, "Invalid user name") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PASS_LENGTH, "Invalid password") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_DB_LENGTH, "Database name too long") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH, "Table name too long") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_CONNECTION, "Invalid connection") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_OUT_OF_MEMORY, "System out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_DISKSPACE, "System out of disk space") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_CACHE_ERASED, "Query cache erased") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_CANCELLED, "Query terminated") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SORTED_RES_TOO_MANY, "Result set too large to be sorted") // too many result for ordered super table projection query +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_APP_ERROR, "Application error") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 
"Action in progress") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, "Disconnected from service") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, "No write permission") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config") + +// mnode +TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_IN_PROGRESS, "Message is progressing") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_NEED_REPROCESSED, "Messag need to be reprocessed") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_RIGHTS, "Insufficient privilege for operation") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_APP_ERROR, "Unexpected generic error in mnode") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONNECTION, "Invalid message connection") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_VERSION, "Incompatible protocol version") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_LEN, "Invalid message length") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_MSG_TYPE, "Invalid message type") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_SHELL_CONNS, "Too many connections") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_OUT_OF_MEMORY, "Out of memory in mnode") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, "Data expired") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, "Invalid query id") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_ID, "Invalid stream id") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, "Invalid connection id") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_IS_RUNNING, "mnode is alreay running") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC, "failed to config sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_START_SYNC, "failed to start sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CREATE_DIR, "failed to create mnode dir") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_INIT_STEP, "failed to init components") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE, "Object already there") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_ERROR, "Unexpected generic error in sdb") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE, "Invalid table type") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, "Object not there") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, "Invalid meta row") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, "Invalid key type") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, "DNode already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, "DNode does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, "VGroup does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, "Master DNode cannot be removed") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, "Out of DNodes") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, "Cluster cfg inconsistent") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, "Invalid dnode cfg option") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, "Balance already enabled") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, "Vgroup not in dnode") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, "Vgroup already in dnode") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE, "Dnode not avaliable") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, 
"Cluster id not match") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_READY, "Cluster not ready") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED, "Dnode Id not configured") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED, "Dnode Ep not configured") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, "Account already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, "Invalid account") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, "Invalid account options") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_EXPIRED, "Account authorization has expired") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, "User already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, "Invalid user") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER_FORMAT, "Invalid user format") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_PASS_FORMAT, "Invalid password format") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_USER_FROM_CONN, "Can not get user from conn") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_USERS, "Too many users") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TABLE_ALREADY_EXIST, "Table already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table +TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_ALREAY_EXIST, "Tag already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TAG_NOT_EXIST, "Tag does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, "Field already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, "Field does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, "Super table does not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG, "Invalid create table message") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, "Database not specified or available") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, "Database already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, "Invalid database options") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB, "Invalid database name") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_MONITOR_DB_FORBIDDEN, "Cannot delete monitor database") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DATABASES, "Too many databases for account") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, "Database not available") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_READY, "Database unsynced") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_DAYS, "Invalid database option: days out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP, "Invalid database option: keep >= keep1 >= keep0 >= days") + +// dnode +TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, "Message not processed") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, "Dnode out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, "No permission for disk files in dnode") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message length") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories") + +// vnode +TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in 
progress") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_MSG_NOT_PROCESSED, "Message not processed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_NEED_REPROCESSED, "Action need to be reprocessed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Invalid Vgroup ID") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INIT_FAILED, "Vnode initialization failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISKSPACE, "System out of disk space") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, "No write permission for disk files") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, "Missing data file") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, "Out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, "Unexpected generic error in vnode") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, "Invalid version file") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is full for commit failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full for waiting commit") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, "Database is dropping") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing") + +// tsdb +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_TYPE, "Invalid table type") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION, "Invalid table schema version") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_ALREADY_EXIST, "Table already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CONFIG, "Invalid configuration") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INIT_FAILED, "Tsdb init failed") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISKSPACE, "No diskspace for tsdb") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISK_PERMISSIONS, "No permission for disk files") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_CORRUPTED, "Data file(s) corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_OUT_OF_MEMORY, "Out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE, "Tag too old") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE, "Timestamp data out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP, "Submit message is messed up") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, "Invalid operation") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, "Invalid creation of table") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, "No table data in memory skiplist") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, "File already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, "Need to reconfigure table") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, "Invalid information to create table") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, "No available disk") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message") + +// query +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_MSG, "Invalid message") // failed to validate the sql expression msg by vnode +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NO_DISKSPACE, "No diskspace for query") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_OUT_OF_MEMORY, "System out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_APP_ERROR, "Unexpected generic error in query") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUP_JOIN_KEY, "Duplicated join key") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, "Tag conditon too many") 
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, "Query not ready")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, "Query should respond")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, "Multiple retrieval of this query")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, "Too many time windows in query")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, "Query buffer limit has been reached")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, "File inconsistency in replica")
+
+
+// grant
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_DNODE_LIMITED, "DNode creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_ACCT_LIMITED, "Account creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_TIMESERIES_LIMITED, "Table creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_DB_LIMITED, "DB creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_USER_LIMITED, "User creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CONN_LIMITED, "Connection creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_STREAM_LIMITED, "Stream creation limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_SPEED_LIMITED, "Write speed limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_STORAGE_LIMITED, "Storage capacity limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_QUERYTIME_LIMITED, "Query time limited by license")
+TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, "CPU cores limited by license")
+
+// sync
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CONFIG, "Invalid Sync Configuration")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, "Sync module not enabled")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_VERSION, "Invalid Sync version")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_CONFIRM_EXPIRED, "Sync confirm expired")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TOO_MANY_FWDINFO, "Too many sync fwd infos")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_PROTOCOL, "Mismatched protocol")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_CLUSTERID, "Mismatched clusterId")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_SIGNATURE, "Mismatched signature")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CHECKSUM, "Invalid msg checksum")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGLEN, "Invalid msg length")
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type")
+
+// wal
+TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal")
+TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted")
+TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit")
+
+// http
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not online")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_UNSUPPORT_URL, "url is not supported")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_URL, "invalid url format")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_ENOUGH_MEMORY, "not enough memory")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUSET_TOO_BIG, "request size is too big")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_AUTH_INFO, "no auth info input")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_MSG_INPUT, "request is empty")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_SQL_INPUT, "no sql input")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_EXEC_USEDB, "no need to execute use db cmd")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SESSION_FULL, "session list is full")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR, "failed to generate taosd token")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_MULTI_REQUEST, "size of multi request is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_CREATE_GZIP_FAILED, "failed to create gzip")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_FINISH_GZIP_FAILED, "failed to finish gzip")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_LOGIN_FAILED, "failed to login")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_VERSION, "invalid http version")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH, "invalid content length")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_TYPE, "invalid type of Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_FORMAT, "invalid format of Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_BASIC_AUTH, "invalid basic Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_TAOSD_AUTH, "invalid taosd Authorization")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_METHOD_FAILED, "failed to parse method")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_TARGET_FAILED, "failed to parse target")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_VERSION_FAILED, "failed to parse http version")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_SP_FAILED, "failed to parse sp")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_STATUS_FAILED, "failed to parse status")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_PHRASE_FAILED, "failed to parse phrase")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CRLF_FAILED, "failed to parse crlf")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_FAILED, "failed to parse header")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED, "failed to parse header key")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED, "failed to parse header val")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED, "failed to parse chunk size")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_FAILED, "failed to parse chunk")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_END_FAILED, "failed to parse end section")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_INVALID_STATE, "invalid parse state")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_ERROR_STATE, "failed to parse error section")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_NULL, "query size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_SIZE, "query size can not be more than 100")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR, "failed to parse grafana json")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_NOT_INPUT, "database name can not be null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_TOO_LONG, "database name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_INVALID_JSON, "invalid telegraf json format")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_NULL, "metrics size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_SIZE, "metrics size can not be more than 1K")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NULL, "metric name not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_TYPE, "metric name type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_NULL, "metric name length is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_LONG, "metric name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_NULL, "timestamp not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE, "timestamp type should be integer")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL, "timestamp value smaller than 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_NULL, "tags not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_0, "tags size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG, "tags size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NULL, "tag is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_NULL, "tag name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_SIZE, "tag name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE, "tag value type should be number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_NULL, "tag value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_NULL, "table is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_SIZE, "table name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_NULL, "fields not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_0, "fields size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG, "fields size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NULL, "field is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_NULL, "field name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE, "field name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE, "field value type should be number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL, "field value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_HOST_NOT_STRING, "host type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST, "stable does not exist")
+
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_NOT_INPUT, "database name can not be null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_TOO_LONG, "database name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_INVALID_JSON, "invalid opentsdb json format")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_NULL, "metrics size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_SIZE, "metrics size can not be more than 10K")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NULL, "metric name not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_TYPE, "metric name type should be string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_NULL, "metric name length is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_LONG, "metric name length can not be more than 22")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_NULL, "timestamp not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE, "timestamp type should be integer")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL, "timestamp value smaller than 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_NULL, "tags not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_0, "tags size is 0")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG, "tags size too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NULL, "tag is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_NULL, "tag name is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_SIZE, "tag name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE, "tag value type should be boolean, number or string")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_NULL, "tag value is null")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, "tag value can not be more than 64")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, "value not found")
+TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, "value type should be boolean, number or string")
+
+// odbc
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, "out of memory")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, "conversion not a valid literal input")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_UNDEF, "conversion undefined")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC_FRAC, "conversion fractional part truncated")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, "conversion truncated")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, "conversion not supported")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_OOR, "conversion numeric value out of range")
+TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, "out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, "not supported yet") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, "invalid handle") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, "no result set") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, "no fields returned") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, "invalid cursor") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, "statement not ready") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still busy") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, "bad connection string") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, "bad argument") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_VALID_TS, "not a valid timestamp") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE, "src too large") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, "src bad sequence") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, "src incomplete") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_GENERAL, "src general") + +// tfs +TAOS_DEFINE_ERROR(TSDB_CODE_FS_OUT_OF_MEMORY, "tfs out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_CFG, "tfs invalid mount config") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_TOO_MANY_MOUNT, "tfs too many mount") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_DUP_PRIMARY, "tfs duplicate primary mount") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_PRIMARY_DISK, "tfs no primary mount") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_MOUNT_AT_TIER, "tfs no mount at tier") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_FILE_ALREADY_EXISTS, "tfs file already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_LEVEL, "tfs invalid level") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_VALID_DISK, "tfs no valid disk") + +#ifdef TAOS_ERROR_C +}; +#endif static int tsCompareTaosError(const void* a, const void* b) { const STaosError* x = (const STaosError*)a; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index e0fe51e22a6405273cbb60fbfa04d3a5956105b0..448bb3ecd1bfa8ef17bd7c87a464c095e1093d6d 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -28,10 +28,13 @@ #define MAX_LOGLINE_DUMP_CONTENT_SIZE (MAX_LOGLINE_DUMP_SIZE - 100) #define LOG_FILE_NAME_LEN 300 -#define TSDB_DEFAULT_LOG_BUF_SIZE (512 * 1024) // 512K -#define TSDB_MIN_LOG_BUF_SIZE 1024 // 1K -#define TSDB_MAX_LOG_BUF_SIZE (1024 * 1024) // 1M -#define TSDB_DEFAULT_LOG_BUF_UNIT 1024 // 1K +#define TSDB_DEFAULT_LOG_BUF_SIZE (20 * 1024 * 1024) // 20MB + +#define DEFAULT_LOG_INTERVAL 25 +#define LOG_INTERVAL_STEP 5 +#define MIN_LOG_INTERVAL 5 +#define MAX_LOG_INTERVAL 25 +#define LOG_MAX_WAIT_MSEC 1000 #define LOG_BUF_BUFFER(x) ((x)->buffer) #define LOG_BUF_START(x) ((x)->buffStart) @@ -44,6 +47,7 @@ typedef struct { int32_t buffStart; int32_t buffEnd; int32_t buffSize; + int32_t minBuffSize; int32_t fd; int32_t stop; pthread_t asyncThread; @@ -68,6 +72,15 @@ int8_t tsAsyncLog = 1; float tsTotalLogDirGB = 0; float tsAvailLogDirGB = 0; float tsMinimalLogDirGB = 1.0f; +int64_t asyncLogLostLines = 0; +int32_t writeInterval = DEFAULT_LOG_INTERVAL; + +int64_t dbgEmptyW = 0; +int64_t dbgWN = 0; +int64_t dbgSmallWN = 0; +int64_t dbgBigWN = 0; +int64_t dbgWSize = 0; + #ifdef _TD_POWER_ char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; #else @@ -108,7 +121,8 @@ static void taosStopLog() { void taosCloseLog() { taosStopLog(); - tsem_post(&(tsLogObj.logHandle->buffNotEmpty)); + //tsem_post(&(tsLogObj.logHandle->buffNotEmpty)); + taosMsleep(MAX_LOG_INTERVAL/1000); if (taosCheckPthreadValid(tsLogObj.logHandle->asyncThread)) { pthread_join(tsLogObj.logHandle->asyncThread, 
NULL); } @@ -171,7 +185,9 @@ static void *taosThreadToOpenNewFile(void *param) { int32_t fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); if (fd < 0) { - uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno)); + tsLogObj.openInProgress = 0; + tsLogObj.lines = tsLogObj.maxLines - 1000; + uError("open new log file fail! fd:%d reason:%s, reuse lastlog", fd, strerror(errno)); return NULL; } @@ -363,7 +379,7 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId()); len += sprintf(buffer + len, "%s", flags); @@ -449,7 +465,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, . curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId()); len += sprintf(buffer + len, "%s", flags); @@ -495,8 +511,6 @@ static void taosCloseLogByFd(int32_t fd) { static SLogBuff *taosLogBuffNew(int32_t bufSize) { SLogBuff *tLogBuff = NULL; - if (bufSize < TSDB_MIN_LOG_BUF_SIZE || bufSize > TSDB_MAX_LOG_BUF_SIZE) return NULL; - tLogBuff = calloc(1, sizeof(SLogBuff)); if (tLogBuff == NULL) return NULL; @@ -505,10 +519,11 @@ static SLogBuff *taosLogBuffNew(int32_t bufSize) { LOG_BUF_START(tLogBuff) = LOG_BUF_END(tLogBuff) = 0; LOG_BUF_SIZE(tLogBuff) = bufSize; + tLogBuff->minBuffSize = bufSize / 10; tLogBuff->stop = 0; if (pthread_mutex_init(&LOG_BUF_MUTEX(tLogBuff), NULL) < 0) goto _err; - tsem_init(&(tLogBuff->buffNotEmpty), 0, 0); + //tsem_init(&(tLogBuff->buffNotEmpty), 0, 0); return tLogBuff; @@ -527,10 +542,27 @@ static void taosLogBuffDestroy(SLogBuff *tLogBuff) { } #endif +static void taosCopyLogBuffer(SLogBuff *tLogBuff, int32_t start, int32_t end, char *msg, int32_t msgLen) { + if (start > end) { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); + } else { + if (LOG_BUF_SIZE(tLogBuff) - end < msgLen) { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, LOG_BUF_SIZE(tLogBuff) - end); + memcpy(LOG_BUF_BUFFER(tLogBuff), msg + LOG_BUF_SIZE(tLogBuff) - end, msgLen - LOG_BUF_SIZE(tLogBuff) + end); + } else { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); + } + } + LOG_BUF_END(tLogBuff) = (LOG_BUF_END(tLogBuff) + msgLen) % LOG_BUF_SIZE(tLogBuff); +} + static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen) { int32_t start = 0; int32_t end = 0; int32_t remainSize = 0; + static int64_t lostLine = 0; + char tmpBuf[40] = {0}; + int32_t tmpBufLen = 0; if (tLogBuff == NULL || tLogBuff->stop) return -1; @@ -538,79 +570,128 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen) start = LOG_BUF_START(tLogBuff); end = LOG_BUF_END(tLogBuff); - remainSize = (start > end) ? (end - start - 1) : (start + LOG_BUF_SIZE(tLogBuff) - end - 1); + remainSize = (start > end) ? 
(start - end - 1) : (start + LOG_BUF_SIZE(tLogBuff) - end - 1); + + if (lostLine > 0) { + sprintf(tmpBuf, "...Lost %"PRId64" lines here...\n", lostLine); + tmpBufLen = (int32_t)strlen(tmpBuf); + } - if (remainSize <= msgLen) { + if (remainSize <= msgLen || ((lostLine > 0) && (remainSize <= (msgLen + tmpBufLen)))) { + lostLine++; + asyncLogLostLines++; pthread_mutex_unlock(&LOG_BUF_MUTEX(tLogBuff)); return -1; } - if (start > end) { - memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); - } else { - if (LOG_BUF_SIZE(tLogBuff) - end < msgLen) { - memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, LOG_BUF_SIZE(tLogBuff) - end); - memcpy(LOG_BUF_BUFFER(tLogBuff), msg + LOG_BUF_SIZE(tLogBuff) - end, msgLen - LOG_BUF_SIZE(tLogBuff) + end); - } else { - memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); - } + if (lostLine > 0) { + taosCopyLogBuffer(tLogBuff, start, end, tmpBuf, tmpBufLen); + lostLine = 0; } - LOG_BUF_END(tLogBuff) = (LOG_BUF_END(tLogBuff) + msgLen) % LOG_BUF_SIZE(tLogBuff); - // TODO : put string in the buffer + taosCopyLogBuffer(tLogBuff, LOG_BUF_START(tLogBuff), LOG_BUF_END(tLogBuff), msg, msgLen); - tsem_post(&(tLogBuff->buffNotEmpty)); + //int32_t w = atomic_sub_fetch_32(&waitLock, 1); + /* + if (w <= 0 || ((remainSize - msgLen - tmpBufLen) < (LOG_BUF_SIZE(tLogBuff) * 4 /5))) { + tsem_post(&(tLogBuff->buffNotEmpty)); + dbgPostN++; + } else { + dbgNoPostN++; + } + */ pthread_mutex_unlock(&LOG_BUF_MUTEX(tLogBuff)); + return 0; } -static int32_t taosPollLogBuffer(SLogBuff *tLogBuff, char *buf, int32_t bufSize) { - int32_t start = LOG_BUF_START(tLogBuff); - int32_t end = LOG_BUF_END(tLogBuff); - int32_t pollSize = 0; +static int32_t taosGetLogRemainSize(SLogBuff *tLogBuff, int32_t start, int32_t end) { + int32_t rSize = end - start; - if (start == end) { - return 0; - } else if (start < end) { - pollSize = MIN(end - start, bufSize); + return rSize >= 0 ? 
rSize : LOG_BUF_SIZE(tLogBuff) + rSize; +} - memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, pollSize); - return pollSize; - } else { - pollSize = MIN(end + LOG_BUF_SIZE(tLogBuff) - start, bufSize); - if (pollSize > LOG_BUF_SIZE(tLogBuff) - start) { - int32_t tsize = LOG_BUF_SIZE(tLogBuff) - start; - memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, tsize); - memcpy(buf + tsize, LOG_BUF_BUFFER(tLogBuff), pollSize - tsize); +static void taosWriteLog(SLogBuff *tLogBuff) { + static int32_t lastDuration = 0; + int32_t remainChecked = 0; + int32_t start, end, pollSize; + + do { + if (remainChecked == 0) { + start = LOG_BUF_START(tLogBuff); + end = LOG_BUF_END(tLogBuff); + + if (start == end) { + dbgEmptyW++; + writeInterval = MAX_LOG_INTERVAL; + return; + } + pollSize = taosGetLogRemainSize(tLogBuff, start, end); + if (pollSize < tLogBuff->minBuffSize) { + lastDuration += writeInterval; + if (lastDuration < LOG_MAX_WAIT_MSEC) { + break; + } + } + + lastDuration = 0; + } + + if (start < end) { + taosWrite(tLogBuff->fd, LOG_BUF_BUFFER(tLogBuff) + start, pollSize); } else { - memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, pollSize); + int32_t tsize = LOG_BUF_SIZE(tLogBuff) - start; + taosWrite(tLogBuff->fd, LOG_BUF_BUFFER(tLogBuff) + start, tsize); + + taosWrite(tLogBuff->fd, LOG_BUF_BUFFER(tLogBuff), end); } - return pollSize; - } + + dbgWN++; + dbgWSize+=pollSize; + + if (pollSize < tLogBuff->minBuffSize) { + dbgSmallWN++; + if (writeInterval < MAX_LOG_INTERVAL) { + writeInterval += LOG_INTERVAL_STEP; + } + } else if (pollSize > LOG_BUF_SIZE(tLogBuff)/3) { + dbgBigWN++; + writeInterval = MIN_LOG_INTERVAL; + } else if (pollSize > LOG_BUF_SIZE(tLogBuff)/4) { + if (writeInterval > MIN_LOG_INTERVAL) { + writeInterval -= LOG_INTERVAL_STEP; + } + } + + LOG_BUF_START(tLogBuff) = (LOG_BUF_START(tLogBuff) + pollSize) % LOG_BUF_SIZE(tLogBuff); + + start = LOG_BUF_START(tLogBuff); + end = LOG_BUF_END(tLogBuff); + + pollSize = taosGetLogRemainSize(tLogBuff, start, end); + if (pollSize < tLogBuff->minBuffSize) { + break; + } + + writeInterval = MIN_LOG_INTERVAL; + + remainChecked = 1; + }while (1); } static void *taosAsyncOutputLog(void *param) { SLogBuff *tLogBuff = (SLogBuff *)param; - int32_t log_size = 0; - - char tempBuffer[TSDB_DEFAULT_LOG_BUF_UNIT]; - + while (1) { - tsem_wait(&(tLogBuff->buffNotEmpty)); + //tsem_wait(&(tLogBuff->buffNotEmpty)); + + taosMsleep(writeInterval); // Polling the buffer - while (1) { - log_size = taosPollLogBuffer(tLogBuff, tempBuffer, TSDB_DEFAULT_LOG_BUF_UNIT); - if (log_size) { - taosWrite(tLogBuff->fd, tempBuffer, log_size); - LOG_BUF_START(tLogBuff) = (LOG_BUF_START(tLogBuff) + log_size) % LOG_BUF_SIZE(tLogBuff); - } else { - break; - } - } + taosWriteLog(tLogBuff); if (tLogBuff->stop) break; } diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index 56f2322a7bd1e0778e2d52667663b3baf5c08c1f..c6d49dfb3d8242bdf697b3212fde479f01f8bcd9 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -248,7 +248,7 @@ void taosNotePrint(SNoteObj *pNote, const char *const format, ...) 
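/*
 * Log-line prefix, as built by the sprintf calls below: each line starts
 * with "MM/DD hh:mm:ss.usec" followed by the calling thread id and the
 * level flags. This patch prints the thread id as zero-padded decimal
 * (%08 PRId64) instead of 0x-prefixed hex (0x%08 PRIx64), in tlog.c's
 * taosPrintLog/taosPrintLongString and here in tnote.c alike.
 */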
{ gettimeofday(&timeSecs, NULL); curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId()); va_start(argpointer, format); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 6066d58416e8a5ad905e3b0706f03adc013fa445..0c96ed2a2f3dfb7f03268c9f8fbb1b0afa2397b9 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt index 09c4213a024bfdaf397df39c5e164b6836951a41..3fefbea05ba763dfa856dd52c195d36ce70ccd91 100644 --- a/src/vnode/CMakeLists.txt +++ b/src/vnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt index 6f35cb9ba7e1fc111bd1c2a5ddd797abf334eac6..a89024dab5060b1f18174f769e0d70c00ad00faf 100644 --- a/src/wal/CMakeLists.txt +++ b/src/wal/CMakeLists.txt @@ -1,6 +1,8 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE) + INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 0eda6ff786c9fcc8975bd2317ef304aa98013bfb..aeb49830299eb0dcddfbd39a7a838fdc5d45b081 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -39,7 +39,7 @@ int32_t walRenew(void *handle) { if (tfValid(pWal->tfd)) { tfClose(pWal->tfd); - wDebug("vgId:%d, file:%s, it is closed", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is closed while renew", pWal->vgId, pWal->name); } if (pWal->keep == TAOS_WAL_KEEP) { @@ -56,7 +56,7 @@ int32_t walRenew(void *handle) { code = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); } else { - wDebug("vgId:%d, file:%s, it is created", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is created and open while renew", pWal->vgId, pWal->name); } pthread_mutex_unlock(&pWal->mutex); @@ -95,11 +95,15 @@ void walRemoveAllOldFiles(void *handle) { int64_t fileId = -1; pthread_mutex_lock(&pWal->mutex); + + tfClose(pWal->tfd); + wDebug("vgId:%d, file:%s, it is closed before remove all wals", pWal->vgId, pWal->name); + while (walGetNextFile(pWal, &fileId) >= 0) { snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); if (remove(pWal->name) < 0) { - wError("vgId:%d, wal:%p file:%s, failed to remove", pWal->vgId, pWal, pWal->name); + wError("vgId:%d, wal:%p file:%s, failed to remove since %s", pWal->vgId, pWal, pWal->name, strerror(errno)); } else { wInfo("vgId:%d, wal:%p file:%s, it is removed", pWal->vgId, pWal, pWal->name); } @@ -107,6 +111,28 @@ void walRemoveAllOldFiles(void *handle) { pthread_mutex_unlock(&pWal->mutex); } +#if defined(WAL_CHECKSUM_WHOLE) + +static void walUpdateChecksum(SWalHead *pHead) { + 
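  // Whole-record checksum scheme under WAL_CHECKSUM_WHOLE, as implemented
  // below: sver is set to 1 to mark records whose checksum covers the head
  // plus the payload (sver 0 records only checksummed the head). The cksum
  // field is zeroed before hashing so the stored value is computed over a
  // deterministic image of sizeof(*pHead) + pHead->len bytes.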
pHead->sver = 1; + pHead->cksum = 0; + pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); +} + +static int walValidateChecksum(SWalHead *pHead) { + if (pHead->sver == 0) { // for compatible with wal before sver 1 + return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); + } else if (pHead->sver == 1) { + uint32_t cksum = pHead->cksum; + pHead->cksum = 0; + return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); + } + + return 0; +} + +#endif + int32_t walWrite(void *handle, SWalHead *pHead) { if (handle == NULL) return -1; @@ -119,7 +145,13 @@ int32_t walWrite(void *handle, SWalHead *pHead) { if (pHead->version <= pWal->version) return 0; pHead->signature = WAL_SIGNATURE; +#if defined(WAL_CHECKSUM_WHOLE) + walUpdateChecksum(pHead); +#else + pHead->sver = 0; taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); +#endif + int32_t contLen = pHead->len + sizeof(SWalHead); pthread_mutex_lock(&pWal->mutex); @@ -192,7 +224,7 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) { wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); return TAOS_SYSTEM_ERROR(errno); } - wDebug("vgId:%d, file:%s open success", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is created and open while restore", pWal->vgId, pWal->name); } return TSDB_CODE_SUCCESS; @@ -242,16 +274,40 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, continue; } +#if defined(WAL_CHECKSUM_WHOLE) + if (pHead->sver == 0 && walValidateChecksum(pHead)) { + wInfo("vgId:%d, wal head cksum check passed, offset:%" PRId64, pWal->vgId, pos); + *offset = pos; + return TSDB_CODE_SUCCESS; + } + + if (pHead->sver == 1) { + if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { + wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); + return TSDB_CODE_WAL_FILE_CORRUPTED; + } + + if (walValidateChecksum(pHead)) { + wInfo("vgId:%d, wal whole cksum check passed, offset:%" PRId64, pWal->vgId, pos); + *offset = pos; + return TSDB_CODE_SUCCESS; + } + } + +#else if (taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wInfo("vgId:%d, wal head cksum check passed, offset:%" PRId64, pWal->vgId, pos); *offset = pos; return TSDB_CODE_SUCCESS; } + +#endif } return TSDB_CODE_WAL_FILE_CORRUPTED; } + static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; void * buffer = tmalloc(size); @@ -265,6 +321,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); tfree(buffer); return TAOS_SYSTEM_ERROR(errno); + } else { + wDebug("vgId:%d, file:%s, open for restore", pWal->vgId, name); } int32_t code = TSDB_CODE_SUCCESS; @@ -287,6 +345,51 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch break; } +#if defined(WAL_CHECKSUM_WHOLE) + if (pHead->sver == 0 && !walValidateChecksum(pHead)) { + wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + + if (pHead->len < 0 || pHead->len > size - sizeof(SWalHead)) { + wError("vgId:%d, file:%s, wal head len out of range, hver:%" PRIu64 " len:%d 
offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + + ret = (int32_t)tfRead(tfd, pHead->cont, pHead->len); + if (ret < 0) { + wError("vgId:%d, file:%s, failed to read wal body since %s", pWal->vgId, name, strerror(errno)); + code = TAOS_SYSTEM_ERROR(errno); + break; + } + + if (ret < pHead->len) { + wError("vgId:%d, file:%s, failed to read wal body, ret:%d len:%d", pWal->vgId, name, ret, pHead->len); + offset += sizeof(SWalHead); + continue; + } + + if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + +#else if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); @@ -320,6 +423,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } +#endif offset = offset + sizeof(SWalHead) + pHead->len; wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, @@ -332,6 +436,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch tfClose(tfd); tfree(buffer); + wDebug("vgId:%d, file:%s, it is closed after restore", pWal->vgId, name); return code; } diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index aec0602ac00c6b943b4dd6c20c219d17223ee896..f20a57899e049115ded0012c0092bf643af76187 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 57fc8b1953d155c6ab45c8adc9e4146d00fb0b39..4e7e9a87ea6810c362bd676cd9152f61bc08e29d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,7 +3,7 @@ # generate release version: # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. 
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index f5b0cf1478266cc58624c5e6c89cccfb2063b4f6..7cdcfb2e24f92a96e2d3cc15324ca4f3ac4bc7a8 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -56,6 +56,14 @@ pipeline { cd ${WKC}/tests ./test-all.sh b1 date''' + sh ''' + cd ${WKC}/tests + ./test-all.sh full jdbc + date''' + sh ''' + cd ${WKC}/tests + ./test-all.sh full unit + date''' } } @@ -109,6 +117,13 @@ pipeline { java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 ''' } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/ + cd ${JENKINS_HOME}/workspace/nodejs + node nodejsChecker.js host=localhost + ''' + } catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index a12e36ab6b9b40992c0231bd36567ab0df0d9d7c..36ed3efe191c9d949d6234bd61ffbbe28c3a33d2 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index df52acc99dcfeffacbc896d27b81efa66b45fa5f..2d78418e0ab8599f87a544b833b9b6bbd0b48ee7 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -650,6 +650,7 @@ namespace TDengineDriver tester.CloseConnection(); Console.WriteLine("End."); + CleanAndExitProgram(0); } public class InsertDataThread diff --git a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md index e14a5f7b675fa1f38514f93afd853e7a1be2a77f..17c5c8df00ab8727d1adfe493d3fbbd32891a676 100644 --- a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md +++ b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -137,7 +137,7 @@ Welcome to the TDengine shell from Linux, Client Version:2.0.1.1 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | + name | created_time | ntables | vgroups | replica | quorum | days | keep0,keep1,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | =================================================================================================================================================================================================================================================================== test | 2020-08-19 18:43:50.731 | 1 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | log | 2020-08-19 18:40:28.064 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 61cf50ed4d8281a7bf0e829003648badc587c605..167e7e37ae410dd865aa2e08c7f885961f9ddadd 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -13,7 +13,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.20 diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 52fb8caa90b1ee8ef0566ee7e87aae5199b6ea73..bd5f7efbc0321962a3f3f41a17bc9bde5dee9b27 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -63,7 +63,9 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.20 + + diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java index 8066126d62b195d3a5c16f3c580d6ff07fe32648..8f30c299466ca1f83bc689928b08e8001a87908d 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java @@ -10,4 +10,4 @@ public class SpringbootdemoApplication { public static void main(String[] args) { SpringApplication.run(SpringbootdemoApplication.class, args); } -} +} \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index 4a4109dcf326bd82067e3ab7153547f926e9f5ae..c153e27701403b672709a3585603b8630796d178 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -6,6 +6,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.*; import java.util.List; +import java.util.Map; @RequestMapping("/weather") @RestController @@ -20,7 +21,7 @@ public class WeatherController { * @return */ @GetMapping("/init") - public boolean init() { + public int init() { return weatherService.init(); } @@ -44,19 +45,23 @@ public class WeatherController { * @return */ @PostMapping("/{temperature}/{humidity}") - public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) { + 
public int saveWeather(@PathVariable float temperature, @PathVariable int humidity) { return weatherService.save(temperature, humidity); } - /** - * upload multi weather info - * - * @param weatherList - * @return - */ - @PostMapping("/batch") - public int batchSaveWeather(@RequestBody List weatherList) { - return weatherService.save(weatherList); + @GetMapping("/count") + public int count() { + return weatherService.count(); + } + + @GetMapping("/subTables") + public List getSubTables() { + return weatherService.getSubTables(); + } + + @GetMapping("/avg") + public List avg() { + return weatherService.avg(); } } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java index cae1a1aec03297d79bd8b7deb7ef1c387f81d740..ad6733558a9d548be196cf8c9c0c63dc96227b39 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java @@ -4,16 +4,26 @@ import com.taosdata.example.springbootdemo.domain.Weather; import org.apache.ibatis.annotations.Param; import java.util.List; +import java.util.Map; public interface WeatherMapper { + void dropDB(); + + void createDB(); + + void createSuperTable(); + + void createTable(Weather weather); + + List select(@Param("limit") Long limit, @Param("offset") Long offset); + int insert(Weather weather); - int batchInsert(List weatherList); + int count(); - List select(@Param("limit") Long limit, @Param("offset")Long offset); + List getSubTables(); - void createDB(); + List avg(); - void createTable(); } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml index a9bcda0b00ca73f133b2f31622e5d6e0b034e5bf..2d3e0540650f35c1018992795ac33fb6cb7c4837 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml @@ -4,28 +4,29 @@ - - - + + + - - create database if not exists test; + + drop database if exists test + + + + create database if not exists test - - create table if not exists test.weather(ts timestamp, temperature int, humidity float); + + create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int) - - ts, temperature, humidity - + + create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId}) + - - insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) + + insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity}) - - insert into test.weather (ts, temperature, humidity) values - - (now + #{index}a, #{weather.temperature}, #{weather.humidity}) - - + + + + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java index 
60565448ad7d66bb713e46ca5f62b41bbe905893..255b2cdca920a5568338e823d0744f2e9c4db7d3 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java @@ -6,12 +6,21 @@ import java.sql.Timestamp; public class Weather { - @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8") + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") private Timestamp ts; + private float temperature; + private float humidity; + private String location; + private int groupId; - private int temperature; + public Weather() { + } - private float humidity; + public Weather(Timestamp ts, float temperature, float humidity) { + this.ts = ts; + this.temperature = temperature; + this.humidity = humidity; + } public Timestamp getTs() { return ts; @@ -21,11 +30,11 @@ public class Weather { this.ts = ts; } - public int getTemperature() { + public float getTemperature() { return temperature; } - public void setTemperature(int temperature) { + public void setTemperature(float temperature) { this.temperature = temperature; } @@ -36,4 +45,20 @@ public class Weather { public void setHumidity(float humidity) { this.humidity = humidity; } + + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + public int getGroupId() { + return groupId; + } + + public void setGroupId(int groupId) { + this.groupId = groupId; + } } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java index 31ce8f1dd96b7f4d006b0b10acf722f262afda33..0aef828e1cf19f9c612f9c8d0433ce1a361c7441 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java @@ -5,25 +5,41 @@ import com.taosdata.example.springbootdemo.domain.Weather; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; +import java.sql.Timestamp; import java.util.List; +import java.util.Map; +import java.util.Random; @Service public class WeatherService { @Autowired private WeatherMapper weatherMapper; + private Random random = new Random(System.currentTimeMillis()); + private String[] locations = {"北京", "上海", "广州", "深圳", "天津"}; - public boolean init() { + public int init() { + weatherMapper.dropDB(); weatherMapper.createDB(); - weatherMapper.createTable(); - return true; + weatherMapper.createSuperTable(); + long ts = System.currentTimeMillis(); + long thirtySec = 1000 * 30; + int count = 0; + for (int i = 0; i < 20; i++) { + Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100)); + weather.setLocation(locations[random.nextInt(locations.length)]); + weather.setGroupId(i % locations.length); + weatherMapper.createTable(weather); + count += weatherMapper.insert(weather); + } + return count; } public List query(Long limit, Long offset) { return weatherMapper.select(limit, offset); } - public int save(int temperature, float humidity) { + public int save(float temperature, int humidity) { Weather weather = new Weather(); weather.setTemperature(temperature); 
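// Note: after this change save() takes (float temperature, int humidity),
// matching the new super table schema above where temperature is a float column.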
weather.setHumidity(humidity);
@@ -31,8 +47,15 @@ public class WeatherService {
return weatherMapper.insert(weather);
}
- public int save(List<Weather> weatherList) {
- return weatherMapper.batchInsert(weatherList);
+ public int count() {
+ return weatherMapper.count();
}
+ public List<String> getSubTables() {
+ return weatherMapper.getSubTables();
+ }
+
+ public List<Map<String, Object>> avg() {
+ return weatherMapper.avg();
+ }
}
diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
index 4fb68758c454b923c0a19e2e723a86c8b56ed88d..4d7e64d10576388827502a459df9e68da2721dbb 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
+++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
@@ -1,12 +1,14 @@
# datasource config - JDBC-JNI
-spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
-spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
-spring.datasource.username=root
-spring.datasource.password=taosdata
+#spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
+#spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+#spring.datasource.username=root
+#spring.datasource.password=taosdata
# datasource config - JDBC-RESTful
-#spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
-#spring.datasource.url=jdbc:TAOS-RS://master:6041/test?user=root&password=taosdata
+spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
+spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+spring.datasource.username=root
+spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5
spring.datasource.druid.min-idle=5
diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml
index a6cbe4615e0ec132f789a1edcc63fdde6fb0ad9c..22c2f3b63e82a3a2cdcc3093c3c43b98ab534a4b 100644
--- a/tests/examples/JDBC/taosdemo/pom.xml
+++ b/tests/examples/JDBC/taosdemo/pom.xml
@@ -67,7 +67,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.18
+ 2.0.20
diff --git a/tests/examples/JDBC/taosdemo/readme.md b/tests/examples/JDBC/taosdemo/readme.md
index 123affc71a48e3c933e8eafce15c49988cdc2863..451fa2960adb98e2deb8499732aefde11f4810a1 100644
--- a/tests/examples/JDBC/taosdemo/readme.md
+++ b/tests/examples/JDBC/taosdemo/readme.md
@@ -1,4 +1,11 @@
-
+```
+cd tests/examples/JDBC/taosdemo
+mvn clean package -Dmaven.test.skip=true
+# create the tables first, then insert
+java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
+# insert directly without creating tables
+java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
+```
Requirements:
1.
can read lowa's configuration file
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
index 4dc49fd37b1b5092b6799ec67bebd680f9642379..c361df82b0aebb0d804b1a0982a0c1cf44ef5953 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java
@@ -4,6 +4,7 @@ import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.components.JdbcTaosdemoConfig;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.DatabaseService;
+import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
@@ -31,9 +32,21 @@ public class TaosDemoApplication {
}
// initialization
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password);
+ if (config.executeSql != null && !config.executeSql.isEmpty() && !config.executeSql.replaceAll("\\s", "").isEmpty()) {
+ Thread task = new Thread(new SqlExecuteTask(dataSource, config.executeSql));
+ task.start();
+ try {
+ task.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ return;
+ }
+
final DatabaseService databaseService = new DatabaseService(dataSource);
final SuperTableService superTableService = new SuperTableService(dataSource);
final SubTableService subTableService = new SubTableService(dataSource);
+
// create the database
long start = System.currentTimeMillis();
Map<String, String> databaseParam = new HashMap<>();
@@ -90,6 +103,10 @@ public class TaosDemoApplication {
int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config);
end = System.currentTimeMillis();
logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
+ /**********************************************************************************/
+ // query
+
+ /**********************************************************************************/
// drop tables
if (config.dropTable) {
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java
index c96d6f8bed68e9bb67d959ddb1d7531b4cbadeb3..a7d08e96ea373c4773e872bcaf9b3a7b98d5bf9a 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java
+++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java
@@ -23,7 +23,6 @@ public class DataSourceFactory {
properties.load(is);
HikariConfig config = new HikariConfig();
-
if (properties.containsKey("jdbc.driver")) {
// String driverName = properties.getProperty("jdbc.driver");
// System.out.println(">>> load driver : " + driverName);
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java
index 971c10dee2889543e95a70b244ea3cda462df3a6..974a2755a5a029d3a5fc681bc8c59b0aca1a7ca4 100644
--- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java
+++
b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java @@ -42,7 +42,7 @@ public final class JdbcTaosdemoConfig { public int rate = 10; public long range = 1000l; // select task - + public String executeSql; // drop task public boolean dropTable = false; @@ -89,7 +89,7 @@ public final class JdbcTaosdemoConfig { System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10"); System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms"); // query task -// System.out.println("-sqlFile The select sql file"); + System.out.println("-executeSql execute a specific sql."); // drop task System.out.println("-dropTable Drop data before quit. Default is false"); System.out.println("--help Give this help list"); @@ -207,6 +207,9 @@ public final class JdbcTaosdemoConfig { range = Integer.parseInt(args[++i]); } // select task + if ("-executeSql".equals(args[i]) && i < args.length - 1) { + executeSql = args[++i]; + } // drop task if ("-dropTable".equals(args[i]) && i < args.length - 1) { diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java new file mode 100644 index 0000000000000000000000000000000000000000..efabff6afe904516ad9682cd7197412dc02765ef --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java @@ -0,0 +1,104 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.jdbc.utils.SqlSyntaxValidator; + +import javax.sql.DataSource; +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class QueryService { + + private final DataSource dataSource; + + public QueryService(DataSource dataSource) { + this.dataSource = dataSource; + } + + /* only select or show SQL Statement is valid for executeQuery */ + public Boolean[] areValidQueries(String[] sqls) { + Boolean[] ret = new Boolean[sqls.length]; + for (int i = 0; i < sqls.length; i++) { + ret[i] = true; + if (!SqlSyntaxValidator.isValidForExecuteQuery(sqls[i])) { + ret[i] = false; + continue; + } + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + stmt.executeQuery(sqls[i]); + } catch (SQLException e) { + ret[i] = false; + continue; + } + } + return ret; + } + + public String[] generateSuperTableQueries(String dbName) { + List sqls = new ArrayList<>(); + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + stmt.execute("use " + dbName); + ResultSet rs = stmt.executeQuery("show stables"); + while (rs.next()) { + String name = rs.getString("name"); + sqls.add("select count(*) from " + dbName + "." + name); + sqls.add("select first(*) from " + dbName + "." + name); + sqls.add("select last(*) from " + dbName + "." + name); + sqls.add("select last_row(*) from " + dbName + "." 
+ name); + } + } catch (SQLException e) { + e.printStackTrace(); + } + String[] sqlArr = new String[sqls.size()]; + return sqls.toArray(sqlArr); + } + + public void querySuperTable(String[] sqls, int interval, int threadCount, long queryTimes) { + List threads = IntStream.range(0, threadCount).mapToObj(i -> new Thread(() -> { + // do query + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + long count = queryTimes; + if (count == 0) + count = Long.MAX_VALUE; + while (count > 0) { + for (String sql : sqls) { + long start = System.currentTimeMillis(); + ResultSet rs = stmt.executeQuery(sql); + printResultSet(rs); + long end = System.currentTimeMillis(); + long timecost = end - start; + if (interval - timecost > 0) { + TimeUnit.MILLISECONDS.sleep(interval - timecost); + } + } + count--; + } + + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + + })).collect(Collectors.toList()); + threads.stream().forEach(Thread::start); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + + private void printResultSet(ResultSet rs) throws SQLException { + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SqlExecuteTask.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SqlExecuteTask.java new file mode 100644 index 0000000000000000000000000000000000000000..ff2e4d0af068a10e62933837817d2d2df0712a4c --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SqlExecuteTask.java @@ -0,0 +1,36 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.utils.Printer; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class SqlExecuteTask implements Runnable { + private final DataSource dataSource; + private final String sql; + + public SqlExecuteTask(DataSource dataSource, String sql) { + this.dataSource = dataSource; + this.sql = sql; + } + + @Override + public void run() { + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + long start = System.currentTimeMillis(); + boolean execute = stmt.execute(sql); + long end = System.currentTimeMillis(); + if (execute) { + ResultSet rs = stmt.getResultSet(); + Printer.printResult(rs); + } else { + Printer.printSql(sql, true, (end - start)); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java index b4ad2d17e58a3f7c04665707f0cd3e7327d7c16c..2504fdb0b4cd48ec263d94ec377e1bb8902ea9a7 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -2,7 +2,6 @@ package com.taosdata.taosdemo.service; import com.taosdata.taosdemo.dao.TableMapper; import com.taosdata.taosdemo.domain.TableMeta; -import org.springframework.beans.factory.annotation.Autowired; import 
org.springframework.stereotype.Service; import java.util.List; diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/Printer.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/Printer.java new file mode 100644 index 0000000000000000000000000000000000000000..a4627463ec5cd1bbccdb64b67506ba38f712de8f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/Printer.java @@ -0,0 +1,27 @@ +package com.taosdata.taosdemo.utils; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public class Printer { + + public static void printResult(ResultSet resultSet) throws SQLException { + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String columnLabel = metaData.getColumnLabel(i); + String value = resultSet.getString(i); + System.out.printf("%s: %s\t", columnLabel, value); + } + System.out.println(); + } + } + + public static void printSql(String sql, boolean succeed, long cost) { + System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private Printer() { + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties index 6fd38f1762e6879422cf26fe2dc617caeb5e297c..488185196f1d2325fd9896d30068cbb202180a3f 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -1,5 +1,5 @@ jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver #jdbc.driver=com.taosdata.jdbc.TSDBDriver -hikari.maximum-pool-size=1 -hikari.minimum-idle=1 +hikari.maximum-pool-size=20 +hikari.minimum-idle=20 hikari.max-lifetime=0 \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/query.json b/tests/examples/JDBC/taosdemo/src/main/resources/query.json index 53d0b319212196257aa3e84be1221bd6e2bd0d8d..cc6900d77c3941e6af3274efdfe782c42a557990 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/query.json +++ b/tests/examples/JDBC/taosdemo/src/main/resources/query.json @@ -7,10 +7,10 @@ "password": "taosdata", "databases": "db01", "super_table_query": - {"rate":1, "concurrent":1, + {"rate":1, "concurrent":1,"time":10000, "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}] }, - "sub_table_query": + "sub_table_query": {"stblname": "stb01", "rate":1, "threads":1, "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] } diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f2ad25710c1a82136d6316ed69e379bc3925897d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java @@ -0,0 +1,41 @@ +package com.taosdata.taosdemo.service; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import org.junit.BeforeClass; +import org.junit.Test; + +public class QueryServiceTest { + private static QueryService queryService; + + @Test + public void areValidQueries() { + + } + + @Test + public void generateSuperTableQueries() { + String[] sqls = 
queryService.generateSuperTableQueries("restful_test"); + for (String sql : sqls) { + System.out.println(sql); + } + } + + @Test + public void querySuperTable() { + String[] sqls = queryService.generateSuperTableQueries("restful_test"); + queryService.querySuperTable(sqls, 1000, 10, 10); + } + + @BeforeClass + public static void beforeClass() throws ClassNotFoundException { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8"); + config.setUsername("root"); + config.setPassword("taosdata"); + HikariDataSource dataSource = new HikariDataSource(config); + queryService = new QueryService(dataSource); + } + +} \ No newline at end of file diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index e08d667d6be42bef3dc4e058fab10ac68f0809e0..930a6075cae2767c0d50ca2e1574d6441430d1b3 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -467,7 +467,6 @@ int main(int argc, char *argv[]) { const char* passwd = "taosdata"; taos_options(TSDB_OPTION_TIMEZONE, "GMT-8"); - taos_init(); TAOS* taos = taos_connect(host, user, passwd, "", 0); if (taos == NULL) { diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index be3a908f11748a5ac414157bef6b9caed389b303..16a14e96549c5cd66f747ae0c6c63f20c3e7fbfa 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -99,8 +99,6 @@ int main(int argc, char *argv[]) tableList = (STable *)malloc(size); memset(tableList, 0, size); - taos_init(); - taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) taos_error(taos); diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 45ec54680301fd2c941d22d8d67c867b8740c37b..e074e6496607bf2046496c7b6766b6f3dedf9dbc 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -61,8 +61,6 @@ int main(int argc, char *argv[]) { return 0; } - // init TAOS - taos_init(); TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); @@ -94,15 +92,14 @@ void Test(TAOS *taos, char *qstr, int index) { // printf("insert row: %i, reason:%s\n", i, taos_errstr(taos)); // } TAOS_RES *result1 = taos_query(taos, qstr); - if (result1) { - printf("insert row: %i\n", i); - } else { - printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/); + if (result1 == NULL || taos_errno(result1) != 0) { + printf("failed to insert row, reason:%s\n", taos_errstr(result1)); taos_free_result(result1); exit(1); + } else { + printf("insert row: %i\n", i); } taos_free_result(result1); - } printf("success to insert rows, total %d rows\n", i); diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index 7293a22c11d7c6049c220ca0951b5969fa344114..b06fe551dbde4b16e3ea197ca4c2bc1711ef63bb 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -6,7 +6,7 @@ TARGET=exe LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ - -msse4.2 -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 + -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 all: $(TARGET) diff --git a/tests/examples/c/prepare.c b/tests/examples/c/prepare.c index 
7a70b744ee8561459318aa456160a54b8c6270a8..13d71beea6156aa32677c20ccc2d222d28826b24 100644 --- a/tests/examples/c/prepare.c +++ b/tests/examples/c/prepare.c @@ -22,9 +22,6 @@ int main(int argc, char *argv[]) return 0; } - // init TAOS - taos_init(); - taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); diff --git a/tests/examples/c/stream.c b/tests/examples/c/stream.c index 060f5b84ff276579019d3278552e424b2a4198e9..30a790f061cd8ef2b870a371c2cadfb0e2a413c1 100644 --- a/tests/examples/c/stream.c +++ b/tests/examples/c/stream.c @@ -54,9 +54,6 @@ int main(int argc, char *argv[]) exit(0); } - // init TAOS - taos_init(); - strcpy(db_name, argv[2]); strcpy(tbl_name, argv[3]); diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index cdd8ddaf7f6c4d2e5088ef36cc00ad77a0c8ebc9..1d3533fa5ee89f7425a9b0563e738b4e0703e6c8 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -216,9 +216,6 @@ int main(int argc, char *argv[]) { } } - // init TAOS - taos_init(); - TAOS* taos = taos_connect(host, user, passwd, "", 0); if (taos == NULL) { printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); diff --git a/tests/examples/python/taosdemo/README.md b/tests/examples/python/taosdemo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d48fffe8ff44fb68a1a147a2c97ca057fb360092 --- /dev/null +++ b/tests/examples/python/taosdemo/README.md @@ -0,0 +1,38 @@ +install build environment +=== +/usr/bin/python3 -m pip install -r requirements.txt + +run python version taosdemo +=== +Usage: ./taosdemo.py [OPTION...] + +Author: Shuduo Sang + + -H, --help Show usage. + + -N, --native flag, Use native interface if set. Default is using RESTful interface. + -h, --host host, The host to connect to TDengine. Default is localhost. + -p, --port port, The TCP/IP port number to use for the connection. Default is 0. + -u, --user user, The user name to use when connecting to the server. Default is 'root'. + -P, --password password, The password to use when connecting to the server. Default is 'taosdata'. + -l, --colsPerRec num_of_columns_per_record, The number of columns per record. Default is 3. + -d, --dbname database, Destination database. Default is 'test'. + -a, --replica replica, Set the replica parameters of the database, Default 1, min: 1, max: 5. + -m, --tbname
    table_prefix, Table prefix name. Default is 't'.
+ -M, --stable       flag, Use super table. Default is no.
+ -s, --stbname      stable_prefix, STable prefix name. Default is 'st'.
+ -Q, --query        [NO|EACHTB|command] query, Execute query command. 'EACHTB' means select * from each table.
+ -T, --threads      num_of_threads, The number of threads. Default is 1.
+ -C, --processes    num_of_processes, The number of processes. Default is 1.
+ -r, --batch        num_of_records_per_req, The number of records per request. Default is 1000.
+ -t, --numOfTb      num_of_tables, The number of tables. Default is 1.
+ -n, --numOfRec     num_of_records_per_table, The number of records per table. Default is 1.
+ -c, --config       config_directory, Configuration directory. Default is '/etc/taos/'.
+ -x, --insertOnly   flag, Insert only flag.
+ -O, --outOfOrder   out-of-order data insert, 0: in order, 1: out of order. Default is in order.
+ -R, --rateOOOO     rate, Out-of-order data rate, used when -O is 1. Default 10, min: 0, max: 50.
+ -D, --deleteMethod Delete data methods, 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database.
+ -v, --verbose      Print verbose output.
+ -g, --debug        Print debug output.
+ -y, --skipPrompt   Skip reading a key for continuous test; default is not to skip.
+
diff --git a/tests/examples/python/taosdemo/requirements.txt b/tests/examples/python/taosdemo/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..977e8e3726a446e85635764fe9243a3c5416ea0f
--- /dev/null
+++ b/tests/examples/python/taosdemo/requirements.txt
@@ -0,0 +1,28 @@
+##
+######## example-requirements.txt #######
+##
+####### Requirements without Version Specifiers ######
+requests
+multipledispatch
+#beautifulsoup4
+##
+####### Requirements with Version Specifiers ######
+## See https://www.python.org/dev/peps/pep-0440/#version-specifiers
+#docopt == 0.6.1             # Version Matching. Must be version 0.6.1
+#keyring >= 4.1.1            # Minimum version 4.1.1
+#coverage != 3.5             # Version Exclusion. Anything except version 3.5
+#Mopidy-Dirble ~= 1.1        # Compatible release. Same as >= 1.1, == 1.*
+##
+####### Refer to other requirements files ######
+#-r other-requirements.txt
+##
+##
+####### A particular file ######
+#./downloads/numpy-1.9.2-cp34-none-win32.whl
+#http://wxpython.org/Phoenix/snapshot-builds/wxPython_Phoenix-3.0.3.dev1820+49a8884-cp34-none-win_amd64.whl
+##
+####### Additional Requirements without Version Specifiers ######
+## Same as 1st section, just here to show that you can put things in any order.
+#rejected
+#green
+##
diff --git a/tests/examples/python/taosdemo/taosdemo.py b/tests/examples/python/taosdemo/taosdemo.py
new file mode 100755
index 0000000000000000000000000000000000000000..d55023bdbf119544a788aa6246c9d63dbf024872
--- /dev/null
+++ b/tests/examples/python/taosdemo/taosdemo.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python3
+# *  Copyright (c) 2019 TAOS Data, Inc.
+# *
+# *  This program is free software: you can use, redistribute, and/or modify
+# *  it under the terms of the GNU Affero General Public License, version 3
+# *  or later ("AGPL"), as published by the Free Software Foundation.
+# *
+# *  This program is distributed in the hope that it will be useful, but WITHOUT
+# *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# *  FITNESS FOR A PARTICULAR PURPOSE.
+# *
+# *  You should have received a copy of the GNU Affero General Public License
+# *  along with this program. If not, see <http://www.gnu.org/licenses/>.
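+
+# Usage sketch (illustrative values only; see the README in this directory
+# for the full option list): create 10 tables with 100 records each, using
+# 2 threads and 100 records per request, and skip the confirmation prompt.
+# This assumes a TDengine server reachable on localhost via the RESTful
+# interface:
+#
+#     ./taosdemo.py -t 10 -n 100 -T 2 -r 100 -y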
+ +# -*- coding: utf-8 -*- + +import sys +import getopt +import requests +import json +import random +import time +import datetime +from multiprocessing import Manager, Pool, Lock +from multipledispatch import dispatch +from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED + + +@dispatch(str, str) +def v_print(msg: str, arg: str): + if verbose: + print(msg % arg) + + +@dispatch(str, str, str) +def v_print(msg: str, arg1: str, arg2: str): + if verbose: + print(msg % (arg1, arg2)) + + +@dispatch(str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str): + if verbose: + print(msg % (arg1, arg2, arg3)) + + +@dispatch(str, str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str, arg4: str): + if verbose: + print(msg % (arg1, arg2, arg3, arg4)) + + +@dispatch(str, int) +def v_print(msg: str, arg: int): + if verbose: + print(msg % int(arg)) + + +@dispatch(str, int, str) +def v_print(msg: str, arg1: int, arg2: str): + if verbose: + print(msg % (int(arg1), str(arg2))) + + +@dispatch(str, str, int) +def v_print(msg: str, arg1: str, arg2: int): + if verbose: + print(msg % (arg1, int(arg2))) + + +@dispatch(str, int, int) +def v_print(msg: str, arg1: int, arg2: int): + if verbose: + print(msg % (int(arg1), int(arg2))) + + +@dispatch(str, int, int, str) +def v_print(msg: str, arg1: int, arg2: int, arg3: str): + if verbose: + print(msg % (int(arg1), int(arg2), str(arg3))) + + +@dispatch(str, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3))) + + +@dispatch(str, int, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int, arg4: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3), int(arg4))) + + +def restful_execute(host: str, port: int, user: str, password: str, cmd: str): + url = "http://%s:%d/rest/sql" % (host, restPort) + + v_print("restful_execute - cmd: %s", cmd) + + resp = requests.post(url, cmd, auth=(user, password)) + + v_print("resp status: %d", resp.status_code) + + if debug: + v_print( + "resp text: %s", + json.dumps( + resp.json(), + sort_keys=True, + indent=2)) + else: + print("resp: %s" % json.dumps(resp.json())) + + +def query_func(process: int, thread: int, cmd: str): + v_print("%d process %d thread cmd: %s", process, thread, cmd) + + if oneMoreHost != "NotSupported" and random.randint( + 0, 1) == 1: + v_print("%s", "Send to second host") + if native: + cursor2.execute(cmd) + else: + restful_execute( + oneMoreHost, port, user, password, cmd) + else: + v_print("%s%s%s", "Send ", cmd, " to the host") + if native: + pass +# cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + +def query_data_process(cmd: str): + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + if native: + try: + cursor.execute(cmd) + cols = cursor.description + data = cursor.fetchall() + + for col in data: + print(col) + except Exception as e: + conn.close() + print("Error: %s" % e.args[0]) + sys.exit(1) + + else: + restful_execute( + host, + port, + 
user, + password, + cmd) + + if native: + cursor.close() + conn.close() + + +def create_stb(): + for i in range(0, numOfStb): + if native: + cursor.execute( + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i) + ) + + +def use_database(): + + if native: + cursor.execute("USE %s" % current_db) + else: + restful_execute(host, port, user, password, "USE %s" % current_db) + + +def create_databases(): + for i in range(0, numOfDb): + v_print("will create database db%d", int(i)) + + if native: + cursor.execute( + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + + +def drop_tables(): + # TODO + v_print("TODO: drop tables total %d", numOfTb) + pass + + +def drop_stable(): + # TODO + v_print("TODO: drop stables total %d", numOfStb) + pass + + +def drop_databases(): + v_print("drop databases total %d", numOfDb) + + # drop exist databases first + for i in range(0, numOfDb): + v_print("will drop database db%d", int(i)) + + if native: + cursor.execute( + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + + +def insert_func(process: int, thread: int): + v_print("%d process %d thread, insert_func ", process, thread) + + # generate uuid + uuid_int = random.randint(0, numOfTb + 1) + uuid = "%s" % uuid_int + v_print("uuid is: %s", uuid) + + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + v_print("numOfRec %d:", numOfRec) + + row = 0 + while row < numOfRec: + v_print("row: %d", row) + sqlCmd = ['INSERT INTO '] + try: + sqlCmd.append( + "%s.%s%d " % (current_db, tbName, thread)) + + if (numOfStb > 0 and autosubtable): + sqlCmd.append("USING %s.%s%d TAGS('%s') " % + (current_db, stbName, numOfStb - 1, uuid)) + + start_time = datetime.datetime( + 2021, 1, 25) + datetime.timedelta(seconds=row) + + sqlCmd.append("VALUES ") + for batchIter in range(0, batch): + sqlCmd.append("('%s', %f) " % + ( + start_time + + datetime.timedelta( + milliseconds=batchIter), + random.random())) + row = row + 1 + if row >= numOfRec: + v_print("BREAK, row: %d numOfRec:%d", row, numOfRec) + break + + except Exception as e: + print("Error: %s" % e.args[0]) + + cmd = ' '.join(sqlCmd) + + if measure: + exec_start_time = datetime.datetime.now() + + if native: + affectedRows = cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + if measure: + exec_end_time = datetime.datetime.now() + exec_delta = exec_end_time - exec_start_time + v_print( + "consume %d microseconds", + exec_delta.microseconds) + + v_print("cmd: %s, length:%d", cmd, len(cmd)) + + if native: + cursor.close() + conn.close() + + +def create_tb_using_stb(): + # TODO: + pass + + +def create_tb(): + v_print("create_tb() numOfTb: 
%d", numOfTb) + for i in range(0, numOfDb): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + + +def insert_data_process(lock, i: int, begin: int, end: int): + lock.acquire() + tasks = end - begin + v_print("insert_data_process:%d table from %d to %d, tasks %d", i, begin, end, tasks) + + if (threads < (end - begin)): + for j in range(begin, end, threads): + with ThreadPoolExecutor(max_workers=threads) as executor: + k = end if ((j + threads) > end) else (j + threads) + workers = [ + executor.submit( + insert_func, + i, + n) for n in range( + j, + k)] + wait(workers, return_when=ALL_COMPLETED) + else: + with ThreadPoolExecutor(max_workers=threads) as executor: + workers = [ + executor.submit( + insert_func, + i, + j) for j in range( + begin, + end)] + wait(workers, return_when=ALL_COMPLETED) + + lock.release() + + +def query_db(i): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "SELECT COUNT(*) FROM %s%d" % (tbName, j)) + else: + restful_execute( + host, port, user, password, "SELECT COUNT(*) FROM %s%d" % + (tbName, j)) + + +def printConfig(): + + print("###################################################################") + print("# Use native interface: %s" % native) + print("# Server IP: %s" % host) + if native: + print("# Server port: %s" % port) + else: + print("# Server port: %s" % restPort) + + print("# Configuration Dir: %s" % configDir) + print("# User: %s" % user) + print("# Password: %s" % password) + print("# Number of Columns per record: %s" % colsPerRecord) + print("# Number of Threads: %s" % threads) + print("# Number of Processes: %s" % processes) + print("# Number of Tables: %s" % numOfTb) + print("# Number of records per Table: %s" % numOfRec) + print("# Records/Request: %s" % batch) + print("# Database name: %s" % dbName) + print("# Replica: %s" % replica) + print("# Use STable: %s" % useStable) + print("# Table prefix: %s" % tbName) + if useStable: + print("# STable prefix: %s" % stbName) + + print("# Data order: %s" % outOfOrder) + print("# Data out of order rate: %s" % rateOOOO) + print("# Delete method: %s" % deleteMethod) + print("# Query command: %s" % queryCmd) + print("# Insert Only: %s" % insertOnly) + print("# Verbose output %s" % verbose) + print("# Test time: %s" % + datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")) + print("###################################################################") + + +if __name__ == "__main__": + + native = False + verbose = False + debug = False + measure = True + dropDbOnly = False + colsPerRecord = 3 + numOfDb = 1 + dbName = "test" + replica = 1 + batch = 1 + numOfTb = 1 + tbName = "tb" + useStable = False + numOfStb = 0 + stbName = "stb" + numOfRec = 10 + ieration = 1 + host = "127.0.0.1" + configDir = "/etc/taos" + oneMoreHost = "NotSupported" + port = 6030 + restPort = 6041 + user = "root" + defaultPass = "taosdata" + processes = 1 + threads = 1 + insertOnly = False + autosubtable = False + queryCmd = "NO" + outOfOrder = 0 + rateOOOO = 0 + deleteMethod = 0 + skipPrompt = False + + try: + opts, args = 
getopt.gnu_getopt(sys.argv[1:],
+                                       'Nh:p:u:P:d:a:m:Ms:Q:T:C:r:l:t:n:c:xO:R:D:vgyH',
+                                       [
+                                           # long options that take a value need a trailing '='
+                                           'native', 'host=', 'port=', 'user=', 'password=', 'dbname=', 'replica=', 'tbname=',
+                                           'stable', 'stbname=', 'query=', 'threads=', 'processes=',
+                                           'batch=', 'colsPerRec=', 'numOfTb=', 'numOfRec=', 'config=',
+                                           'insertOnly', 'outOfOrder=', 'rateOOOO=', 'deleteMethod=',
+                                           'verbose', 'debug', 'skipPrompt', 'help'
+                                       ])
+    except getopt.GetoptError as err:
+        print('ERROR:', err)
+        print('Try `taosdemo.py --help` for more options.')
+        sys.exit(1)
+
+    if not opts:
+        print('Try `taosdemo.py --help` for more options.')
+        sys.exit(1)
+
+    # -P is optional, so start from the default password
+    password = defaultPass
+
+    for key, value in opts:
+        if key in ['-H', '--help']:
+            print('')
+            print(
+                'taosdemo.py for TDengine')
+            print('')
+            print('Author: Shuduo Sang')
+            print('')
+
+            print('\t-H, --help         Show usage.')
+            print('')
+
+            print('\t-N, --native       flag, Use native interface if set. Default is using RESTful interface.')
+            print('\t-h, --host         host, The host to connect to TDengine. Default is localhost.')
+            print('\t-p, --port         port, The TCP/IP port number to use for the connection. Default is 0.')
+            print('\t-u, --user         user, The user name to use when connecting to the server. Default is \'root\'.')
+            print('\t-P, --password     password, The password to use when connecting to the server. Default is \'taosdata\'.')
+            print('\t-l, --colsPerRec   num_of_columns_per_record, The number of columns per record. Default is 3.')
+            print(
+                '\t-d, --dbname       database, Destination database. Default is \'test\'.')
+            print('\t-a, --replica      replica, Set the replica parameters of the database, Default 1, min: 1, max: 5.')
+            print(
+                '\t-m, --tbname       table_prefix, Table prefix name. Default is \'t\'.')
+            print(
+                '\t-M, --stable       flag, Use super table. Default is no.')
+            print(
+                '\t-s, --stbname      stable_prefix, STable prefix name. Default is \'st\'.')
+            print('\t-Q, --query        [NO|EACHTB|command] query, Execute query command. \'EACHTB\' means select * from each table.')
+            print(
+                '\t-T, --threads      num_of_threads, The number of threads. Default is 1.')
+            print(
+                '\t-C, --processes    num_of_processes, The number of processes. Default is 1.')
+            print('\t-r, --batch        num_of_records_per_req, The number of records per request. Default is 1000.')
+            print(
+                '\t-t, --numOfTb      num_of_tables, The number of tables. Default is 1.')
+            print('\t-n, --numOfRec     num_of_records_per_table, The number of records per table. Default is 1.')
+            print('\t-c, --config       config_directory, Configuration directory. Default is \'/etc/taos/\'.')
+            print('\t-x, --insertOnly   flag, Insert only flag.')
+            print('\t-O, --outOfOrder   out-of-order data insert, 0: in order, 1: out of order. Default is in order.')
+            print('\t-R, --rateOOOO     rate, Out-of-order data rate, used when -O is 1. Default 10, min: 0, max: 50.')
+            print('\t-D, --deleteMethod Delete data methods, 0: don\'t delete, 1: delete by table, 2: delete by stable, 3: delete by database.')
+            print('\t-v, --verbose      Print verbose output.')
+            print('\t-g, --debug        Print debug output.')
+            print(
+                '\t-y, --skipPrompt   Skip reading a key for continuous test; default is not to skip.')
+            print('')
+            sys.exit(0)
+
+        if key in ['-N', '--native']:
+            try:
+                import taos
+            except Exception as e:
+                print("Error: %s" % e.args[0])
+                sys.exit(1)
+            native = True
+
+        if key in ['-h', '--host']:
+            host = value
+
+        if key in ['-p', '--port']:
+            port = int(value)
+
+        if key in ['-u', '--user']:
+            user = value
+
+        if key in ['-P', '--password']:
+            password = value
+
+        if key in ['-d', '--dbname']:
+            dbName = value
+
+        if key in ['-a', '--replica']:
+            replica = int(value)
+            if replica < 1:
+                print("FATAL: number of replicas must be greater than 0")
+                sys.exit(1)
+
+        if key in ['-m', '--tbname']:
+            tbName = value
+
+        if key in ['-M', '--stable']:
+            useStable = True
+            numOfStb = 1
+
+        if key in ['-s', '--stbname']:
+            stbName = value
+
+        if key in ['-Q', '--query']:
+            queryCmd = str(value)
+
+        if key in ['-T', '--threads']:
+            threads = int(value)
+            if threads < 1:
+                print("FATAL: number of threads must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-C', '--processes']:
+            processes = int(value)
+            if processes < 1:
+                print("FATAL: number of processes must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-r', '--batch']:
+            batch = int(value)
+
+        if key in ['-l', '--colsPerRec']:
+            # printConfig() reads colsPerRecord
+            colsPerRecord = int(value)
+
+        if key in ['-t', '--numOfTb']:
+            numOfTb = int(value)
+            v_print("numOfTb is %d", numOfTb)
+
+        if key in ['-n', '--numOfRec']:
+            numOfRec = int(value)
+            v_print("numOfRec is %d", numOfRec)
+            if numOfRec < 1:
+                print("FATAL: number of records must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-c', '--config']:
+            configDir = value
+            v_print("config dir: %s", configDir)
+
+        if key in ['-x', '--insertOnly']:
+            insertOnly = True
+            v_print("insert only: %d", insertOnly)
+
+        if key in ['-O', '--outOfOrder']:
+            outOfOrder = int(value)
+            v_print("out of order is %d", outOfOrder)
+
+        if key in ['-R', '--rateOOOO']:
+            rateOOOO = int(value)
+            v_print("the rate of out of order is %d", rateOOOO)
+
+        if key in ['-D', '--deleteMethod']:
+            deleteMethod = int(value)
+            if (deleteMethod < 0) or (deleteMethod > 3):
+                print(
+                    "invalid delete method %d, valid value is 0~3, set to default 0" %
+                    deleteMethod)
+                deleteMethod = 0
+            v_print("the delete method is %d", deleteMethod)
+
+        if key in ['-v', '--verbose']:
+            verbose = True
+
+        if key in ['-g', '--debug']:
+            debug = True
+
+        if key in ['-y', '--skipPrompt']:
+            skipPrompt = True
+
+    if verbose:
+        printConfig()
+
+    if not skipPrompt:
+        input("Press any key to continue...")
+
+    # establish connection first if native
+    if native:
+        v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir)
+        try:
+            conn = taos.connect(
+                host=host,
+                user=user,
+                password=password,
+                config=configDir)
+            v_print("conn: %s", str(conn.__class__))
+        except Exception as e:
+            print("Error: %s" % e.args[0])
+            sys.exit(1)
+
+        try:
+            cursor = conn.cursor()
+            v_print("cursor:%d %s", id(cursor), str(cursor.__class__))
+        except Exception as e:
+            print("Error: %s" % e.args[0])
+            conn.close()
+            sys.exit(1)
+
+    # drop data only if a delete method is set
+    if deleteMethod > 0:
+        if deleteMethod == 1:
+            drop_tables()
+            print("Drop tables done.")
+        elif deleteMethod == 2:
+            drop_stable()
+            print("Drop super tables done.")
+        elif deleteMethod == 3:
+            drop_databases()
+            print("Drop Database done.")
+        sys.exit(0)
+
+    # create databases
+    drop_databases()
+    create_databases()
+
+    # use last database
+    current_db = "%s%d" % (dbName, (numOfDb - 1))
+    use_database()
+
+    if measure:
+        start_time_begin = time.time()
+
+    if numOfStb > 0:
+        create_stb()
+        if not autosubtable:
+            create_tb_using_stb()
+    else:
+        create_tb()
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds for create table.".format(
+                (end_time - start_time_begin)))
+
+    if native:
+        cursor.close()
+        conn.close()
+
+    # start insert data
+    if measure:
+        start_time = time.time()
+
+    manager = Manager()
+    lock = manager.Lock()
+    pool = Pool(processes)
+
+    begin = 0
+    end = 0
+
+    quotient = numOfTb // processes
+    if quotient < 1:
+        processes = numOfTb
+        quotient = 1
+
+    remainder = numOfTb % processes
+    v_print(
+        "num of tables: %d, quotient: %d, remainder: %d",
+        numOfTb,
+        quotient,
+        remainder)
+
+    for i in range(processes):
+        begin = end
+
+        if i < remainder:
+            end = begin + quotient + 1
+        else:
+            end = begin + quotient
+        pool.apply_async(insert_data_process, args=(lock, i, begin, end,))
+
+    pool.close()
+    pool.join()
+    time.sleep(1)
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds for insert data.".format(
+                (end_time - start_time)))
+
+    # query data
+    if queryCmd != "NO":
+        print("queryCmd: %s" % queryCmd)
+        query_data_process(queryCmd)
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds.".format(
+                (end_time - start_time_begin)))
+
+    print("done")
diff --git a/tests/examples/rust b/tests/examples/rust
new file mode 160000
index 0000000000000000000000000000000000000000..1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
--- /dev/null
+++ b/tests/examples/rust
@@ -0,0 +1 @@
+Subproject commit 1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
diff --git a/tests/perftest-scripts/coverage_test.sh b/tests/perftest-scripts/coverage_test.sh
index 5085ec89d00def1a2147f24ccbeb488406541775..1be2053f24f28dd825c040912783675b1eab94f9 100755
--- a/tests/perftest-scripts/coverage_test.sh
+++ b/tests/perftest-scripts/coverage_test.sh
@@ -56,9 +56,9 @@ function runGeneralCaseOneByOne {
       case=`echo $line | grep sim$ |awk '{print $NF}'`
       if [ -n "$case" ]; then
-        ./test.sh -f $case > /dev/null 2>&1 && \
-          echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
- echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT + date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && ./test.sh -f $case > /dev/null 2>&1 && \ + ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT ) \ + || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT fi fi done < $1 diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 8498094181857095fa52123b364119801dc93d56..e43a45ed5e87ec81fc86e1af4733c0e859af69f0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -39,6 +39,8 @@ function buildTDengine { cd $WORK_DIR/TDengine git remote update > /dev/null + git reset --hard HEAD + git checkout develop REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` LOCAL_COMMIT=`git rev-parse --short @` @@ -54,22 +56,23 @@ function buildTDengine { cd debug rm -rf * cmake .. > /dev/null - make > /dev/null - make install + make && make install > /dev/null fi } function runQueryPerfTest { [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 & - echoInfo "Run Performance Test" + echoInfo "Wait TDengine to start" + sleep 120 + echoInfo "Run Performance Test" cd $WORK_DIR/TDengine/tests/pytest python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT - yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -t 1000 -n 1000 > taosdemoperf.txt + yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -x > taosdemoperf.txt CREATETABLETIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` INSERTRECORDSTIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` @@ -104,6 +107,7 @@ function sendReport { stopTaosd buildTDengine runQueryPerfTest +stopTaosd echoInfo "Send Report" sendReport diff --git a/tests/pytest/alter/alter_debugFlag.py b/tests/pytest/alter/alter_debugFlag.py new file mode 100644 index 0000000000000000000000000000000000000000..38d972b58252a704420af0dda7c091c22c894ecd --- /dev/null +++ b/tests/pytest/alter/alter_debugFlag.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"] + + for flag in flagList: + tdSql.execute("alter local %s 131" % flag) + tdSql.execute("alter local %s 135" % flag) + tdSql.execute("alter local %s 143" % flag) + randomFlag = random.randint(100, 250) + if randomFlag != 131 and randomFlag != 135 and randomFlag != 143: + tdSql.error("alter local %s %d" % (flag, randomFlag)) + + tdSql.query("show dnodes") + dnodeId = tdSql.getData(0, 0) + + for flag in flagList: + tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/alter/alter_stable.py b/tests/pytest/alter/alter_stable.py index 6852f4ef4ab6730f6423ba388d4f088ca229312e..6dad43313248388ab34a51f6dd40414f67ebdb7f 100644 --- a/tests/pytest/alter/alter_stable.py +++ b/tests/pytest/alter/alter_stable.py @@ -19,6 +19,10 @@ class TDTestCase: "double", "smallint", "tinyint", + "int unsigned", + "bigint unsigned", + "smallint unsigned", + "tinyint unsigned", "binary(10)", "nchar(10)", "timestamp"] diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py index 48b0154361803c9f614e64b1f66e416613e83131..a5acb7a73e0844ac2235590cbc8b7793ef40746c 100644 --- a/tests/pytest/alter/alter_table.py +++ b/tests/pytest/alter/alter_table.py @@ -19,6 +19,10 @@ class TDTestCase: "double", "smallint", "tinyint", + "int unsigned", + "bigint unsigned", + "smallint unsigned", + "tinyint unsigned", "binary(10)", "nchar(10)", "timestamp"] @@ -126,7 +130,6 @@ class TDTestCase: for i in range(2, size): tdSql.checkData(0, i, self.rowNum * (size - i)) - tdSql.error("alter local debugflag 143") tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") tdSql.execute("create table t0 using st tags(null)") diff --git a/tests/pytest/alter/alter_table_crash.py b/tests/pytest/alter/alter_table_crash.py index d1af022e35e636b15bc6e0fa175c7c41ed1ff640..2ec24860db9c078445cd186807a4c1834469f909 100644 --- a/tests/pytest/alter/alter_table_crash.py +++ b/tests/pytest/alter/alter_table_crash.py @@ -66,6 +66,14 @@ class TDTestCase: "alter table dt add column tbcol8 nchar(20)") tdSql.execute( "alter table dt add column tbcol9 binary(20)") + tdSql.execute( + "alter table dt add column tbcol10 tinyint unsigned") + tdSql.execute( + "alter table dt add column tbcol11 int unsigned") + tdSql.execute( + "alter table dt add column tbcol12 smallint unsigned") + tdSql.execute( + "alter table dt add column tbcol13 bigint unsigned") # restart taosd tdDnodes.forcestop(1) diff --git a/tests/pytest/cluster/clusterEnvSetup/Dockerfile b/tests/pytest/cluster/clusterEnvSetup/Dockerfile index 
b699e8a23ff0e1ff329ffaf4b97aa8f34350eaf7..c9c4d79be981e45609e040bf5835e275fc446260 100644 --- a/tests/pytest/cluster/clusterEnvSetup/Dockerfile +++ b/tests/pytest/cluster/clusterEnvSetup/Dockerfile @@ -2,15 +2,20 @@ FROM ubuntu:latest AS builder ARG PACKAGE=TDengine-server-1.6.5.10-Linux-x64.tar.gz ARG EXTRACTDIR=TDengine-enterprise-server +ARG TARBITRATORPKG=TDengine-tarbitrator-1.6.5.10-Linux-x64.tar.gz +ARG EXTRACTDIR2=TDengine-enterprise-arbitrator ARG CONTENT=taos.tar.gz WORKDIR /root - + COPY ${PACKAGE} . +COPY ${TARBITRATORPKG} . RUN tar -zxf ${PACKAGE} +RUN tar -zxf ${TARBITRATORPKG} RUN mv ${EXTRACTDIR}/driver ./lib RUN tar -zxf ${EXTRACTDIR}/${CONTENT} +RUN mv ${EXTRACTDIR2}/bin/* /root/bin FROM ubuntu:latest @@ -19,8 +24,10 @@ WORKDIR /root RUN apt-get update RUN apt-get install -y vim tmux net-tools RUN echo 'alias ll="ls -l --color=auto"' >> /root/.bashrc - +RUN ulimit -c unlimited + COPY --from=builder /root/bin/taosd /usr/bin +COPY --from=builder /root/bin/tarbitrator /usr/bin COPY --from=builder /root/bin/taos /usr/bin COPY --from=builder /root/cfg/taos.cfg /etc/taos/ COPY --from=builder /root/lib/libtaos.so.* /usr/lib/libtaos.so.1 @@ -29,8 +36,8 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" ENV LC_CTYPE=en_US.UTF-8 ENV LANG=en_US.UTF-8 -EXPOSE 6030-6041/tcp 6060/tcp 6030-6039/udp +EXPOSE 6030-6042/tcp 6060/tcp 6030-6039/udp # VOLUME [ "/var/lib/taos", "/var/log/taos", "/etc/taos" ] -CMD [ "bash" ] +CMD [ "bash" ] \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/basic.py b/tests/pytest/cluster/clusterEnvSetup/basic.py index 10ba91ab06704f9373b7c7a4c1d2cab72287d0dd..d9b8e9ce4a7bc144839334332268ac0f09f78f0d 100644 --- a/tests/pytest/cluster/clusterEnvSetup/basic.py +++ b/tests/pytest/cluster/clusterEnvSetup/basic.py @@ -12,15 +12,89 @@ # -*- coding: utf-8 -*- import os +import taos import random +import argparse -class ClusterTestcase: +class BuildDockerCluser: + + def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion): + self.hostName = hostName + self.user = user + self.password = password + self.configDir = configDir + self.numOfNodes = numOfNodes + self.clusterVersion = clusterVersion + + def getConnection(self): + self.conn = taos.connect( + host = self.hostName, + user = self.user, + password = self.password, + config = self.configDir) + + def createDondes(self): + self.cursor = self.conn.cursor() + for i in range(2, self.numOfNodes + 1): + self.cursor.execute("create dnode tdnode%d" % i) + + def startArbitrator(self): + print("start arbitrator") + os.system("docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator") - def run(self): - os.system("./buildClusterEnv.sh -n 3 -v 2.0.14.1") - os.system("yes|taosdemo -h 172.27.0.7 -n 100 -t 100 -x") - os.system("python3 ../../concurrent_inquiry.py -H 172.27.0.7 -T 4 -t 4 -l 10") + if self.numOfNodes < 2 or self.numOfNodes > 5: + print("the number of nodes must be between 2 and 5") + exit(0) + os.system("./buildClusterEnv.sh -n %d -v %s" % (self.numOfNodes, self.clusterVersion)) + self.getConnection() + self.createDondes() + self.startArbitrator() + +parser = argparse.ArgumentParser() +parser.add_argument( + '-H', + '--host', + action='store', + default='tdnode1', + type=str, + help='host name to be connected (default: tdnode1)') +parser.add_argument( + '-u', + '--user', + action='store', + default='root', + type=str, + help='user (default: root)') +parser.add_argument( + '-p', + '--password', + action='store', + default='taosdata', + type=str, + 
help='password (default: taosdata)') +parser.add_argument( + '-c', + '--config-dir', + action='store', + default='/etc/taos', + type=str, + help='configuration directory (default: /etc/taos)') +parser.add_argument( + '-n', + '--num-of-nodes', + action='store', + default=2, + type=int, + help='number of nodes in the cluster (default: 2, min: 2, max: 5)') +parser.add_argument( + '-v', + '--version', + action='store', + default='2.0.14.1', + type=str, + help='the version of the cluster to be build, Default is 2.0.14.1') -clusterTest = ClusterTestcase() -clusterTest.run() \ No newline at end of file +args = parser.parse_args() +cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version) +cluster.run() \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh index 973d000a0ad582d3aba90fc1f7ff8f7d88812473..968cdd1c1c81b9f6dba68bc2cca542038ada8606 100755 --- a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh +++ b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh @@ -27,18 +27,28 @@ do esac done +function addTaoscfg { + for i in {1..5} + do + touch /data/node$i/cfg/taos.cfg + echo 'firstEp tdnode1:6030' > /data/node$i/cfg/taos.cfg + echo 'fqdn tdnode'$i >> /data/node$i/cfg/taos.cfg + echo 'arbitrator tdnode1:6042' >> /data/node$i/cfg/taos.cfg + done +} function createDIR { - for i in {1.. $2} + for i in {1..5} do mkdir -p /data/node$i/data mkdir -p /data/node$i/log mkdir -p /data/node$i/cfg + mkdir -p /data/node$i/core done } function cleanEnv { - for i in {1..3} + for i in {1..5} do echo /data/node$i/data/* rm -rf /data/node$i/data/* @@ -54,49 +64,60 @@ function prepareBuild { rm -rf $CURR_DIR/../../../../release/* fi - cd $CURR_DIR/../../../../packaging - ./release.sh -v edge -n $VERSION >> /dev/null + if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + cd $CURR_DIR/../../../../packaging + echo "generating TDeninger packages" + ./release.sh -v edge -n $VERSION >> /dev/null - if [ ! -f $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then - echo "no TDengine install package found" - exit 1 - fi + if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then + echo "no TDengine install package found" + exit 1 + fi + + if [ ! 
-e $CURR_DIR/../../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + echo "no arbitrator install package found" + exit 1 + fi - cd $CURR_DIR/../../../../release - mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + cd $CURR_DIR/../../../../release + mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + fi rm -rf $DOCKER_DIR/*.yml cd $CURR_DIR - cp docker-compose.yml $DOCKER_DIR + cp *.yml $DOCKER_DIR cp Dockerfile $DOCKER_DIR - - if [ $NUM_OF_NODES -eq 4 ]; then - cp ../node4.yml $DOCKER_DIR - fi - - if [ $NUM_OF_NODES -eq 5 ]; then - cp ../node5.yml $DOCKER_DIR - fi } function clusterUp { + echo "docker compose start" - cd $DOCKER_DIR + cd $DOCKER_DIR + + if [ $NUM_OF_NODES -eq 2 ]; then + echo "create 2 dnodes" + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d + fi if [ $NUM_OF_NODES -eq 3 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d fi if [ $NUM_OF_NODES -eq 4 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node4.yml up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d fi if [ $NUM_OF_NODES -eq 5 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node4.yml -f node5.yml up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d fi + + echo "docker compose finish" } -cleanEnv +createDIR +cleanEnv +addTaoscfg prepareBuild clusterUp \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml b/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml index c45a09582b5cd46a20ce052f3f535def67c46232..cb35abd9a1497c92dee10e1e6fb95027fb21710c 100644 --- a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml +++ b/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml @@ -1,14 +1,16 @@ version: '3.7' services: - td2.0-node1: - build: + td2.0-node1: + build: context: . 
args: - PACKAGE=${PACKAGE} + - TARBITRATORPKG=${TARBITRATORPKG} - EXTRACTDIR=${DIR} + - EXTRACTDIR2=${DIR2} image: 'tdengine:${VERSION}' - container_name: 'td2.0-node1' + container_name: 'tdnode1' cap_add: - ALL stdin_open: true @@ -18,7 +20,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,14 +42,18 @@ services: - type: bind source: /data/node1/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node1/core + target: /coredump - type: bind source: /data target: /root - hostname: node1 + hostname: tdnode1 networks: taos_update_net: ipv4_address: 172.27.0.7 - command: taosd + command: taosd td2.0-node2: build: @@ -48,7 +62,7 @@ services: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} image: 'tdengine:${VERSION}' - container_name: 'td2.0-node2' + container_name: 'tdnode2' cap_add: - ALL stdin_open: true @@ -58,7 +72,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode1:172.27.0.7" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -72,52 +94,19 @@ services: - type: bind source: /data/node2/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node2/core + target: /coredump - type: bind source: /data target: /root + hostname: tdnode2 networks: taos_update_net: ipv4_address: 172.27.0.8 command: taosd - td2.0-node3: - build: - context: . 
- args: - - PACKAGE=${PACKAGE} - - EXTRACTDIR=${DIR} - image: 'tdengine:${VERSION}' - container_name: 'td2.0-node3' - cap_add: - - ALL - stdin_open: true - tty: true - environment: - TZ: "Asia/Shanghai" - command: > - sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && - echo $TZ > /etc/timezone && - exec my-main-application" - volumes: - # bind data directory - - type: bind - source: /data/node3/data - target: /var/lib/taos - # bind log directory - - type: bind - source: /data/node3/log - target: /var/log/taos - # bind configuration - - type: bind - source: /data/node3/cfg - target: /etc/taos - - type: bind - source: /data - target: /root - networks: - taos_update_net: - ipv4_address: 172.27.0.9 - command: taosd networks: taos_update_net: diff --git a/src/kit/taosdemox/insert.json b/tests/pytest/cluster/clusterEnvSetup/insert.json similarity index 96% rename from src/kit/taosdemox/insert.json rename to tests/pytest/cluster/clusterEnvSetup/insert.json index aa071c115d60d78797b4c36456adfacd0d345af7..56a64b7b8561877cb26b4ef2336ab8b98f26c02c 100644 --- a/src/kit/taosdemox/insert.json +++ b/tests/pytest/cluster/clusterEnvSetup/insert.json @@ -8,6 +8,7 @@ "thread_count": 4, "thread_count_create_tbl": 1, "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", "databases": [{ "dbinfo": { "name": "db", diff --git a/tests/pytest/cluster/clusterEnvSetup/node3.yml b/tests/pytest/cluster/clusterEnvSetup/node3.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f4f3a6f991f0ffff51265fee4c5a3b8941b5d85 --- /dev/null +++ b/tests/pytest/cluster/clusterEnvSetup/node3.yml @@ -0,0 +1,54 @@ +version: '3.7' + +services: + td2.0-node3: + build: + context: . + args: + - PACKAGE=${PACKAGE} + - EXTRACTDIR=${DIR} + image: 'tdengine:${VERSION}' + container_name: 'tdnode3' + cap_add: + - ALL + stdin_open: true + tty: true + environment: + TZ: "Asia/Shanghai" + command: > + sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && + echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && + exec my-main-application" + extra_hosts: + - "tdnode1:172.27.0.7" + - "tdnode2:172.27.0.8" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" + volumes: + # bind data directory + - type: bind + source: /data/node3/data + target: /var/lib/taos + # bind log directory + - type: bind + source: /data/node3/log + target: /var/log/taos + # bind configuration + - type: bind + source: /data/node3/cfg + target: /etc/taos + # bind core dump path + - type: bind + source: /data/node3/core + target: /coredump + - type: bind + source: /data + target: /root + hostname: tdnode3 + networks: + taos_update_net: + ipv4_address: 172.27.0.9 + command: taosd \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/node4.yml b/tests/pytest/cluster/clusterEnvSetup/node4.yml index 542dc4cac1626ebc4387e6138067a5d91d64434d..c82a174cb883b14c885de7c5e8f19d98263b22b7 100644 --- a/tests/pytest/cluster/clusterEnvSetup/node4.yml +++ b/tests/pytest/cluster/clusterEnvSetup/node4.yml @@ -7,8 +7,8 @@ services: args: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} - image: 'tdengine:2.0.13.1' - container_name: 'td2.0-node4' + image: 'tdengine:${VERSION}' + container_name: 'tdnode4' cap_add: - ALL stdin_open: true @@ -18,7 +18,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl 
-p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,9 +40,14 @@ services: - type: bind source: /data/node4/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node4/core + target: /coredump - type: bind source: /data - target: /root + target: /root + hostname: tdnode4 networks: taos_update_net: ipv4_address: 172.27.0.10 diff --git a/tests/pytest/cluster/clusterEnvSetup/node5.yml b/tests/pytest/cluster/clusterEnvSetup/node5.yml index 832cc65e0888e5fc51feae7f13a7cd24b813878b..2e37e47512430ac99244f6b2f0e2d309a2145edc 100644 --- a/tests/pytest/cluster/clusterEnvSetup/node5.yml +++ b/tests/pytest/cluster/clusterEnvSetup/node5.yml @@ -7,8 +7,8 @@ services: args: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} - image: 'tdengine:2.0.13.1' - container_name: 'td2.0-node5' + image: 'tdengine:${VERSION}' + container_name: 'tdnode5' cap_add: - ALL stdin_open: true @@ -18,7 +18,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,9 +40,14 @@ services: - type: bind source: /data/node5/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node5/core + target: /coredump - type: bind source: /data - target: /root + target: /root + hostname: tdnode5 networks: taos_update_net: ipv4_address: 172.27.0.11 diff --git a/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..457dd4ee5aa5919951adbcea834d34cd367d3080 --- /dev/null +++ b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py @@ -0,0 +1,142 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import random
+import argparse
+
+class taosdemoWrapper:
+
+    def __init__(self, host, metadata, database, tables, threads, configDir, replica,
+            columnType, columnsPerTable, rowsPerTable, disorderRatio, disorderRange, charTypeLen):
+        self.host = host
+        self.metadata = metadata
+        self.database = database
+        self.tables = tables
+        self.threads = threads
+        self.configDir = configDir
+        self.replica = replica
+        self.columnType = columnType
+        self.columnsPerTable = columnsPerTable
+        self.rowsPerTable = rowsPerTable
+        self.disorderRatio = disorderRatio
+        self.disorderRange = disorderRange
+        self.charTypeLen = charTypeLen
+
+    def run(self):
+        if self.metadata is None:
+            # pass -l (columns per record) explicitly; the command previously
+            # repeated -t and left the format string one argument short
+            os.system("taosdemo -h %s -d %s -t %d -T %d -c %s -a %d -b %s -l %d -n %d -O %d -R %d -w %d -x -y"
+                      % (self.host, self.database, self.tables, self.threads, self.configDir,
+                         self.replica, self.columnType, self.columnsPerTable, self.rowsPerTable,
+                         self.disorderRatio, self.disorderRange, self.charTypeLen))
+        else:
+            os.system("taosdemo -f %s" % self.metadata)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    '-H',
+    '--host-name',
+    action='store',
+    default='tdnode1',
+    type=str,
+    help='host name to be connected (default: tdnode1)')
+parser.add_argument(
+    '-f',
+    '--metadata',
+    action='store',
+    default=None,
+    type=str,
+    help='path to a meta file driving the run; when -f is given, all other options are ignored (default: None)')
+parser.add_argument(
+    '-d',
+    '--db-name',
+    action='store',
+    default='test',
+    type=str,
+    help='Database name to be created (default: test)')
+parser.add_argument(
+    '-t',
+    '--num-of-tables',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of tables (default: 10)')
+parser.add_argument(
+    '-T',
+    '--num-of-threads',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of rest threads (default: 10)')
+parser.add_argument(
+    '-c',
+    '--config-dir',
+    action='store',
+    default='/etc/taos/',
+    type=str,
+    help='Configuration directory (default: /etc/taos/)')
+parser.add_argument(
+    '-a',
+    '--replica',
+    action='store',
+    default=1,
+    type=int,
+    help='Set the replica parameter of the database (default: 1, min: 1, max: 3)')
+parser.add_argument(
+    '-b',
+    '--column-type',
+    action='store',
+    default='int',
+    type=str,
+    help='the data types of columns, from TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP (default: int)')
+parser.add_argument(
+    '-l',
+    '--num-of-cols',
+    action='store',
+    default=10,
+    type=int,
+    help='The number of columns per record (default: 10)')
+parser.add_argument(
+    '-n',
+    '--num-of-rows',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of rows per table (default: 1000)')
+parser.add_argument(
+    '-O',
+    '--disorder-ratio',
+    action='store',
+    default=0,
+    type=int,
+    help='ratio of out-of-order records (0: in order, > 0: disorder ratio, default: 0)')
+parser.add_argument(
+    '-R',
+    '--disorder-range',
+    action='store',
+    default=0,
+    type=int,
+    help='Range of out-of-order data, in ms (default: 0)')
+parser.add_argument(
+    '-w',
+    '--char-type-length',
+    action='store',
+    default=16,
+    type=int,
+    help='Length of binary/nchar column data (default: 16)')
+
+args = parser.parse_args()
+taosdemo = taosdemoWrapper(args.host_name, args.metadata, args.db_name, args.num_of_tables,
+        args.num_of_threads, args.config_dir, args.replica, args.column_type, args.num_of_cols,
+        args.num_of_rows, args.disorder_ratio, args.disorder_range, args.char_type_length)
+taosdemo.run()
diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py
index dbda5657b6528402cc898762cccc78d74a196cd8..8a264270219232447a598768b45a37779edba698 100644
--- a/tests/pytest/cluster/clusterSetup.py
+++ b/tests/pytest/cluster/clusterSetup.py
@@ -11,15 +11,9 @@
 
 # -*- coding: utf-8 -*-
 
-import os
-import sys
-sys.path.insert(0, os.getcwd())
 from fabric import Connection
-from util.sql import *
-from util.log import *
-import taos
 import random
-import threading
+import time
 import logging
 
 class Node:
@@ -76,6 +70,19 @@ class Node:
             print("remove taosd error for node %d " % self.index)
             logging.exception(e)
 
+    def forceStopOneTaosd(self):
+        try:
+            self.conn.run("kill -9 $(ps -ax|grep taosd|awk '{print $1}')")
+        except Exception as e:
+            print("kill taosd error on node%d " % self.index)
+
+    def startOneTaosd(self):
+        try:
+            self.conn.run("nohup taosd -c /etc/taos/ > /dev/null 2>&1 &")
+        except Exception as e:
+            print("start taosd error on node%d " % self.index)
+            logging.exception(e)
+
     def installTaosd(self, packagePath):
         self.conn.put(packagePath, self.homeDir)
         self.conn.cd(self.homeDir)
@@ -122,100 +129,51 @@ class Node:
 
 class Nodes:
     def __init__(self):
-        self.node1 = Node(1, 'root', '52.151.60.239', 'node1', 'r', '/root/')
-        self.node2 = Node(2, 'root', '52.183.32.246', 'node1', 'r', '/root/')
-        self.node3 = Node(3, 'root', '51.143.46.79', 'node1', 'r', '/root/')
-        self.node4 = Node(4, 'root', '52.183.2.76', 'node1', 'r', '/root/')
-        self.node5 = Node(5, 'root', '13.66.225.87', 'node1', 'r', '/root/')
+        self.tdnodes = []
+        self.tdnodes.append(Node(0, 'root', '52.143.103.7', 'node1', 'a', '/root/'))
+        self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
+        self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
+        self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
+        self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
+
+    def stopOneNode(self, index):
+        self.tdnodes[index].forceStopOneTaosd()
+
+    def startOneNode(self, 
index): + self.tdnodes[index].startOneTaosd() def stopAllTaosd(self): - self.node1.stopTaosd() - self.node2.stopTaosd() - self.node3.stopTaosd() - + for i in range(len(self.tdnodes)): + self.tdnodes[i].stopTaosd() + def startAllTaosd(self): - self.node1.startTaosd() - self.node2.startTaosd() - self.node3.startTaosd() + for i in range(len(self.tdnodes)): + self.tdnodes[i].startTaosd() def restartAllTaosd(self): - self.node1.restartTaosd() - self.node2.restartTaosd() - self.node3.restartTaosd() + for i in range(len(self.tdnodes)): + self.tdnodes[i].restartTaosd() def addConfigs(self, configKey, configValue): - self.node1.configTaosd(configKey, configValue) - self.node2.configTaosd(configKey, configValue) - self.node3.configTaosd(configKey, configValue) + for i in range(len(self.tdnodes)): + self.tdnodes[i].configTaosd(configKey, configValue) - def removeConfigs(self, configKey, configValue): - self.node1.removeTaosConfig(configKey, configValue) - self.node2.removeTaosConfig(configKey, configValue) - self.node3.removeTaosConfig(configKey, configValue) + def removeConfigs(self, configKey, configValue): + for i in range(len(self.tdnodes)): + self.tdnodes[i].removeTaosConfig(configKey, configValue) def removeAllDataFiles(self): - self.node1.removeData() - self.node2.removeData() - self.node3.removeData() - -class ClusterTest: - def __init__(self, hostName): - self.host = hostName - self.user = "root" - self.password = "taosdata" - self.config = "/etc/taos" - self.dbName = "mytest" - self.stbName = "meters" - self.numberOfThreads = 20 - self.numberOfTables = 10000 - self.numberOfRecords = 1000 - self.tbPrefix = "t" - self.ts = 1538548685000 - self.repeat = 1 - - def connectDB(self): - self.conn = taos.connect( - host=self.host, - user=self.user, - password=self.password, - config=self.config) - - def createSTable(self, replica): - cursor = self.conn.cursor() - tdLog.info("drop database if exists %s" % self.dbName) - cursor.execute("drop database if exists %s" % self.dbName) - tdLog.info("create database %s replica %d" % (self.dbName, replica)) - cursor.execute("create database %s replica %d" % (self.dbName, replica)) - tdLog.info("use %s" % self.dbName) - cursor.execute("use %s" % self.dbName) - tdLog.info("drop table if exists %s" % self.stbName) - cursor.execute("drop table if exists %s" % self.stbName) - tdLog.info("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) - cursor.execute("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) - cursor.close() - - def insertData(self, threadID): - print("Thread %d: starting" % threadID) - cursor = self.conn.cursor() - tablesPerThread = int(self.numberOfTables / self.numberOfThreads) - baseTableID = tablesPerThread * threadID - for i in range (tablesPerThread): - cursor.execute("create table %s%d using %s tags(%d)" % (self.tbPrefix, baseTableID + i, self.stbName, baseTableID + i)) - query = "insert into %s%d values" % (self.tbPrefix, baseTableID + i) - base = self.numberOfRecords * i - for j in range(self.numberOfRecords): - query += "(%d, %f, %d, %d)" % (self.ts + base + j, random.random(), random.randint(210, 230), random.randint(0, 10)) - cursor.execute(query) - cursor.close() - print("Thread %d: finishing" % threadID) - - def run(self): - threads = [] - tdLog.info("Inserting data") - for i in range(self.numberOfThreads): - thread = threading.Thread(target=self.insertData, args=(i,)) - threads.append(thread) - thread.start() - - for i in 
range(self.numberOfThreads): - threads[i].join() \ No newline at end of file + for i in range(len(self.tdnodes)): + self.tdnodes[i].removeData() + +# kill taosd randomly every 10 mins +nodes = Nodes() +loop = 0 +while True: + loop = loop + 1 + index = random.randint(0, 4) + print("loop: %d, kill taosd on node%d" %(loop, index)) + nodes.stopOneNode(index) + time.sleep(60) + nodes.startOneNode(index) + time.sleep(600) \ No newline at end of file diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index e832c9a74e1c8b6c42a882a59931bff6d481f445..333c2a0a578f6f399e742b0967db0185a22b5fde 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -40,7 +40,7 @@ class ConcurrentInquiry: # stableNum = 2,subtableNum = 1000,insertRows = 100): def __init__(self,ts,host,user,password,dbname, stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop, - stableNum ,subtableNum ,insertRows ,mix_table): + stableNum ,subtableNum ,insertRows ,mix_table, replay): self.n_numOfTherads = n_Therads self.r_numOfTherads = r_Therads self.ts=ts @@ -65,6 +65,7 @@ class ConcurrentInquiry: self.mix_table = mix_table self.max_ts = datetime.datetime.now() self.min_ts = datetime.datetime.now() - datetime.timedelta(days=5) + self.replay = replay def SetThreadsNum(self,num): self.numOfTherads=num @@ -349,18 +350,27 @@ class ConcurrentInquiry: cl.execute("create database if not exists %s;" %self.dbname) cl.execute("use %s" % self.dbname) for k in range(stableNum): - sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20)) \ - tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20))" % (self.stb_prefix+str(k)) + sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \ + tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20), t11 int unsigned , t12 smallint unsigned , t13 tinyint unsigned , t14 bigint unsigned)" % (self.stb_prefix+str(k)) cl.execute(sql) for j in range(subtableNum): - sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s')" % \ - (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j)) + if j % 100 == 0: + sql = "create table %s using %s tags(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % \ + (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k)) + else: + sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % \ + (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j), j%43, j%23 , j%17 , j%3167) print(sql) cl.execute(sql) for i in range(insertRows): - ret = cl.execute( - "insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s')" % - (self.subtb_prefix+str(k)+'_'+str(j),t0+i,i%100,i/2.0,i%41,i%51,i%53,i*1.0,i%2,'taos'+str(i),'涛思'+str(i))) + if i % 100 == 0 : + ret = cl.execute( + "insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % + (self.subtb_prefix+str(k)+'_'+str(j), t0+i)) + else: + ret = cl.execute( + "insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % + (self.subtb_prefix+str(k)+'_'+str(j), t0+i, i%100, i/2.0, i%41, i%51, i%53, i*1.0, 
i%2,'taos'+str(i),'涛思'+str(i), i%43, i%23 , i%17 , i%3167))
         cl.close()
         conn.close()
@@ -412,7 +422,7 @@ class ConcurrentInquiry:
             )
         cl = conn.cursor()
         cl.execute("use %s;" % self.dbname)
-
+        fo = open('bak_sql_n_%d'%threadID,'w+')
         print("Thread %d: starting" % threadID)
         loop = self.loop
         while loop:
@@ -423,6 +433,7 @@ class ConcurrentInquiry:
             else:
                 sql=self.gen_query_join()
             print("sql is ",sql)
+            fo.write(sql+'\n')
             start = time.time()
             cl.execute(sql)
             cl.fetchall()
@@ -438,13 +449,49 @@ class ConcurrentInquiry:
                     exit(-1)
             loop -= 1
             if loop == 0: break
-
+        fo.close()
         cl.close()
         conn.close()
         print("Thread %d: finishing" % threadID)
+
+    def query_thread_nr(self,threadID):  # replay recorded queries via the native Python connector
+        host = self.host
+        user = self.user
+        password = self.password
+        conn = taos.connect(
+            host,
+            user,
+            password,
+            )
+        cl = conn.cursor()
+        cl.execute("use %s;" % self.dbname)
+        replay_sql = []
+        with open('bak_sql_n_%d'%threadID,'r') as f:
+            replay_sql = f.readlines()
+        print("Replay Thread %d: starting" % threadID)
+        for sql in replay_sql:
+            try:
+                print("sql is ",sql)
+                start = time.time()
+                cl.execute(sql)
+                cl.fetchall()
+                end = time.time()
+                print("time cost :",end-start)
+            except Exception as e:
+                print('-'*40)
+                print(
+                    "Failure thread%d, sql: %s \nexception: %s" %
+                    (threadID, str(sql),str(e)))
+                err_uec='Unable to establish connection'
+                if err_uec in str(e):
+                    exit(-1)
+        cl.close()
+        conn.close()
+        print("Replay Thread %d: finishing" % threadID)

     def query_thread_r(self,threadID):  # query via the REST interface
         print("Thread %d: starting" % threadID)
+        fo = open('bak_sql_r_%d'%threadID,'w+')
         loop = self.loop
         while loop:
             try:
@@ -453,6 +500,7 @@ class ConcurrentInquiry:
             else:
                 sql=self.gen_query_join()
             print("sql is ",sql)
+            fo.write(sql+'\n')
             start = time.time()
             self.rest_query(sql)
             end = time.time()
@@ -467,20 +515,53 @@ class ConcurrentInquiry:
                     exit(-1)
             loop -= 1
             if loop == 0: break
-
-        print("Thread %d: finishing" % threadID)
+        fo.close()
+        print("Thread %d: finishing" % threadID)
+
+    def query_thread_rr(self,threadID):  # replay recorded queries via the REST interface
+        print("Replay Thread %d: starting" % threadID)
+        replay_sql = []
+        with open('bak_sql_r_%d'%threadID,'r') as f:
+            replay_sql = f.readlines()
+
+        for sql in replay_sql:
+            try:
+                print("sql is ",sql)
+                start = time.time()
+                self.rest_query(sql)
+                end = time.time()
+                print("time cost :",end-start)
+            except Exception as e:
+                print('-'*40)
+                print(
+                    "Failure thread%d, sql: %s \nexception: %s" %
+                    (threadID, str(sql),str(e)))
+                err_uec='Unable to establish connection'
+                if err_uec in str(e):
+                    exit(-1)
+        print("Replay Thread %d: finishing" % threadID)

     def run(self):
         print(self.n_numOfTherads,self.r_numOfTherads)
         threads = []
-        for i in range(self.n_numOfTherads):
-            thread = threading.Thread(target=self.query_thread_n, args=(i,))
-            threads.append(thread)
-            thread.start()
-        for i in range(self.r_numOfTherads):
-            thread = threading.Thread(target=self.query_thread_r, args=(i,))
-            threads.append(thread)
-            thread.start()
+        if self.replay:  # replay mode: re-issue recorded queries instead of generating new ones
+            for i in range(self.n_numOfTherads):
+                thread = threading.Thread(target=self.query_thread_nr, args=(i,))
+                threads.append(thread)
+                thread.start()
+            for i in range(self.r_numOfTherads):
+                thread = threading.Thread(target=self.query_thread_rr, args=(i,))
+                threads.append(thread)
+                thread.start()
+        else:
+            for i in range(self.n_numOfTherads):
+                thread = threading.Thread(target=self.query_thread_n, args=(i,))
+                threads.append(thread)
+                thread.start()
+            for i in range(self.r_numOfTherads):
+                thread = threading.Thread(target=self.query_thread_r, args=(i,))
+                threads.append(thread)
+                thread.start()

 parser = argparse.ArgumentParser()
 parser.add_argument(
     '-t',
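Editor's note (sketch, not part of the patch): with the changes above, recording thread i writes its generated SQL to bak_sql_n_i (native) or bak_sql_r_i (REST), and a run started with -R 1 and the same thread counts replays those files one-for-one via query_thread_nr / query_thread_rr. A minimal standalone replayer over one such file, with replay_file as a hypothetical helper, would look like:

    def replay_file(cursor, path):
        # re-issue each recorded query in order, mirroring query_thread_nr
        with open(path) as f:
            for sql in f:
                cursor.execute(sql.strip())
                cursor.fetchall()
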
@@ -595,13 +676,20 @@ parser.add_argument(
     default=0,
     type=int,
     help='0:stable & substable ,1:subtable ,2:stable (default: 0)')
+parser.add_argument(
+    '-R',
+    '--replay',
+    action='store',
+    default=0,
+    type=int,
+    help='0: do not replay, 1: replay recorded queries (default: 0)')

 args = parser.parse_args()
 q = ConcurrentInquiry(
     args.ts,args.host_name,args.user,args.password,args.db_name,
     args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads,
     args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records,
-    args.mix_stable_subtable )
+    args.mix_stable_subtable, args.replay )

 if args.create_table:
     q.gen_data()
diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f426fbbcec3070789209eb787dba61d95571f0e5
--- /dev/null
+++ b/tests/pytest/concurrent_inquiry.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+# This is the script for us to try to cause the TDengine server or client to crash
+#
+# PREPARATION
+#
+# 1. Build and compile the TDengine source code that comes with this script, in the same directory tree
+# 2. Please follow the directions in our README.md, and build TDengine in the build/ directory
+# 3. Adjust the configuration file if needed under build/test/cfg/taos.cfg
+# 4. Run the TDengine server instance: cd build; ./build/bin/taosd -c test/cfg
+# 5. Make sure you have a working Python3 environment: run /usr/bin/python3 --version, and you should get 3.6 or above
+# 6. Make sure you have the proper Python packages: # sudo apt install python3-setuptools python3-pip python3-distutils
+#
+# RUNNING THIS SCRIPT
+#
+# This script assumes the source code directory is intact, and that the binaries have been built in the
+# build/ directory; as such, we will load the Python libraries in the directory tree, and also load
+# the TDengine client shared library (.so) file from the build/ directory, as evidenced in the env
+# variables below.
+#
+# Running the script is simple, no parameter is needed (for now, but this may change in the future).
+#
+# Happy Crashing...
+
+
+# Due to the heavy path name assumptions/usage, let us require that the user be in the current directory
+EXEC_DIR=`dirname "$0"`
+if [[ $EXEC_DIR != "." ]]
+then
+    echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
+    exit -1
+fi
+
+CURR_DIR=`pwd`
+IN_TDINTERNAL="community"
+if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
+    TAOS_DIR=$CURR_DIR/../../..
+    TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
+    LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6,7|rev`/lib
+else
+    TAOS_DIR=$CURR_DIR/../..
+    TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
+    LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
+fi
+
+# Now getting ready to execute Python
+# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk
+PYTHON_EXEC=python3.8
+
+# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
+export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
+
+# Then let us set up the library path so that our compiled SO file can be loaded by Python
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
+
+# Now we are all set; let's see if we can find a crash. Note we pass all params
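+#
+# Editor's note (usage sketch inferred from the branches below; not part of the original patch):
+#
+#   ./concurrent_inquiry.sh              # plain run
+#   ./concurrent_inquiry.sh --valgrind   # run under valgrind memcheck, output in valgrind.out / valgrind.err
+#   ./concurrent_inquiry.sh --helgrind   # run under valgrind --tool=helgrind, output in helgrind.out / helgrind.err
+#
+# Any remaining arguments are passed through to concurrent_inquiry.py.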
+CONCURRENT_INQUIRY=concurrent_inquiry.py
+if [[ $1 == '--valgrind' ]]; then
+    shift
+    export PYTHONMALLOC=malloc
+    VALGRIND_OUT=valgrind.out
+    VALGRIND_ERR=valgrind.err
+    # How to generate valgrind suppression file: https://stackoverflow.com/questions/17159578/generating-suppressions-for-memory-leaks
+    # valgrind --leak-check=full --gen-suppressions=all --log-fd=9 python3.8 ./concurrent_inquiry.py $@ 9>>memcheck.log
+    echo Executing under VALGRIND, with STDOUT/ERR going to $VALGRIND_OUT and $VALGRIND_ERR, please watch them from a different terminal.
+    valgrind \
+        --leak-check=yes \
+        --suppressions=crash_gen/valgrind_taos.supp \
+        $PYTHON_EXEC \
+        $CONCURRENT_INQUIRY $@ > $VALGRIND_OUT 2> $VALGRIND_ERR
+elif [[ $1 == '--helgrind' ]]; then
+    shift
+    HELGRIND_OUT=helgrind.out
+    HELGRIND_ERR=helgrind.err
+    valgrind \
+        --tool=helgrind \
+        $PYTHON_EXEC \
+        $CONCURRENT_INQUIRY $@ > $HELGRIND_OUT 2> $HELGRIND_ERR
+else
+    $PYTHON_EXEC $CONCURRENT_INQUIRY $@
+fi
+
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index c6b857b097671b8faf650f4659c2b2990ef67d97..309c0df9108f340bf96d73529ccf8bb49c1c9692 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -354,10 +354,11 @@ class ThreadCoordinator:
             # end, and maybe signal them to stop
             if isinstance(err, CrashGenError): # our own transition failure
                 Logging.info("State transition error")
+                # TODO: saw an error here once, let's print out stack info for err?
                 traceback.print_stack()
                 transitionFailed = True
                 self._te = None # Not running any more
-                self._execStats.registerFailure("State transition error")
+                self._execStats.registerFailure("State transition error: {}".format(err))
             else:
                 raise
         # return transitionFailed # Why did we have this??!!
@@ -882,8 +883,12 @@ class StateMechine:
         self._stateWeights = [1, 2, 10, 40]

     def init(self, dbc: DbConn): # late initialization, don't save the dbConn
-        self._curState = self._findCurrentState(dbc) # starting state
-        Logging.debug("Found Starting State: {}".format(self._curState))
+        try:
+            self._curState = self._findCurrentState(dbc) # starting state
+        except taos.error.ProgrammingError as err:
+            Logging.error("Failed to initialize state machine, cannot find current state: {}".format(err))
+            traceback.print_stack()
+            raise # re-throw

     # TODO: seems no longer used, remove?
     def getCurrentState(self):
@@ -951,6 +956,8 @@ class StateMechine:
     # We transition the system to a new state by examining the current state itself
     def transition(self, tasks, dbc: DbConn):
+        global gSvcMgr
+
         if (len(tasks) == 0): # before 1st step, or otherwise empty
             Logging.debug("[STT] Starting State: {}".format(self._curState))
             return # do nothing
@@ -1276,6 +1283,7 @@ class Task():
                 0x510, # vnode not in ready state
                 0x14, # db not ready, errno changed
                 0x600, # Invalid table ID, why?
+ 0x218, # Table does not exist 1000 # REST catch-all error ]: return True # These are the ALWAYS-ACCEPTABLE ones @@ -2369,7 +2377,7 @@ class MainExec: '-n', '--dynamic-db-table-names', action='store_true', - help='Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false)') + help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') parser.add_argument( '-o', '--num-dnodes', diff --git a/tests/pytest/crash_gen/db.py b/tests/pytest/crash_gen/db.py index dc072d7abce68debd69cb162a4d784fcc5b68c4e..e38692dbe1e5c33ffe162015e3e60630fd51fa38 100644 --- a/tests/pytest/crash_gen/db.py +++ b/tests/pytest/crash_gen/db.py @@ -15,6 +15,7 @@ from util.log import * from .misc import Logging, CrashGenError, Helper, Dice import os import datetime +import traceback # from .service_manager import TdeInstance class DbConn: @@ -349,6 +350,7 @@ class DbConnNative(DbConn): def execute(self, sql): if (not self.isOpen): + traceback.print_stack() raise CrashGenError( "Cannot exec SQL unless db connection is open", CrashGenError.DB_CONNECTION_NOT_OPEN) Logging.debug("[SQL] Executing SQL: {}".format(sql)) @@ -361,6 +363,7 @@ class DbConnNative(DbConn): def query(self, sql): # return rows affected if (not self.isOpen): + traceback.print_stack() raise CrashGenError( "Cannot query database until connection is open, restarting?", CrashGenError.DB_CONNECTION_NOT_OPEN) Logging.debug("[SQL] Executing SQL: {}".format(sql)) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index aee8f7502c354628d9cb0ca9df55ab4a7ad17c6f..14f2b6e07e39322a3ce62f6176034fd2fba9edbc 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -3,12 +3,16 @@ ulimit -c unlimited python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py +python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/float.py python3 ./test.py -f insert/bigint.py +python3 ./test.py -f insert/unsignedBigint.py python3 ./test.py -f insert/bool.py python3 ./test.py -f insert/double.py python3 ./test.py -f insert/smallint.py +python3 ./test.py -f insert/unsignedSmallint.py python3 ./test.py -f insert/tinyint.py +python3 ./test.py -f insert/unsignedTinyint.py python3 ./test.py -f insert/date.py python3 ./test.py -f insert/binary.py python3 ./test.py -f insert/nchar.py @@ -22,6 +26,7 @@ python3 ./test.py -f insert/insertIntoTwoTables.py #python3 ./test.py -f insert/before_1970.py python3 ./test.py -f insert/metadataUpdate.py python3 bug2265.py +python3 ./test.py -f insert/boundary2.py #table python3 ./test.py -f table/alter_wal0.py @@ -66,7 +71,7 @@ python3 ./test.py -f tag_lite/int.py python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py - +python3 ./test.py -f tag_lite/alter_tag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 ./test.py -f import_merge/importBlock1HO.py @@ -150,6 +155,7 @@ python3 ./test.py -f query/filterCombo.py python3 ./test.py -f query/queryNormal.py python3 ./test.py -f query/queryError.py python3 ./test.py -f query/filterAllIntTypes.py +python3 ./test.py -f query/filterAllUnsignedIntTypes.py python3 ./test.py -f query/filterFloatAndDouble.py python3 ./test.py -f query/filterOtherTypes.py python3 ./test.py -f query/querySort.py @@ -193,6 +199,7 @@ python3 ./test.py -f stream/table_n.py #alter table python3 ./test.py -f alter/alter_table_crash.py python3 ./test.py -f alter/alter_table.py +python3 ./test.py -f alter/alter_debugFlag.py # client 
python3 ./test.py -f client/client.py diff --git a/tests/pytest/functions/function_avg.py b/tests/pytest/functions/function_avg.py index 9481550ba3f9d912b734d19b7c794c495fffdecd..c8a1cca77b1568b760b0208e0fa71583ae2e3645 100644 --- a/tests/pytest/functions/function_avg.py +++ b/tests/pytest/functions/function_avg.py @@ -34,11 +34,11 @@ class TDTestCase: floatData = [] tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -63,7 +63,15 @@ class TDTestCase: tdSql.query("select avg(col5) from test") tdSql.checkData(0, 0, np.average(floatData)) tdSql.query("select avg(col6) from test") - tdSql.checkData(0, 0, np.average(floatData)) + tdSql.checkData(0, 0, np.average(floatData)) + tdSql.query("select avg(col11) from test") + tdSql.checkData(0, 0, np.average(intData)) + tdSql.query("select avg(col12) from test") + tdSql.checkData(0, 0, np.average(intData)) + tdSql.query("select avg(col13) from test") + tdSql.checkData(0, 0, np.average(intData)) + tdSql.query("select avg(col14) from test") + tdSql.checkData(0, 0, np.average(intData)) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_avg_restart.py b/tests/pytest/functions/function_avg_restart.py index 56b99cdf910869b5bc4f5bd93da208943b1b026e..60f3ba7d4e2708053425b4866e072516b5ff961c 100644 --- a/tests/pytest/functions/function_avg_restart.py +++ b/tests/pytest/functions/function_avg_restart.py @@ -34,11 +34,11 @@ class TDTestCase: floatData = [] #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') #tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - #tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + #tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -63,7 +63,15 @@ class TDTestCase: tdSql.query("select avg(col5) from test") tdSql.checkData(0, 0, np.average(floatData)) tdSql.query("select avg(col6) from test") - tdSql.checkData(0, 0, np.average(floatData)) + tdSql.checkData(0, 0, np.average(floatData)) + tdSql.query("select avg(col11) from test") + tdSql.checkData(0, 0, 
np.average(intData)) + tdSql.query("select avg(col12) from test") + tdSql.checkData(0, 0, np.average(intData)) + tdSql.query("select avg(col13) from test") + tdSql.checkData(0, 0, np.average(intData)) + tdSql.query("select avg(col14) from test") + tdSql.checkData(0, 0, np.average(intData)) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_bottom.py b/tests/pytest/functions/function_bottom.py index 3cc389221845b38d92ca130e086d50fddc344d6d..abb9ac48e70ac876741c3da2a9f223d537c8644b 100644 --- a/tests/pytest/functions/function_bottom.py +++ b/tests/pytest/functions/function_bottom.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) # bottom verifacation tdSql.error("select bottom(ts, 10) from test") @@ -84,6 +84,26 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkData(0, 1, 0.1) tdSql.checkData(1, 1, 1.1) + + tdSql.query("select bottom(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) #TD-2457 bottom + interval + order by tdSql.error('select top(col2,1) from test interval(1y) order by col2;') diff --git a/tests/pytest/functions/function_bottom_restart.py b/tests/pytest/functions/function_bottom_restart.py index 74eb044645ffa6a54e286dd11d022672575a38db..b4d987975c3660e2ea875a03f3406eaebcafea6d 100644 --- a/tests/pytest/functions/function_bottom_restart.py +++ b/tests/pytest/functions/function_bottom_restart.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.execute("use db") #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') #tdSql.execute("create table test1 using test tags('beijing')") #for i in range(self.rowNum): - # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i 
+ 1, i + 1, i + 1, i + 1, i + 1, i + 1)) # bottom verifacation tdSql.error("select bottom(ts, 10) from test") @@ -75,6 +75,26 @@ class TDTestCase: tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 2) + tdSql.query("select bottom(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + + tdSql.query("select bottom(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + tdSql.query("select bottom(col5, 2) from test") tdSql.checkRows(2) tdSql.checkData(0, 1, 0.1) diff --git a/tests/pytest/functions/function_count.py b/tests/pytest/functions/function_count.py index 4795a6b2dfe72bb9fa747b550be9f56c0c255d88..9812473d6496a94447734f1f3507ac806e392294 100644 --- a/tests/pytest/functions/function_count.py +++ b/tests/pytest/functions/function_count.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) # Count verifacation tdSql.query("select count(*) from test") @@ -62,11 +62,20 @@ class TDTestCase: tdSql.query("select count(col9) from test") tdSql.checkData(0, 0, 10) + tdSql.query("select count(col11) from test") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(col12) from test") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(col13) from test") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(col14) from test") + tdSql.checkData(0, 0, 10) + tdSql.execute("alter table test add column col10 int") tdSql.query("select count(col10) from test") tdSql.checkRows(0) - tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' 1)") + tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' , 1, 1, 1, 1, 1)") tdSql.query("select count(col10) from test") tdSql.checkData(0, 0, 1) diff --git a/tests/pytest/functions/function_count_restart.py b/tests/pytest/functions/function_count_restart.py index 5eabb47d952a1a461b6fd5c976f3f2a8c294222b..565d85f4c0712b436e2dc24335aa13f60d3c87dd 100644 --- a/tests/pytest/functions/function_count_restart.py +++ b/tests/pytest/functions/function_count_restart.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.execute("use db") #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, 
col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') #tdSql.execute("create table test1 using test tags('beijing')") #for i in range(self.rowNum): - # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) # Count verifacation tdSql.query("select count(*) from test") @@ -62,6 +62,15 @@ class TDTestCase: tdSql.query("select count(col9) from test") tdSql.checkData(0, 0, 11) + tdSql.query("select count(col11) from test") + tdSql.checkData(0, 0, 11) + tdSql.query("select count(col12) from test") + tdSql.checkData(0, 0, 11) + tdSql.query("select count(col13) from test") + tdSql.checkData(0, 0, 11) + tdSql.query("select count(col14) from test") + tdSql.checkData(0, 0, 11) + #tdSql.execute("alter table test add column col10 int") #tdSql.query("select count(col10) from test") #tdSql.checkRows(0) diff --git a/tests/pytest/functions/function_diff.py b/tests/pytest/functions/function_diff.py index b6f496d7d68ebaa0f19dfc947d7745802198a3dc..46ea3a86deee75d26f0b635ae0d3b2c5673fb6da 100644 --- a/tests/pytest/functions/function_diff.py +++ b/tests/pytest/functions/function_diff.py @@ -31,9 +31,9 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") - tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1)) + tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1)) # diff verifacation tdSql.query("select diff(col1) from test1") @@ -55,8 +55,8 @@ class TDTestCase: tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.error("select diff(ts) from test") tdSql.error("select diff(ts) from test1") @@ -72,7 +72,16 @@ class TDTestCase: tdSql.error("select diff(col8) from test1") tdSql.error("select diff(col9) from test") tdSql.error("select diff(col9) from test1") - + tdSql.error("select diff(col11) from test1") + tdSql.error("select diff(col12) from test1") + tdSql.error("select diff(col13) from test1") + tdSql.error("select diff(col14) from test1") + tdSql.error("select diff(col11) from test") + tdSql.error("select diff(col12) from test") + tdSql.error("select diff(col13) from test") + tdSql.error("select diff(col14) from test") + + tdSql.query("select diff(col1) from test1") tdSql.checkRows(10) diff --git a/tests/pytest/functions/function_diff_restart.py b/tests/pytest/functions/function_diff_restart.py index 
870ee03fc94cbd8dbf5e858726cd52b09010a884..e03b892477eba46c007163049d91012af236e730 100644 --- a/tests/pytest/functions/function_diff_restart.py +++ b/tests/pytest/functions/function_diff_restart.py @@ -31,7 +31,7 @@ class TDTestCase: tdSql.execute("use db") #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') #tdSql.execute("create table test1 using test tags('beijing')") #tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1)) @@ -55,8 +55,8 @@ class TDTestCase: #tdSql.checkRows(0) #for i in range(self.rowNum): - # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + # tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + # % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.error("select diff(ts) from test") tdSql.error("select diff(ts) from test1") @@ -71,7 +71,14 @@ class TDTestCase: tdSql.error("select diff(col8) from test") tdSql.error("select diff(col8) from test1") tdSql.error("select diff(col9) from test") - tdSql.error("select diff(col9) from test1") + tdSql.error("select diff(col11) from test1") + tdSql.error("select diff(col12) from test1") + tdSql.error("select diff(col13) from test1") + tdSql.error("select diff(col14) from test1") + tdSql.error("select diff(col11) from test") + tdSql.error("select diff(col12) from test") + tdSql.error("select diff(col13) from test") + tdSql.error("select diff(col14) from test") tdSql.query("select diff(col1) from test1") tdSql.checkRows(10) diff --git a/tests/pytest/functions/function_first.py b/tests/pytest/functions/function_first.py index 5b2aacb779a64377baa0417d5ba0ba5ef0a27290..f1a916b168271e67d0001c5d4444966f8c07a2d1 100644 --- a/tests/pytest/functions/function_first.py +++ b/tests/pytest/functions/function_first.py @@ -31,7 +31,7 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) @@ -52,6 +52,18 @@ class TDTestCase: tdSql.query("select first(col4) from test1") tdSql.checkRows(0) + tdSql.query("select first(col11) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(0) + tdSql.query("select first(col5) from test1") tdSql.checkRows(0) @@ -68,8 +80,8 @@ class TDTestCase: tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 
2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.query("select first(*) from test1") tdSql.checkRows(1) @@ -91,6 +103,22 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0, 0, 1) + tdSql.query("select first(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.query("select first(col5) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, 0.1) diff --git a/tests/pytest/functions/function_last.py b/tests/pytest/functions/function_last.py index 337c18a28c2cd99d556e5b017377edc56671c490..991ac96a800803440a2e662c163622af95c556e3 100644 --- a/tests/pytest/functions/function_last.py +++ b/tests/pytest/functions/function_last.py @@ -31,7 +31,7 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) @@ -52,6 +52,18 @@ class TDTestCase: tdSql.query("select last(col4) from test1") tdSql.checkRows(0) + tdSql.query("select last(col11) from test1") + tdSql.checkRows(0) + + tdSql.query("select last(col12) from test1") + tdSql.checkRows(0) + + tdSql.query("select last(col13) from test1") + tdSql.checkRows(0) + + tdSql.query("select last(col14) from test1") + tdSql.checkRows(0) + tdSql.query("select last(col5) from test1") tdSql.checkRows(0) @@ -68,8 +80,8 @@ class TDTestCase: tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.query("select last(*) from test1") tdSql.checkRows(1) @@ -91,6 +103,22 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0, 0, 10) + tdSql.query("select last(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col5) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, 9.1) diff --git a/tests/pytest/functions/function_last_row.py b/tests/pytest/functions/function_last_row.py index ea5cf661eb9e70e2ed06ebd712b2f65dfa62beeb..d075eeeca55c7e6575ab9bd526ddb6177bf61791 100644 --- a/tests/pytest/functions/function_last_row.py +++ 
b/tests/pytest/functions/function_last_row.py @@ -31,7 +31,7 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) @@ -56,6 +56,22 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0, 0, None) + tdSql.query("select last_row(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query("select last_row(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query("select last_row(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query("select last_row(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + tdSql.query("select last_row(col5) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, None) @@ -77,8 +93,8 @@ class TDTestCase: tdSql.checkData(0, 0, None) for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.query("select last_row(*) from test1") tdSql.checkRows(1) @@ -100,6 +116,22 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0, 0, 10) + tdSql.query("select last_row(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last_row(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last_row(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select last_row(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last_row(col5) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, 9.1) diff --git a/tests/pytest/functions/function_leastsquares.py b/tests/pytest/functions/function_leastsquares.py index 0ef0a2b78612dca604f7a7a7f70841c917376a17..3b2a8486d5e6d8bbc2464b8ee391181e10e6a285 100644 --- a/tests/pytest/functions/function_leastsquares.py +++ b/tests/pytest/functions/function_leastsquares.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, 
i + 1, i + 1)) # leastsquares verifacation tdSql.error("select leastsquares(ts, 1, 1) from test1") @@ -48,6 +48,10 @@ class TDTestCase: tdSql.error("select leastsquares(col7, 1, 1) from test1") tdSql.error("select leastsquares(col8, 1, 1) from test1") tdSql.error("select leastsquares(col9, 1, 1) from test1") + tdSql.error("select leastsquares(col11, 1, 1) from test") + tdSql.error("select leastsquares(col12, 1, 1) from test") + tdSql.error("select leastsquares(col13, 1, 1) from test") + tdSql.error("select leastsquares(col14, 1, 1) from test") tdSql.query("select leastsquares(col1, 1, 1) from test1") tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') @@ -61,6 +65,18 @@ class TDTestCase: tdSql.query("select leastsquares(col4, 1, 1) from test1") tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + tdSql.query("select leastsquares(col11, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col12, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col13, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col14, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + tdSql.query("select leastsquares(col5, 1, 1) from test1") tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}') diff --git a/tests/pytest/functions/function_leastsquares_restart.py b/tests/pytest/functions/function_leastsquares_restart.py index 0221dc0aed865882e375f90c44f52b391d1ffee5..8c38d5dd95ccd4537af623253b08fb2253e4d140 100644 --- a/tests/pytest/functions/function_leastsquares_restart.py +++ b/tests/pytest/functions/function_leastsquares_restart.py @@ -54,6 +54,18 @@ class TDTestCase: tdSql.query("select leastsquares(col4, 1, 1) from test1") tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + tdSql.query("select leastsquares(col11, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col12, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col13, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + + tdSql.query("select leastsquares(col14, 1, 1) from test1") + tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}') + tdSql.query("select leastsquares(col5, 1, 1) from test1") tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}') diff --git a/tests/pytest/functions/function_max.py b/tests/pytest/functions/function_max.py index d1f8f75892ce2e61388c107eb7cad1ed00582d2f..c322b6af26362949f7e8b45a58e4c9cef3d9de19 100644 --- a/tests/pytest/functions/function_max.py +++ b/tests/pytest/functions/function_max.py @@ -34,11 +34,11 @@ class TDTestCase: floatData = [] tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + 
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -63,6 +63,18 @@ class TDTestCase: tdSql.query("select max(col4) from test1") tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col11) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col12) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col13) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col14) from test1") + tdSql.checkData(0, 0, np.max(intData)) tdSql.query("select max(col5) from test1") tdSql.checkData(0, 0, np.max(floatData)) diff --git a/tests/pytest/functions/function_max_restart.py b/tests/pytest/functions/function_max_restart.py index 8d1827561758f1ceebffab877aabd6cd79166d42..a04fcedd8b908b030932d5d182b8cf20c28e2685 100644 --- a/tests/pytest/functions/function_max_restart.py +++ b/tests/pytest/functions/function_max_restart.py @@ -34,7 +34,7 @@ class TDTestCase: floatData = [] #tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - # col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + # col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') #tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): intData.append(i + 1) @@ -61,6 +61,18 @@ class TDTestCase: tdSql.query("select max(col4) from test1") tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col11) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col12) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col13) from test1") + tdSql.checkData(0, 0, np.max(intData)) + + tdSql.query("select max(col14) from test1") + tdSql.checkData(0, 0, np.max(intData)) tdSql.query("select max(col5) from test1") tdSql.checkData(0, 0, np.max(floatData)) diff --git a/tests/pytest/functions/function_min.py b/tests/pytest/functions/function_min.py index c779744ced63d83fd5b2116fb61dfa347ec4dd5a..b4d6d58f7ce01b97dd7f5f569409a0288fb0389b 100644 --- a/tests/pytest/functions/function_min.py +++ b/tests/pytest/functions/function_min.py @@ -34,11 +34,11 @@ class TDTestCase: floatData = [] tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -63,6 +63,18 @@ class TDTestCase: tdSql.query("select min(col4) from 
test1") tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col11) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col12) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col13) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col14) from test1") + tdSql.checkData(0, 0, np.min(intData)) tdSql.query("select min(col5) from test1") tdSql.checkData(0, 0, np.min(floatData)) diff --git a/tests/pytest/functions/function_min_restart.py b/tests/pytest/functions/function_min_restart.py index c8329bffa5525c2386d264010acf7231059e344d..fc2cb761cae164c8c04992768133d1e00a16fccb 100644 --- a/tests/pytest/functions/function_min_restart.py +++ b/tests/pytest/functions/function_min_restart.py @@ -58,6 +58,18 @@ class TDTestCase: tdSql.query("select min(col4) from test1") tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col11) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col12) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col13) from test1") + tdSql.checkData(0, 0, np.min(intData)) + + tdSql.query("select min(col14) from test1") + tdSql.checkData(0, 0, np.min(intData)) tdSql.query("select min(col5) from test1") tdSql.checkData(0, 0, np.min(floatData)) diff --git a/tests/pytest/functions/function_operations.py b/tests/pytest/functions/function_operations.py index 930c23d8a60d84bcc57fa8952e4d8c8e847ec138..e703147b6722f848263f170b89ce9f972f7a2435 100644 --- a/tests/pytest/functions/function_operations.py +++ b/tests/pytest/functions/function_operations.py @@ -31,11 +31,11 @@ class TDTestCase: tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) # min verifacation tdSql.error("select ts + col1 from test") @@ -51,9 +51,9 @@ class TDTestCase: tdSql.checkRows(10) tdSql.checkData(0, 0, 2.0) - tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1") + tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 from test1") tdSql.checkRows(10) - tdSql.checkData(0, 0, 3.2) + tdSql.checkData(0, 0, 7.2) tdSql.execute("insert into test1(ts, col1) values(%d, 11)" % (self.ts + 11)) tdSql.query("select col1 + col2 from test1") @@ -64,11 +64,42 @@ class TDTestCase: tdSql.checkRows(11) tdSql.checkData(10, 0, None) - tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1") + tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 from test1") tdSql.checkRows(11) tdSql.checkData(10, 0, None) + # test for tarithoperator.c coverage + tdSql.execute("insert into test1 
values(1537146000010,1,NULL,9,8,1.2,1.3,0,1,1,5,4,3,2)") + tdSql.execute("insert into test1 values(1537146000011,2,1,NULL,9,1.2,1.3,1,2,2,6,5,4,3)") + tdSql.execute("insert into test1 values(1537146000012,3,2,1,NULL,1.2,1.3,0,3,3,7,6,5,4)") + tdSql.execute("insert into test1 values(1537146000013,4,3,2,1,1.2,1.3,1,4,4,8,7,6,5)") + tdSql.execute("insert into test1 values(1537146000014,5,4,3,2,1.2,1.3,0,5,5,9,8,7,6)") + tdSql.execute("insert into test1 values(1537146000015,6,5,4,3,1.2,1.3,1,6,6,NULL,9,8,7)") + tdSql.execute("insert into test1 values(1537146000016,7,6,5,4,1.2,1.3,0,7,7,1,NULL,9,8)") + tdSql.execute("insert into test1 values(1537146000017,8,7,6,5,1.2,1.3,1,8,8,2,1,NULL,9)") + tdSql.execute("insert into test1 values(1537146000018,9,8,7,6,1.2,1.3,0,9,9,3,2,1,NULL)") + tdSql.execute("insert into test1 values(1537146000019,NULL,9,8,7,1.2,1.3,1,10,10,4,3,2,1)") + self.ts = self.ts + self.rowNum + 10 + + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 1 )) + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 2 )) + tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % ( self.ts + self.rowNum + 3 )) + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 4 )) + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 5 )) + self.rowNum = self.rowNum + 5 + + col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' ] + op_list = [ '+' , '-' , '*' , '/' , '%' ] + err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ] + for i in col_list : + for j in col_list : + for k in op_list : + sql = " select %s %s %s from test1 " % ( i , k , j ) + if i in err_list or j in err_list: + tdSql.error(sql) + else: + tdSql.query(sql) def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/functions/function_operations_restart.py b/tests/pytest/functions/function_operations_restart.py index 6e3990264cb91d8c063fe638a5f77231ef7be424..3ba787a4a0f3f8af74f461f4e668078d726a428d 100644 --- a/tests/pytest/functions/function_operations_restart.py +++ b/tests/pytest/functions/function_operations_restart.py @@ -41,24 +41,24 @@ class TDTestCase: tdSql.error("select col1 + col9 from test1") tdSql.query("select col1 + col2 from test1") - tdSql.checkRows(11) + tdSql.checkRows(25) tdSql.checkData(0, 0, 2.0) - tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1") - tdSql.checkRows(11) - tdSql.checkData(0, 0, 3.2) + tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 from test1") + tdSql.checkRows(25) + tdSql.checkData(0, 0, 7.2) #tdSql.execute("insert into test1(ts, col1) values(%d, 11)" % (self.ts + 11)) tdSql.query("select col1 + col2 from test1") - tdSql.checkRows(11) + tdSql.checkRows(25) tdSql.checkData(10, 0, None) tdSql.query("select col1 + col2 * col3 from test1") - tdSql.checkRows(11) + tdSql.checkRows(25) tdSql.checkData(10, 0, None) - tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1") - tdSql.checkRows(11) + tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 
from test1") + tdSql.checkRows(25) tdSql.checkData(10, 0, None) diff --git a/tests/pytest/functions/function_percentile.py b/tests/pytest/functions/function_percentile.py index 688e91eefa79348a88f37f3b7cb0cc1ac63152d8..9b5e209d33d1f8e97c5baf83898dd62746c11dd8 100644 --- a/tests/pytest/functions/function_percentile.py +++ b/tests/pytest/functions/function_percentile.py @@ -34,10 +34,10 @@ class TDTestCase: floatData = [] tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.rowNum): - tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -103,6 +103,58 @@ class TDTestCase: tdSql.query("select apercentile(col4, 100) from test") print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col11, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col11, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col11, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col12, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col12, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col12, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col12, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col12, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col12, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col13, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col13, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col13, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col13, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col13, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col13, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col14, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col14, 0) from test") + 
print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col14, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col14, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col14, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col14, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col5, 0) from test") print("query result: %s" % tdSql.getData(0, 0)) print("array result: %s" % np.percentile(floatData, 0)) diff --git a/tests/pytest/functions/function_percentile_restart.py b/tests/pytest/functions/function_percentile_restart.py index ade10282dedd655bc7ccb912a30a2a3e88a72223..5e6898784b55bbffb5e01946fff9dc60e281ebff 100644 --- a/tests/pytest/functions/function_percentile_restart.py +++ b/tests/pytest/functions/function_percentile_restart.py @@ -99,6 +99,58 @@ class TDTestCase: tdSql.query("select apercentile(col4, 100) from test") print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col11, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col11, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col11, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col11, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col12, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col12, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col12, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col12, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col12, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col12, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col13, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col13, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col13, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col13, 50) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col13, 100) from test") + tdSql.checkData(0, 0, np.percentile(intData, 100)) + tdSql.query("select apercentile(col13, 100) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + + tdSql.query("select percentile(col14, 0) from test") + tdSql.checkData(0, 0, np.percentile(intData, 0)) + tdSql.query("select apercentile(col14, 0) from test") + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.query("select percentile(col14, 50) from test") + tdSql.checkData(0, 0, np.percentile(intData, 50)) + tdSql.query("select apercentile(col14, 50) from test") + print("apercentile result: %s" % 
diff --git a/tests/pytest/functions/function_percentile_restart.py b/tests/pytest/functions/function_percentile_restart.py
index ade10282dedd655bc7ccb912a30a2a3e88a72223..5e6898784b55bbffb5e01946fff9dc60e281ebff 100644
--- a/tests/pytest/functions/function_percentile_restart.py
+++ b/tests/pytest/functions/function_percentile_restart.py
@@ -99,6 +99,58 @@ class TDTestCase:
         tdSql.query("select apercentile(col4, 100) from test")
         print("apercentile result: %s" % tdSql.getData(0, 0))

+        tdSql.query("select percentile(col11, 0) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 0))
+        tdSql.query("select apercentile(col11, 0) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col11, 50) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 50))
+        tdSql.query("select apercentile(col11, 50) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col11, 100) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 100))
+        tdSql.query("select apercentile(col11, 100) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+
+        tdSql.query("select percentile(col12, 0) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 0))
+        tdSql.query("select apercentile(col12, 0) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col12, 50) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 50))
+        tdSql.query("select apercentile(col12, 50) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col12, 100) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 100))
+        tdSql.query("select apercentile(col12, 100) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+
+        tdSql.query("select percentile(col13, 0) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 0))
+        tdSql.query("select apercentile(col13, 0) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col13, 50) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 50))
+        tdSql.query("select apercentile(col13, 50) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col13, 100) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 100))
+        tdSql.query("select apercentile(col13, 100) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+
+        tdSql.query("select percentile(col14, 0) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 0))
+        tdSql.query("select apercentile(col14, 0) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col14, 50) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 50))
+        tdSql.query("select apercentile(col14, 50) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+        tdSql.query("select percentile(col14, 100) from test")
+        tdSql.checkData(0, 0, np.percentile(intData, 100))
+        tdSql.query("select apercentile(col14, 100) from test")
+        print("apercentile result: %s" % tdSql.getData(0, 0))
+
         tdSql.query("select percentile(col5, 0) from test")
         print("query result: %s" % tdSql.getData(0, 0))
         print("array result: %s" % np.percentile(floatData, 0))
tdSql.error("select spread(col8) from test1") tdSql.error("select spread(col9) from test") tdSql.error("select spread(col9) from test1") - + tdSql.query("select spread(col1) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, 10) @@ -53,6 +53,23 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0, 0, 10) + tdSql.query("select spread(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select spread(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select spread(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select spread(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + + tdSql.query("select spread(col5) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, 9.1) diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py index a5b2d31dd1c6ef6c86e988c97c6dd94968731f16..51e005936183853ee2a4c9cc58bd5dd192b99006 100644 --- a/tests/pytest/functions/function_stddev.py +++ b/tests/pytest/functions/function_stddev.py @@ -34,11 +34,11 @@ class TDTestCase: floatData = [] tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''') + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table test1 using test tags('beijing')") for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) @@ -68,6 +68,18 @@ class TDTestCase: tdSql.query("select stddev(col4) from test1") tdSql.checkData(0, 0, np.std(intData)) + tdSql.query("select stddev(col11) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col12) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col13) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col14) from test1") + tdSql.checkData(0, 0, np.std(intData)) + tdSql.query("select stddev(col5) from test1") tdSql.checkData(0, 0, np.std(floatData)) diff --git a/tests/pytest/functions/function_stddev_restart.py b/tests/pytest/functions/function_stddev_restart.py index 837f8bb4066618187529f68893666e97bf65b37c..48f9821d89473b66134b5ece06f1d0c417c24b36 100644 --- a/tests/pytest/functions/function_stddev_restart.py +++ b/tests/pytest/functions/function_stddev_restart.py @@ -55,6 +55,18 @@ class TDTestCase: tdSql.query("select stddev(col4) from test1") tdSql.checkData(0, 0, np.std(intData)) + tdSql.query("select stddev(col11) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col12) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col13) from test1") + tdSql.checkData(0, 0, np.std(intData)) + + tdSql.query("select stddev(col14) from test1") + tdSql.checkData(0, 0, np.std(intData)) + tdSql.query("select stddev(col5) from test1") tdSql.checkData(0, 0, np.std(floatData)) diff --git 
diff --git a/tests/pytest/functions/function_spread_restart.py b/tests/pytest/functions/function_spread_restart.py
index ba71da0a18ac2e1e2a1ead6ab3de9ff5cae6a600..134d92c9c38c1ae37e9ea1a0a91d0e93957a7e2a 100644
--- a/tests/pytest/functions/function_spread_restart.py
+++ b/tests/pytest/functions/function_spread_restart.py
@@ -36,7 +36,7 @@ class TDTestCase:
         tdSql.error("select spread(col8) from test1")
         tdSql.error("select spread(col9) from test")
         tdSql.error("select spread(col9) from test1")
-
+
         tdSql.query("select spread(col1) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 10)
@@ -53,6 +53,23 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 10)

+        tdSql.query("select spread(col11) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select spread(col12) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select spread(col13) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+        tdSql.query("select spread(col14) from test1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 10)
+
+
         tdSql.query("select spread(col5) from test1")
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 9.1)
diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py
index a5b2d31dd1c6ef6c86e988c97c6dd94968731f16..51e005936183853ee2a4c9cc58bd5dd192b99006 100644
--- a/tests/pytest/functions/function_stddev.py
+++ b/tests/pytest/functions/function_stddev.py
@@ -34,11 +34,11 @@ class TDTestCase:
         floatData = []

         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute("create table test1 using test tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
             intData.append(i + 1)
             floatData.append(i + 0.1)

@@ -68,6 +68,18 @@ class TDTestCase:
         tdSql.query("select stddev(col4) from test1")
         tdSql.checkData(0, 0, np.std(intData))

+        tdSql.query("select stddev(col11) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col12) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col13) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col14) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
         tdSql.query("select stddev(col5) from test1")
         tdSql.checkData(0, 0, np.std(floatData))
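The exact checkData comparison against np.std works because np.std defaults to the population standard deviation (ddof=0), which is evidently what stddev() returns here, since the values must match exactly. For the 1..10 fixture:

    import numpy as np

    int_data = [i + 1 for i in range(10)]
    print(np.std(int_data))          # population std (ddof=0), ~2.8723
    print(np.std(int_data, ddof=1))  # the sample std would differ, ~3.0277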
diff --git a/tests/pytest/functions/function_stddev_restart.py b/tests/pytest/functions/function_stddev_restart.py
index 837f8bb4066618187529f68893666e97bf65b37c..48f9821d89473b66134b5ece06f1d0c417c24b36 100644
--- a/tests/pytest/functions/function_stddev_restart.py
+++ b/tests/pytest/functions/function_stddev_restart.py
@@ -55,6 +55,18 @@ class TDTestCase:
         tdSql.query("select stddev(col4) from test1")
         tdSql.checkData(0, 0, np.std(intData))

+        tdSql.query("select stddev(col11) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col12) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col13) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
+        tdSql.query("select stddev(col14) from test1")
+        tdSql.checkData(0, 0, np.std(intData))
+
         tdSql.query("select stddev(col5) from test1")
         tdSql.checkData(0, 0, np.std(floatData))
diff --git a/tests/pytest/functions/function_stddev_td2555.py b/tests/pytest/functions/function_stddev_td2555.py
index a4da7d5e29b53ac9051dc39aa8af0708197dacd2..d7c820c37644397d18cc090afb1ee82efeaecf6c 100644
--- a/tests/pytest/functions/function_stddev_td2555.py
+++ b/tests/pytest/functions/function_stddev_td2555.py
@@ -32,25 +32,31 @@ class TDTestCase:
         self.clist4 = []
         self.clist5 = []
         self.clist6 = []
+        self.clist11 = []
+        self.clist12 = []
+        self.clist13 = []
+        self.clist14 = []

     def getData(self):
         for i in range(tdSql.queryRows):
            for j in range(6):
                 exec('self.clist{}.append(tdSql.queryResult[i][j+1])'.format(j+1))
+           for j in range(11,15):
+                exec('self.clist{}.append(tdSql.queryResult[i][j-1])'.format(j))

     def run(self):
         tdSql.prepare()
         tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20)) tags(cid int,gbid binary(20),loc nchar(20))''')
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(cid int,gbid binary(20),loc nchar(20))''')
         tdSql.execute("create table test1 using test tags(1,'beijing','北京')")
         tdSql.execute("create table test2 using test tags(2,'shanghai','深圳')")
         tdSql.execute("create table test3 using test tags(2,'shenzhen','深圳')")
         tdSql.execute("create table test4 using test tags(1,'shanghai','上海')")
         for j in range(4):
             for i in range(self.rowNum):
-                tdSql.execute("insert into test%d values(now-%dh, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
-                            % (j+1,i, i + 1, i + 1, i + 1, i + 1, i + i * 0.1, i * 1.5, i % 2, i + 1, i + 1))
+                tdSql.execute("insert into test%d values(now-%dh, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                            % (j+1,i, i + 1, i + 1, i + 1, i + 1, i + i * 0.1, i * 1.5, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

         # stddev verification
         tdSql.error("select stddev(ts) from test")
@@ -70,6 +76,10 @@ class TDTestCase:
             exec('tdSql.query("select stddev(col{}) from test {}")'.format(i+1,condition))
             exec('tdSql.checkData(0, 0, np.std(self.clist{}))'.format(i+1))
             exec('self.clist{}.clear()'.format(i+1))
+        for i in range(11,15):
+            exec('tdSql.query("select stddev(col{}) from test {}")'.format(i,condition))
+            exec('tdSql.checkData(0, 0, np.std(self.clist{}))'.format(i))
+            exec('self.clist{}.clear()'.format(i))
         print('step 2')
         con_group_list = { ' cid = 2 and ts >=now - 1d and ts
 %d and ts < %d" % (self.ts, self.ts + self.rowNum))
         tdSql.query("select twa(col4) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))

+        tdSql.error("select twa(col11) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+        tdSql.query("select twa(col11) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+        tdSql.error("select twa(col12) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+        tdSql.query("select twa(col12) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+        tdSql.error("select twa(col13) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+        tdSql.query("select twa(col13) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
+        tdSql.error("select twa(col14) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+        tdSql.query("select twa(col14) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
+
         tdSql.error("select twa(col5) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
         tdSql.query("select twa(col5) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
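twa() is the time-weighted average: each value is weighted by how long it is held rather than counted once, which is also why it demands an explicit time window and, in this version, errors on the super table (test) while succeeding on the child table (test1). A toy sketch of the idea over irregular samples (trapezoidal weighting is an assumption; the server's interpolation rule may differ):

    def twa(points):
        # points: list of (ts_ms, value) sorted by timestamp
        area = 0.0
        for (t0, v0), (t1, v1) in zip(points, points[1:]):
            area += (v0 + v1) / 2.0 * (t1 - t0)   # trapezoid between samples
        return area / (points[-1][0] - points[0][0])

    print(twa([(0, 1.0), (10, 1.0), (20, 4.0)]))  # 1.75, vs the plain mean 2.0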
-name "taosd"|grep -v community|head -n1` nohup $TAOSD_DIR >/dev/null & cd - -./crash_gen.sh --valgrind -p -t 10 -s 350 -b 4 +./crash_gen.sh --valgrind -p -t 10 -s 500 -b 4 pidof taosd|xargs kill -9 grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log @@ -36,11 +36,13 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 3 -a "$defiMemError" -lt 1013 ]; then - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + cat valgrind.err + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 elif [ "$defiMemError" -gt 1013 ];then #add for azure + cat valgrind.err echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 diff --git a/tests/pytest/insert/basic_unsigned.py b/tests/pytest/insert/basic_unsigned.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7e0e5e4ad57a35d574d88ea179f713c8a8b611 --- /dev/null +++ b/tests/pytest/insert/basic_unsigned.py @@ -0,0 +1,56 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + ret = tdSql.execute( + 'create table tb (ts timestamp, speed tinyint unsigned)') + + insertRows = 10 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into tb values (now + %dm, %d)' % + (i, i)) + + tdLog.info("insert earlier data") + tdSql.execute('insert into tb values (now - 5m , NULL)') + tdSql.execute('insert into tb values (now - 6m , 10)') + tdSql.execute('insert into tb values (now - 7m , NULL)') + tdSql.execute('insert into tb values (now - 8m , 254)') + + tdSql.error('insert into tb values (now - 9m, -1)') + tdSql.error('insert into tb values (now - 9m, 255)') + + tdSql.query("select * from tb") + tdSql.checkRows(insertRows + 4) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/before_1970.py b/tests/pytest/insert/before_1970.py index cb17b657aad1a3bfdb915c9661bad291b75d6f04..b2c4dc57c7a37b4b119308335d326521fe986f08 100644 --- a/tests/pytest/insert/before_1970.py +++ b/tests/pytest/insert/before_1970.py @@ -41,7 +41,7 @@ class TDTestCase: #TODO : should add more testcases tdSql.execute("insert into test values('1930-12-12 01:19:20.345', 1);") tdSql.execute("insert into test values('1969-12-30 23:59:59.999', 2);") - tdSql.execute("insert into test 
values(-3600, 3);") + tdSql.execute("insert into test values(-3600001, 3);") tdSql.execute("insert into test values('2020-10-20 14:02:53.770', 4);") print("==============insert data") @@ -61,11 +61,11 @@ class TDTestCase: print("==============step4") tdSql.execute("use demo;") tdSql.query("select * from test;") - # print(tdSql.queryResult) + print(tdSql.queryResult) tdSql.checkRows(4) tdSql.checkData(0,0,'1930-12-12 01:19:20.345000') tdSql.checkData(1,0,'1969-12-30 23:59:59.999000') - tdSql.checkData(2,0,'1970-01-01 07:00:00.000000') + tdSql.checkData(2,0,'1970-01-01 06:59:59.999000') tdSql.checkData(3,0,'2020-10-20 14:02:53.770000') print("==============check data") diff --git a/tests/pytest/insert/boundary2.py b/tests/pytest/insert/boundary2.py new file mode 100644 index 0000000000000000000000000000000000000000..99784b7cd9cfa7fc8d32720442cefa7946f6511f --- /dev/null +++ b/tests/pytest/insert/boundary2.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +import time +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + + def run(self): + tdSql.prepare() + + startTime = time.time() + print("==============step1") + sql = "create table stb(ts timestamp, " + for i in range(1022): + sql += "col%d binary(14), " % (i + 1) + sql += "col1023 binary(22))" + tdSql.execute(sql) + + for i in range(4096): + sql = "insert into stb values(%d, " + for j in range(1022): + str = "'%s', " % self.get_random_string(14) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + + tdSql.query("select * from stb") + tdSql.checkRows(4096) + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select * from stb") + tdSql.checkRows(4096) + + endTime = time.time() + + print("total time %ds" % (endTime - startTime)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/metadataUpdate.py b/tests/pytest/insert/metadataUpdate.py index c3ea5c6a7e7b5dbff9ded75394793e7fc4a7fd6c..76b9a3ae8f818e3bf3784eb99974ce56b1071af5 100644 --- a/tests/pytest/insert/metadataUpdate.py +++ b/tests/pytest/insert/metadataUpdate.py @@ -52,8 +52,8 @@ class TDTestCase: p.start() p.join() p.terminate() - - tdSql.execute("insert into tb values(%d, 1, 2)" % (self.ts + 1)) + + tdSql.execute("insert into tb(ts, col1, col2) values(%d, 1, 2)" % (self.ts + 2)) print("==============step2") tdSql.query("select * from tb") diff --git a/tests/pytest/insert/unsignedBigint.py b/tests/pytest/insert/unsignedBigint.py new file mode 100644 index 
diff --git a/tests/pytest/insert/boundary2.py b/tests/pytest/insert/boundary2.py
new file mode 100644
index 0000000000000000000000000000000000000000..99784b7cd9cfa7fc8d32720442cefa7946f6511f
--- /dev/null
+++ b/tests/pytest/insert/boundary2.py
@@ -0,0 +1,72 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+import time
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.ts = 1538548685000
+
+    def get_random_string(self, length):
+        letters = string.ascii_lowercase
+        result_str = ''.join(random.choice(letters) for i in range(length))
+        return result_str
+
+    def run(self):
+        tdSql.prepare()
+
+        startTime = time.time()
+        print("==============step1")
+        sql = "create table stb(ts timestamp, "
+        for i in range(1022):
+            sql += "col%d binary(14), " % (i + 1)
+        sql += "col1023 binary(22))"
+        tdSql.execute(sql)
+
+        for i in range(4096):
+            sql = "insert into stb values(%d, "
+            for j in range(1022):
+                str = "'%s', " % self.get_random_string(14)
+                sql += str
+            sql += "'%s')" % self.get_random_string(22)
+            tdSql.execute(sql % (self.ts + i))
+
+        tdSql.query("select * from stb")
+        tdSql.checkRows(4096)
+
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+
+        tdSql.query("select * from stb")
+        tdSql.checkRows(4096)
+
+        endTime = time.time()
+
+        print("total time %ds" % (endTime - startTime))
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/insert/metadataUpdate.py b/tests/pytest/insert/metadataUpdate.py
index c3ea5c6a7e7b5dbff9ded75394793e7fc4a7fd6c..76b9a3ae8f818e3bf3784eb99974ce56b1071af5 100644
--- a/tests/pytest/insert/metadataUpdate.py
+++ b/tests/pytest/insert/metadataUpdate.py
@@ -52,8 +52,8 @@ class TDTestCase:
         p.start()
         p.join()
         p.terminate()
-
-        tdSql.execute("insert into tb values(%d, 1, 2)" % (self.ts + 1))
+
+        tdSql.execute("insert into tb(ts, col1, col2) values(%d, 1, 2)" % (self.ts + 2))

         print("==============step2")
         tdSql.query("select * from tb")
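boundary2.py probes a width boundary: one timestamp plus 1023 binary columns, i.e. 1022 x 14 + 22 = 14330 bytes of declared binary payload per row, close to the engine's row-size ceiling (the exact limit is version-dependent; these are the patch's chosen numbers, not a documented constant):

    n_binary_cols = 1023
    payload = 1022 * 14 + 22            # binary(14) x 1022 plus one binary(22)
    print(n_binary_cols + 1, payload)   # 1024 columns including ts; 14330 bytes declared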
(now+6m, 2a'1)") + tdLog.info("insert into tb values (now+6m, 2)") + tdSql.execute("insert into tb values (now+6m, 2)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(7)') + tdSql.checkRows(7) + tdLog.info('tdSql.checkData(0, 1, 2)') + tdSql.checkData(0, 1, 2) + tdLog.info('drop database db') + tdSql.execute('drop database db') + tdLog.info('show databases') + tdSql.query('show databases') + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) +# convert end + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/unsignedInt.py b/tests/pytest/insert/unsignedInt.py new file mode 100644 index 0000000000000000000000000000000000000000..ed18999bc415022da34788a79e9045a5f67cf8ee --- /dev/null +++ b/tests/pytest/insert/unsignedInt.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('=============== step1') + tdLog.info('create table tb (ts timestamp, speed int unsigned)') + tdSql.execute('create table tb (ts timestamp, speed int unsigned)') + tdLog.info("insert into tb values (now, NULL)") + tdSql.execute("insert into tb values (now, NULL)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + tdLog.info('tdSql.checkData(0, 1, null)') + tdSql.checkData(0, 1, None) + tdLog.info('=============== step2') + tdLog.info("insert into tb values (now+1m, -1) -x step2") + tdSql.error("insert into tb values (now+1m, -1) ") + tdLog.info("insert into tb values (now+1m, NULL)") + tdSql.execute("insert into tb values (now+1m, NULL)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(2)') + tdSql.checkRows(2) + tdLog.info('tdSql.checkData(0, 1, null)') + tdSql.checkData(0, 1, None) + tdLog.info('=============== step3') + tdLog.info("insert into tb values (now+2m, 4294967294)") + tdSql.execute("insert into tb values (now+2m, 4294967294)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(3)') + tdSql.checkRows(3) + tdLog.info('tdSql.checkData(0, 1, 4294967294)') + tdSql.checkData(0, 1, 4294967294) + tdLog.info('=============== step4') + tdLog.info("insert into tb values (now+3m, 4294967295) -x step4") + tdSql.error("insert into tb values (now+3m, 4294967295)") + tdLog.info("insert into tb values (now+3m, NULL)") + tdSql.execute("insert into tb values (now+3m, NULL)") + tdLog.info('select * from tb') + tdSql.query('select * from tb') + tdLog.info('tdSql.checkRow(4)') + tdSql.checkRows(4) + tdLog.info('tdSql.checkData(0, 1, null)') + tdSql.checkData(0, 1, None) + tdLog.info('=============== step5') + tdLog.info("insert into tb values (now+4m, a2)") + tdSql.error("insert into tb values (now+4m, a2)") + tdLog.info("insert into tb values (now-4m, -1)") + tdSql.error("insert into tb values (now-4m, -1)") + tdLog.info("insert into tb values (now+4m, 0)") + tdSql.execute("insert into tb values (now+4m, 0)") + tdLog.info('select * from tb order by ts desc') + 
diff --git a/tests/pytest/insert/unsignedInt.py b/tests/pytest/insert/unsignedInt.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed18999bc415022da34788a79e9045a5f67cf8ee
--- /dev/null
+++ b/tests/pytest/insert/unsignedInt.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+
+        tdLog.info('=============== step1')
+        tdLog.info('create table tb (ts timestamp, speed int unsigned)')
+        tdSql.execute('create table tb (ts timestamp, speed int unsigned)')
+        tdLog.info("insert into tb values (now, NULL)")
+        tdSql.execute("insert into tb values (now, NULL)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(1)')
+        tdSql.checkRows(1)
+        tdLog.info('tdSql.checkData(0, 1, null)')
+        tdSql.checkData(0, 1, None)
+        tdLog.info('=============== step2')
+        tdLog.info("insert into tb values (now+1m, -1) -x step2")
+        tdSql.error("insert into tb values (now+1m, -1) ")
+        tdLog.info("insert into tb values (now+1m, NULL)")
+        tdSql.execute("insert into tb values (now+1m, NULL)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(2)')
+        tdSql.checkRows(2)
+        tdLog.info('tdSql.checkData(0, 1, null)')
+        tdSql.checkData(0, 1, None)
+        tdLog.info('=============== step3')
+        tdLog.info("insert into tb values (now+2m, 4294967294)")
+        tdSql.execute("insert into tb values (now+2m, 4294967294)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(3)')
+        tdSql.checkRows(3)
+        tdLog.info('tdSql.checkData(0, 1, 4294967294)')
+        tdSql.checkData(0, 1, 4294967294)
+        tdLog.info('=============== step4')
+        tdLog.info("insert into tb values (now+3m, 4294967295) -x step4")
+        tdSql.error("insert into tb values (now+3m, 4294967295)")
+        tdLog.info("insert into tb values (now+3m, NULL)")
+        tdSql.execute("insert into tb values (now+3m, NULL)")
+        tdLog.info('select * from tb')
+        tdSql.query('select * from tb')
+        tdLog.info('tdSql.checkRow(4)')
+        tdSql.checkRows(4)
+        tdLog.info('tdSql.checkData(0, 1, null)')
+        tdSql.checkData(0, 1, None)
+        tdLog.info('=============== step5')
+        tdLog.info("insert into tb values (now+4m, a2)")
+        tdSql.error("insert into tb values (now+4m, a2)")
+        tdLog.info("insert into tb values (now-4m, -1)")
+        tdSql.error("insert into tb values (now-4m, -1)")
+        tdLog.info("insert into tb values (now+4m, 0)")
+        tdSql.execute("insert into tb values (now+4m, 0)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(5)')
+        tdSql.checkRows(5)
+        tdLog.info('tdSql.checkData(0, 1, 0)')
+        tdSql.checkData(0, 1, 0)
+        tdLog.info('=============== step6')
+        tdLog.info("insert into tb values (now+5m, 2a)")
+        tdSql.error("insert into tb values (now+5m, 2a)")
+        tdLog.info("insert into tb values (now+5m, 2)")
+        tdSql.execute("insert into tb values (now+5m, 2)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(6)')
+        tdSql.checkRows(6)
+        tdLog.info('tdSql.checkData(0, 1, 2)')
+        tdSql.checkData(0, 1, 2)
+        tdLog.info('=============== step7')
+        tdLog.info("insert into tb values (now+6m, 2a'1)")
+        tdSql.error("insert into tb values (now+6m, 2a'1)")
+        tdLog.info("insert into tb values (now+6m, 2)")
+        tdSql.execute("insert into tb values (now+6m, 2)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(7)')
+        tdSql.checkRows(7)
+        tdLog.info('tdSql.checkData(0, 1, 2)')
+        tdSql.checkData(0, 1, 2)
+        tdLog.info('drop database db')
+        tdSql.execute('drop database db')
+        tdLog.info('show databases')
+        tdSql.query('show databases')
+        tdLog.info('tdSql.checkRow(0)')
+        tdSql.checkRows(0)
+# convert end
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
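These insert tests all encode the same boundary rule: an N-bit unsigned column accepts at most 2**N - 2, because the all-ones value 2**N - 1 is rejected (reserved, presumably as the internal NULL marker). Hence 254/255 for tinyint in basic_unsigned.py, 65534/65535 for smallint below, 4294967294/4294967295 for int, and 18446744073709551614/18446744073709551615 for bigint:

    # Derives the accepted maximum per unsigned width used by these tests.
    for name, bits in [("tinyint", 8), ("smallint", 16), ("int", 32), ("bigint", 64)]:
        print(name, "max accepted:", 2**bits - 2, "rejected:", 2**bits - 1)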
step4") + tdSql.error("insert into tb values (now+3m, 65535)") + tdLog.info("insert into tb values (now+3m, NULL)") + tdSql.execute("insert into tb values (now+3m, NULL)") + tdLog.info('select * from tb') + tdSql.query('select * from tb') + tdLog.info('tdSql.checkRow(4)') + tdSql.checkRows(4) + tdLog.info('tdSql.checkData(0, 1, null)') + tdSql.checkData(0, 1, None) + tdLog.info('=============== step5') + tdLog.info("insert into tb values (now+4m, a2)") + tdSql.error("insert into tb values (now+4m, a2)") + tdLog.info("insert into tb values (now-4m, -1)") + tdSql.error("insert into tb values (now-4m, -1)") + tdLog.info("insert into tb values (now+4m, 0)") + tdSql.execute("insert into tb values (now+4m, 0)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + tdLog.info('tdSql.checkData(0, 1, 0)') + tdSql.checkData(0, 1, 0) + tdLog.info('=============== step6') + tdLog.info("insert into tb values (now+5m, 2a)") + tdSql.error("insert into tb values (now+5m, 2a)") + tdLog.info("insert into tb values (now+5m, 2)") + tdSql.execute("insert into tb values (now+5m, 2)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(6)') + tdSql.checkRows(6) + tdLog.info('tdSql.checkData(0, 1, 2)') + tdSql.checkData(0, 1, 2) + tdLog.info('=============== step7') + tdLog.info("insert into tb values (now+6m, 2a'1)") + tdSql.error("insert into tb values (now+6m, 2a'1)") + tdLog.info("insert into tb values (now+6m, 2)") + tdSql.execute("insert into tb values (now+6m, 2)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb order by ts desc') + tdLog.info('tdSql.checkRow(7)') + tdSql.checkRows(7) + tdLog.info('tdSql.checkData(0, 1, 2)') + tdSql.checkData(0, 1, 2) + tdLog.info('drop database db') + tdSql.execute('drop database db') + tdLog.info('show databases') + tdSql.query('show databases') + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) +# convert end + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/unsigenedTinyint.py b/tests/pytest/insert/unsignedTinyint.py similarity index 99% rename from tests/pytest/insert/unsigenedTinyint.py rename to tests/pytest/insert/unsignedTinyint.py index 1c0634b69a105c72030cdc5d15a31b8af656d48d..5bdfe7580b198ccaffcab7005e88aa72e1c437af 100644 --- a/tests/pytest/insert/unsigenedTinyint.py +++ b/tests/pytest/insert/unsignedTinyint.py @@ -95,7 +95,7 @@ class TDTestCase: tdSql.query('show databases') tdLog.info('tdSql.checkRow(0)') tdSql.checkRows(0) -# convert end + def stop(self): tdSql.close() diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh index 6905f0c61eb077cc45e4c9a6f40a7650931fbf16..e6638cbb171ab3d8dc38c13f5767ff2f09c522bb 100755 --- a/tests/pytest/pytest_1.sh +++ b/tests/pytest/pytest_1.sh @@ -16,10 +16,10 @@ python3 ./test.py -f insert/nchar.py python3 ./test.py -f insert/nchar-unicode.py python3 ./test.py -f insert/multi.py python3 ./test.py -f insert/randomNullCommit.py -#python3 insert/retentionpolicy.py +python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py -#python3 ./test.py -f insert/before_1970.py +python3 ./test.py -f insert/before_1970.py python3 bug2265.py #table diff --git 
diff --git a/tests/pytest/pytest_2.sh b/tests/pytest/pytest_2.sh
index 4ec517a0bf1c5eff8ad670cf28ab63d5ce818460..d152ed85fb9e138a6b9d62574bcc8a5119973448 100755
--- a/tests/pytest/pytest_2.sh
+++ b/tests/pytest/pytest_2.sh
@@ -7,15 +7,10 @@ python3 ./test.py -f update/append_commit_data.py
 python3 ./test.py -f update/append_commit_last-0.py
 python3 ./test.py -f update/append_commit_last.py
 python3 ./test.py -f update/merge_commit_data.py
-python3 ./test.py -f update/merge_commit_data-0.py
+
 python3 ./test.py -f update/merge_commit_data2.py
 python3 ./test.py -f update/merge_commit_data2_update0.py
 python3 ./test.py -f update/merge_commit_last-0.py
 python3 ./test.py -f update/merge_commit_last.py
 python3 ./test.py -f update/bug_td2279.py
-# wal
-python3 ./test.py -f wal/addOldWalTest.py
-
-# function
-python3 ./test.py -f functions/all_null_value.py
\ No newline at end of file
diff --git a/tests/pytest/pytest_3.sh b/tests/pytest/pytest_3.sh
index 5e59bb879b1a6920dff536b49a13d976eba94256..b1e2539fa935bcb9ce69482325e1e4c6df0503f2 100755
--- a/tests/pytest/pytest_3.sh
+++ b/tests/pytest/pytest_3.sh
@@ -70,25 +70,7 @@ python3 testCompress.py
 python3 testNoCompress.py
 python3 testMinTablesPerVnode.py

-# functions
-python3 ./test.py -f functions/function_avg.py -r 1
-python3 ./test.py -f functions/function_bottom.py -r 1
-python3 ./test.py -f functions/function_count.py -r 1
-python3 ./test.py -f functions/function_diff.py -r 1
-python3 ./test.py -f functions/function_first.py -r 1
-python3 ./test.py -f functions/function_last.py -r 1
-python3 ./test.py -f functions/function_last_row.py -r 1
-python3 ./test.py -f functions/function_leastsquares.py -r 1
-python3 ./test.py -f functions/function_max.py -r 1
-python3 ./test.py -f functions/function_min.py -r 1
-python3 ./test.py -f functions/function_operations.py -r 1
-python3 ./test.py -f functions/function_percentile.py -r 1
-python3 ./test.py -f functions/function_spread.py -r 1
-python3 ./test.py -f functions/function_stddev.py -r 1
-python3 ./test.py -f functions/function_sum.py -r 1
-python3 ./test.py -f functions/function_top.py -r 1
-python3 ./test.py -f functions/function_twa.py -r 1
-python3 ./test.py -f functions/function_twa_test2.py
+
 python3 queryCount.py
 python3 ./test.py -f query/queryGroupbyWithInterval.py
 python3 client/twoClients.py
diff --git a/tests/pytest/pytest_4.sh b/tests/pytest/pytest_4.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dada90c709a967df37693d7398692c6d7fc12039
--- /dev/null
+++ b/tests/pytest/pytest_4.sh
@@ -0,0 +1,39 @@
+python3 ./test.py -f update/merge_commit_data-0.py
+# wal
+python3 ./test.py -f wal/addOldWalTest.py
+
+# function
+python3 ./test.py -f functions/all_null_value.py
+# functions
+python3 ./test.py -f functions/function_avg.py -r 1
+python3 ./test.py -f functions/function_bottom.py -r 1
+python3 ./test.py -f functions/function_count.py -r 1
+python3 ./test.py -f functions/function_diff.py -r 1
+python3 ./test.py -f functions/function_first.py -r 1
+python3 ./test.py -f functions/function_last.py -r 1
+python3 ./test.py -f functions/function_last_row.py -r 1
+python3 ./test.py -f functions/function_leastsquares.py -r 1
+python3 ./test.py -f functions/function_max.py -r 1
+python3 ./test.py -f functions/function_min.py -r 1
+python3 ./test.py -f functions/function_operations.py -r 1
+python3 ./test.py -f functions/function_percentile.py -r 1
+python3 ./test.py -f functions/function_spread.py -r 1
+python3 ./test.py -f functions/function_stddev.py -r 1
+python3 ./test.py -f functions/function_sum.py -r 1
+python3 ./test.py -f functions/function_top.py -r 1
+python3 ./test.py -f functions/function_twa.py -r 1
+python3 ./test.py -f functions/function_twa_test2.py
+python3 ./test.py -f functions/function_stddev_td2555.py
+python3 ./test.py -f insert/metadataUpdate.py
+python3 ./test.py -f tools/taosdemoTest2.py
+python3 ./test.py -f query/last_cache.py
+python3 ./test.py -f query/last_row_cache.py
+python3 ./test.py -f account/account_create.py
+python3 ./test.py -f alter/alter_table.py
+python3 ./test.py -f query/queryGroupbySort.py
+
+python3 ./test.py -f insert/unsignedInt.py
+python3 ./test.py -f insert/unsignedBigint.py
+python3 ./test.py -f insert/unsignedSmallint.py
+python3 ./test.py -f insert/unsignedTinyint.py
+python3 ./test.py -f query/filterAllUnsignedIntTypes.py
\ No newline at end of file
diff --git a/tests/pytest/query/filterAllUnsignedIntTypes.py b/tests/pytest/query/filterAllUnsignedIntTypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ff2f2b6eeb1424b0bcfd9a9e54537e8f216f9c6
--- /dev/null
+++ b/tests/pytest/query/filterAllUnsignedIntTypes.py
@@ -0,0 +1,176 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.powers = [8, 16, 32, 64]
+        self.types = ["tinyint", "smallint", "int", "bigint"]
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        for i in range(len(self.powers)):
+            curType = self.types[i]
+            print("======= Verify filter for %s type =========" % (curType))
+            tdLog.debug(
+                "create table st%s(ts timestamp, num %s unsigned) tags(id %s unsigned)" %
+                (curType, curType, curType))
+            tdSql.execute(
+                "create table st%s(ts timestamp, num %s unsigned) tags(id %s unsigned)" %
+                (curType, curType, curType))
+
+            # create 10 tables, insert 10 rows for each table
+            for j in range(self.rowNum):
+                tdSql.execute(
+                    "create table st%s%d using st%s tags(%d)" %
+                    (curType, j + 1, curType, j + 1))
+                for k in range(self.rowNum):
+                    tdSql.execute(
+                        "insert into st%s%d values(%d, %d)" %
+                        (curType, j + 1, self.ts + k + 1, j * 10 + k + 1))
+
+            tdSql.error("insert into st%s10 values(%d, %d)" %
+                        (curType, self.ts + 11, pow(2, self.powers[i]) - 1 ))
+            tdSql.execute("insert into st%s10 values(%d, %d)" %
+                        (curType, self.ts + 12, pow(2, self.powers[i]) - 2 ))
+            tdSql.error("insert into st%s10 values(%d, %d)" %
+                        (curType, self.ts + 13, -1 ))
+            tdSql.execute("insert into st%s10 values(%d, %d)" %
+                        (curType, self.ts + 14, 0 ))
+            # select all data
+            # tdSql.query("select * from st%s " % curType)
+            # tdSql.checkRows(102)
+
+            # > for int type on column
+            tdSql.query("select * from st%s where num > 50" % curType)
+            tdSql.checkRows(51)
+
+            # >= for int type on column
+            tdSql.query("select * from st%s where num >= 50" % curType)
+            tdSql.checkRows(52)
+
+            # = for int type on column
+            tdSql.query("select * from st%s where num = 50" % curType)
+            tdSql.checkRows(1)
+
+            # < for int type on column
+            tdSql.query("select * from st%s where num < 50" % curType)
+            tdSql.checkRows(50)
+
+            # <= for int type on column
+            tdSql.query("select * from st%s where num <= 50" % curType)
+            tdSql.checkRows(51)
+
+            # <> for int type on column
+            tdSql.query("select * from st%s where num <> 50" % curType)
+            tdSql.checkRows(101)
+
+            # != for int type on column
+            tdSql.query("select * from st%s where num != 50" % curType)
+            tdSql.checkRows(101)
+
+            # range for int type on column
+            tdSql.query(
+                "select * from st%s where num > 50 and num < 100" %
+                curType)
+            tdSql.checkRows(49)
+
+            tdSql.query(
+                "select * from st%s where num >= 50 and num < 100" %
+                curType)
+            tdSql.checkRows(50)
+
+            tdSql.query(
+                "select * from st%s where num > 50 and num <= 100" %
+                curType)
+            tdSql.checkRows(50)
+
+            tdSql.query(
+                "select * from st%s where num >= 50 and num <= 100" %
+                curType)
+            tdSql.checkRows(51)
+
+            # > for int type on tag
+            tdSql.query("select * from st%s where id > 5" % curType)
+            tdSql.checkRows(52)
+
+            # >= for int type on tag
+            tdSql.query("select * from st%s where id >= 5" % curType)
+            tdSql.checkRows(62)
+
+            # = for int type on tag
+            tdSql.query("select * from st%s where id = 5" % curType)
+            tdSql.checkRows(10)
+
+            # < for int type on tag
+            tdSql.query("select * from st%s where id < 5" % curType)
+            tdSql.checkRows(40)
+
+            # <= for int type on tag
+            tdSql.query("select * from st%s where id <= 5" % curType)
+            tdSql.checkRows(50)
+
+            # <> for int type on tag
+            tdSql.query("select * from st%s where id <> 5" % curType)
+            tdSql.checkRows(92)
+
+            # != for int type on tag
+            tdSql.query("select * from st%s where id != 5" % curType)
+            tdSql.checkRows(92)
+
+            # != for int type on tag
+            tdSql.query("select * from st%s where id != 5" % curType)
+            tdSql.checkRows(92)
+
+            # range for int type on tag
+            tdSql.query("select * from st%s where id > 5 and id < 7" % curType)
+            tdSql.checkRows(10)
+
+            tdSql.query(
+                "select * from st%s where id >= 5 and id < 7" %
+                curType)
+            tdSql.checkRows(20)
+
+            tdSql.query(
+                "select * from st%s where id > 5 and id <= 7" %
+                curType)
+            tdSql.checkRows(20)
+
+            tdSql.query(
+                "select * from st%s where id >= 5 and id <= 7" %
+                curType)
+            tdSql.checkRows(30)
+
+            print(
+                "======= Verify filter for %s type finished =========" %
+                curType)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
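The two extra inserts at the top of the loop follow the same 2**power - 2 boundary rule, so each super table ends up with 102 rows: 100 loop values (1..100), one max-boundary row, and one zero row. That is why num > 50 checks 51 rows, i.e. the values 51..100 plus the boundary row:

    # Row-count bookkeeping for the filter checks (tinyint case shown).
    nums = [j * 10 + k + 1 for j in range(10) for k in range(10)]  # 1..100
    nums += [2**8 - 2, 0]                    # boundary rows inserted into st<type>10
    print(len(nums))                         # 102
    print(sum(1 for n in nums if n > 50))    # 51, matching checkRows(51)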
diff --git a/tests/pytest/query/last_row_cache.py b/tests/pytest/query/last_row_cache.py
index d9e09dae7acdcd8c7401f1370da9f544c850872d..a0e81477096e9c846e109ae71020b40e47b39a84 100644
--- a/tests/pytest/query/last_row_cache.py
+++ b/tests/pytest/query/last_row_cache.py
@@ -25,18 +25,23 @@ class TDTestCase:

         self.tables = 10
         self.rows = 20
+        self.columns = 50
         self.perfix = 't'
         self.ts = 1601481600000

     def insertData(self):
-        print("==============step1")
-        tdSql.execute("create table st (ts timestamp, c1 int) tags(t1 int)")
+        print("==============step1")
+        sql = "create table st(ts timestamp, "
+        for i in range(self.columns - 1):
+            sql += "c%d int, " % (i + 1)
+        sql += "c50 int) tags(t1 int)"
+        tdSql.execute(sql)

         for i in range(self.tables):
             tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i))
             for j in range(self.rows):
                 tc = self.ts + j * 60000
-                tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, tc, j))
+                tdSql.execute("insert into %s%d(ts, c1) values(%d, %d)" %(self.perfix, i, tc, j))

     def executeQueries(self):
         print("==============step2")
@@ -66,29 +71,29 @@ class TDTestCase:
         tdSql.checkData(0, 0, 19)

         tc = self.ts + 1 * 3600000
-        tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, %d)" %(self.perfix, 1, tc, 10))

         tc = self.ts + 3 * 3600000
-        tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, null)" %(self.perfix, 1, tc))

         tc = self.ts + 5 * 3600000
-        tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, %d)" %(self.perfix, 1, tc, -1))

         tc = self.ts + 7 * 3600000
-        tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, null)" %(self.perfix, 1, tc))

     def insertData2(self):
         tc = self.ts + 1 * 3600000
-        tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, %d)" %(self.perfix, 1, tc, 10))

         tc = self.ts + 3 * 3600000
-        tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, null)" %(self.perfix, 1, tc))

         tc = self.ts + 5 * 3600000
-        tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, %d)" %(self.perfix, 1, tc, -1))

         tc = self.ts + 7 * 3600000
-        tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
+        tdSql.execute("insert into %s%d(ts, c1) values(%d, null)" %(self.perfix, 1, tc))

     def executeQueries2(self):
         # For stable
@@ -164,6 +169,9 @@ class TDTestCase:
         self.executeQueries()
         self.insertData2()
         self.executeQueries2()
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+        self.executeQueries2()

         tdSql.execute("alter database test2 cachelast 0")
         self.executeQueries2()
diff --git a/tests/pytest/query/queryGroupbySort.py b/tests/pytest/query/queryGroupbySort.py
index 80f6d55aae1ae79cafbe651cc99db1d301dd90fc..c2649a86db8dcd399d7cddf2d752d1ac0b898abb 100644
--- a/tests/pytest/query/queryGroupbySort.py
+++ b/tests/pytest/query/queryGroupbySort.py
@@ -45,6 +45,10 @@ class TDTestCase:
         tdSql.checkData(0, 0, "2018-10-03 14:38:05")
         tdSql.checkData(1, 0, "2018-10-03 14:38:15")
         tdSql.checkData(2, 0, "2018-10-03 14:38:16")
+
+        tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s ASC")
+
+        tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s DESC")

     def stop(self):
         tdSql.close()
diff --git a/tests/pytest/query/queryNullValueTest.py b/tests/pytest/query/queryNullValueTest.py
index 9920543b3a8d78363197a632eabe47ac6f82fa2e..5a95b369fe39d6bc1216847b3f108dff7d7171b7 100644
--- a/tests/pytest/query/queryNullValueTest.py
+++ b/tests/pytest/query/queryNullValueTest.py
@@ -25,7 +25,7 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)

-        self.types = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(10)", "nchar(10)"]
+        self.types = ["tinyint", "smallint", "int", "bigint", "float", "double", "bool", "binary(10)", "nchar(10)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"]
         self.ts = 1537146000000

     def checkNullValue(self, result):
diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py
index 649e0dc1cb3191ba08b3f2da0a5edee3afc66575..17022bdc41057bcb67e1530a2cb6d399bada20ff 100644
--- a/tests/pytest/query/querySort.py
+++ b/tests/pytest/query/querySort.py
@@ -73,19 +73,19 @@ class TDTestCase:
         print("======= step 1: create table and insert data =========")
         tdLog.debug(
             ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
-            tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
-            tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+            tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20), tbcol11 tinyint unsigned, tbcol12 smallint unsigned, tbcol13 int unsigned, tbcol14 bigint unsigned) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+            tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20), tagcol11 tinyint unsigned, tagcol12 smallint unsigned, tagcol13 int unsigned, tagcol14 bigint unsigned)''')
         tdSql.execute(
             ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
-            tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
-            tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+            tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20), tbcol11 tinyint unsigned, tbcol12 smallint unsigned, tbcol13 int unsigned, tbcol14 bigint unsigned) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+            tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20), tagcol11 tinyint unsigned, tagcol12 smallint unsigned, tagcol13 int unsigned, tagcol14 bigint unsigned)''')

         for i in range(self.rowNum):
-            tdSql.execute("create table st%d using st tags(%d, %d, %d, %d, %f, %f, %d, 'tag%d', '标签%d')" % (
-                i + 1, i + 1, i + 1, i + 1, i + 1, 1.1 * (i + 1), 1.23 * (i + 1), (i + 1) % 2, i + 1, i + 1))
+            tdSql.execute("create table st%d using st tags(%d, %d, %d, %d, %f, %f, %d, 'tag%d', '标签%d', %d, %d, %d, %d)" % (
+                i + 1, i + 1, i + 1, i + 1, i + 1, 1.1 * (i + 1), 1.23 * (i + 1), (i + 1) % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
             for j in range(self.rowNum):
-                tdSql.execute("insert into st%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" % (
-                    i + 1, self.ts + 10 * (i + 1) + j + 1, j + 1, j + 1, j + 1, j + 1, 1.1 * (j + 1), 1.23 * (j + 1), (j + 1) % 2, j + 1, j + 1))
+                tdSql.execute("insert into st%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (
+                    i + 1, self.ts + 10 * (i + 1) + j + 1, j + 1, j + 1, j + 1, j + 1, 1.1 * (j + 1), 1.23 * (j + 1), (j + 1) % 2, j + 1, j + 1, i + 1, i + 1, i + 1, i + 1))

         print("======= step 2: verify order for each column =========")
         # sort for timestamp in asc order
diff --git a/tests/pytest/tag_lite/add.py b/tests/pytest/tag_lite/add.py
index 49e2f8ab6990c24e90e9d1b01df957bc0d3107cd..eb52e9cb9b8f595d77e4d816c2fb2c1317501cfb 100644
--- a/tests/pytest/tag_lite/add.py
+++ b/tests/pytest/tag_lite/add.py
@@ -141,6 +141,98 @@ class TDTestCase:
         # TSIM: sql select * from $mt where tgcol2 = 1 -x step2
         tdLog.info('select * from %s where tgcol2 = 1 -x step2' % (mt))
         tdSql.error('select * from %s where tgcol2 = 1' % (mt))
+        tdLog.info('=============== step2-1')
+        # TSIM: $i = 2
+        i = 21
+        # TSIM: $mt = $mtPrefix . $i
+        mt = "%s%d" % (mtPrefix, i)
+        # TSIM: $tb = $tbPrefix . $i
+        tb = "%s%d" % (tbPrefix, i)
+        # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1
+        # bool, tgcol2 int)
+        tdLog.info(
+            'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int unsigned)' %
+            (mt))
+        tdSql.execute(
+            'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int unsigned)' %
+            (mt))
+        # TSIM: sql create table $tb using $mt tags( 1, 2 )
+        tdLog.info('create table %s using %s tags( 1, 2 )' % (tb, mt))
+        tdSql.execute('create table %s using %s tags( 1, 2 )' % (tb, mt))
+        # TSIM: sql insert into $tb values(now, 1)
+        tdLog.info('insert into %s values(now, 1)' % (tb))
+        tdSql.execute('insert into %s values(now, 1)' % (tb))
+        # TSIM: sql select * from $mt where tgcol2 = 2
+        tdLog.info('select * from %s where tgcol2 = 2' % (mt))
+        tdSql.query('select * from %s where tgcol2 = 2' % (mt))
+        # TSIM: if $rows != 1 then
+        tdLog.info('tdSql.checkRow(1)')
+        tdSql.checkRows(1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data01 != 1 then
+        tdLog.info('tdSql.checkData(0, 1, 1)')
+        tdSql.checkData(0, 1, 1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data02 != 1 then
+        tdLog.info('tdSql.checkData(0, 2, 1)')
+        tdSql.checkData(0, 2, 1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data03 != 2 then
+        tdLog.info('tdSql.checkData(0, 3, 2)')
+        tdSql.checkData(0, 3, 2)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM:
+        # TSIM: sql alter table $mt drop tag tgcol2
+        tdLog.info('alter table %s drop tag tgcol2' % (mt))
+        tdSql.execute('alter table %s drop tag tgcol2' % (mt))
+        # TSIM: sql alter table $mt add tag tgcol4 int
+        tdLog.info('alter table %s add tag tgcol4 int unsigned' % (mt))
+        tdSql.execute('alter table %s add tag tgcol4 int unsigned' % (mt))
+        tdLog.info('select * from %s where tgcol4=6' % (mt))
+        tdSql.query('select * from %s where tgcol4=6' % (mt))
+        # TSIM: sql reset query cache
+        tdLog.info('reset query cache')
+        tdSql.execute('reset query cache')
+        # TSIM: sql alter table $tb set tag tgcol4 =4
+        tdLog.info('alter table %s set tag tgcol4 =4' % (tb))
+        tdSql.execute('alter table %s set tag tgcol4 =4' % (tb))
+        # TSIM: sql reset query cache
+        tdLog.info('reset query cache')
+        tdSql.execute('reset query cache')
+        # TSIM:
+        # TSIM: sql select * from $mt where tgcol4 = 4
+        tdLog.info('select * from %s where tgcol4 = 4' % (mt))
+        tdSql.query('select * from %s where tgcol4 = 4' % (mt))
+        # TSIM: print $data01 $data02 $data03
+        tdLog.info('$data01 $data02 $data03')
+        # TSIM: if $rows != 1 then
+        tdLog.info('tdSql.checkRow(1)')
+        tdSql.checkRows(1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data01 != 1 then
+        tdLog.info('tdSql.checkData(0, 1, 1)')
+        tdSql.checkData(0, 1, 1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data02 != 1 then
+        tdLog.info('tdSql.checkData(0, 2, 1)')
+        tdSql.checkData(0, 2, 1)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM: if $data03 != 4 then
+        tdLog.info('tdSql.checkData(0, 3, 4)')
+        tdSql.checkData(0, 3, 4)
+        # TSIM: return -1
+        # TSIM: endi
+        # TSIM:
+        # TSIM: sql select * from $mt where tgcol2 = 1 -x step2
+        tdLog.info('select * from %s where tgcol2 = 1 -x step2' % (mt))
+        tdSql.error('select * from %s where tgcol2 = 1' % (mt))
         # TSIM: return -1
         # TSIM: step2:
         # TSIM:
$i + mt = "%s%d" % (mtPrefix, i) + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 + # smallint, tgcol2 tinyint) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 smallint unsigned, tgcol2 tinyint unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 smallint unsigned, tgcol2 tinyint unsigned)' % + (mt)) + # TSIM: sql create table $tb using $mt tags( 1, 2 ) + tdLog.info('create table %s using %s tags( 1, 2 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1, 2 )' % (tb, mt)) + # TSIM: sql insert into $tb values(now, 1) + tdLog.info('insert into %s values(now, 1)' % (tb)) + tdSql.execute('insert into %s values(now, 1)' % (tb)) + # TSIM: sql select * from $mt where tgcol2 = 2 + tdLog.info('select * from %s where tgcol2 = 2' % (mt)) + tdSql.query('select * from %s where tgcol2 = 2' % (mt)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data01 != 1 then + tdLog.info('tdSql.checkData(0, 1, 1)') + tdSql.checkData(0, 1, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data02 != 1 then + tdLog.info('tdSql.checkData(0, 2, 1)') + tdSql.checkData(0, 2, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data03 != 2 then + tdLog.info('tdSql.checkData(0, 3, 2)') + tdSql.checkData(0, 3, 2) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql alter table $mt drop tag tgcol2 + tdLog.info('alter table %s drop tag tgcol2' % (mt)) + tdSql.execute('alter table %s drop tag tgcol2' % (mt)) + # TSIM: sql alter table $mt add tag tgcol4 tinyint + tdLog.info('alter table %s add tag tgcol4 tinyint unsigned' % (mt)) + tdSql.execute('alter table %s add tag tgcol4 tinyint unsigned' % (mt)) + # TSIM: sql reset query cache + tdLog.info('reset query cache') + tdSql.execute('reset query cache') + # TSIM: sql alter table $tb set tag tgcol4=4 + tdLog.info('alter table %s set tag tgcol4=4' % (tb)) + tdSql.execute('alter table %s set tag tgcol4=4' % (tb)) + # TSIM: sql reset query cache + tdLog.info('reset query cache') + tdSql.execute('reset query cache') + # TSIM: + # TSIM: sql select * from $mt where tgcol4 = 4 + tdLog.info('select * from %s where tgcol4 = 4' % (mt)) + tdSql.query('select * from %s where tgcol4 = 4' % (mt)) + # TSIM: print $data01 $data02 $data03 + tdLog.info('$data01 $data02 $data03') + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data01 != 1 then + tdLog.info('tdSql.checkData(0, 1, 1)') + tdSql.checkData(0, 1, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data02 != 1 then + tdLog.info('tdSql.checkData(0, 2, 1)') + tdSql.checkData(0, 2, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data03 != 4 then + tdLog.info('tdSql.checkData(0, 3, 4)') + tdSql.checkData(0, 3, 4) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where tgcol2 = 1 -x step3 + tdLog.info('select * from %s where tgcol2 = 1 -x step3' % (mt)) + tdSql.error('select * from %s where tgcol2 = 1' % (mt)) # TSIM: return -1 # TSIM: step3: # TSIM: @@ -353,6 +535,120 @@ class TDTestCase: # TSIM: sql select * from $mt where tgcol2 = 1 -x step4 tdLog.info('select * from %s where tgcol2 = 1 -x step4' % (mt)) tdSql.error('select * from %s where tgcol2 = 1' % (mt)) + tdLog.info('=============== step4-1') + # TSIM: $i = 4 + i = 41 + # TSIM: $mt = $mtPrefix . 
$i + mt = "%s%d" % (mtPrefix, i) + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 + # bigint, tgcol2 float) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 bigint unsigned, tgcol2 float)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 bigint unsigned, tgcol2 float)' % + (mt)) + # TSIM: sql create table $tb using $mt tags( 1, 2 ) + tdLog.info('create table %s using %s tags( 1, 2 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1, 2 )' % (tb, mt)) + # TSIM: sql insert into $tb values(now, 1) + tdLog.info('insert into %s values(now, 1)' % (tb)) + tdSql.execute('insert into %s values(now, 1)' % (tb)) + # TSIM: sql select * from $mt where tgcol2 = 2 + tdLog.info('select * from %s where tgcol2 = 2' % (mt)) + tdSql.query('select * from %s where tgcol2 = 2' % (mt)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data01 != 1 then + tdLog.info('tdSql.checkData(0, 1, 1)') + tdSql.checkData(0, 1, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data02 != 1 then + tdLog.info('tdSql.checkData(0, 2, 1)') + tdSql.checkData(0, 2, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data03 != 2.00000 then + tdLog.info('tdSql.checkData(0, 3, 2.00000)') + tdSql.checkData(0, 3, 2.00000) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql describe $tb + tdLog.info('describe %s' % (tb)) + tdSql.query('describe %s' % (tb)) + # TSIM: if $data21 != BIGINT then + tdLog.info('tdSql.checkDataType(2, 1, "BIGINT UNSIGNED")') + tdSql.checkDataType(2, 1, "BIGINT") + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data31 != FLOAT then + tdLog.info('tdSql.checkDataType(3, 1, "FLOAT")') + tdSql.checkDataType(3, 1, "FLOAT") + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data23 != 1 then + tdLog.info('tdSql.checkData(2, 3, TAG)') + tdSql.checkData(2, 3, "TAG") + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data33 != 2.000000 then + tdLog.info('tdSql.checkData(3, 3, 2.000000)') + tdSql.checkData(3, 3, "TAG") + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql alter table $mt drop tag tgcol2 + tdLog.info('alter table %s drop tag tgcol2' % (mt)) + tdSql.execute('alter table %s drop tag tgcol2' % (mt)) + # TSIM: sql alter table $mt add tag tgcol4 float + tdLog.info('alter table %s add tag tgcol4 float' % (mt)) + tdSql.execute('alter table %s add tag tgcol4 float' % (mt)) + # TSIM: sql reset query cache + tdLog.info('reset query cache') + tdSql.execute('reset query cache') + # TSIM: sql alter table $tb set tag tgcol4=4 + tdLog.info('alter table %s set tag tgcol4=4' % (tb)) + tdSql.execute('alter table %s set tag tgcol4=4' % (tb)) + # TSIM: sql reset query cache + tdLog.info('reset query cache') + tdSql.execute('reset query cache') + # TSIM: + # TSIM: sql select * from $mt where tgcol4 = 4 + tdLog.info('select * from %s where tgcol4 = 4' % (mt)) + tdSql.query('select * from %s where tgcol4 = 4' % (mt)) + # TSIM: print $data01 $data02 $data03 + tdLog.info('$data01 $data02 $data03') + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data01 != 1 then + tdLog.info('tdSql.checkData(0, 1, 1)') + tdSql.checkData(0, 1, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data02 != 1 then + tdLog.info('tdSql.checkData(0, 2, 1)') + tdSql.checkData(0, 2, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if 
$data03 != 4.00000 then + tdLog.info('tdSql.checkData(0, 3, 4.00000)') + tdSql.checkData(0, 3, 4.00000) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where tgcol2 = 1 -x step4 + tdLog.info('select * from %s where tgcol2 = 1 -x step4' % (mt)) + tdSql.error('select * from %s where tgcol2 = 1' % (mt)) # TSIM: return -1 # TSIM: step4: # TSIM: @@ -408,12 +704,18 @@ class TDTestCase: # TSIM: sql alter table $mt add tag tgcol4 smallint tdLog.info('alter table %s add tag tgcol4 smallint' % (mt)) tdSql.execute('alter table %s add tag tgcol4 smallint' % (mt)) + # TSIM: sql alter table $mt add tag tgcol5 smallint unsigned + tdLog.info('alter table %s add tag tgcol5 smallint unsigned' % (mt)) + tdSql.execute('alter table %s add tag tgcol5 smallint unsigned' % (mt)) # TSIM: sql reset query cache tdLog.info('reset query cache') tdSql.execute('reset query cache') # TSIM: sql alter table $tb set tag tgcol4=4 tdLog.info('alter table %s set tag tgcol4=4' % (tb)) tdSql.execute('alter table %s set tag tgcol4=4' % (tb)) + # TSIM: sql alter table $tb set tag tgcol5=5 + tdLog.info('alter table %s set tag tgcol5=5' % (tb)) + tdSql.execute('alter table %s set tag tgcol5=5' % (tb)) # TSIM: sql reset query cache tdLog.info('reset query cache') tdSql.execute('reset query cache') @@ -441,6 +743,11 @@ class TDTestCase: # TSIM: if $data03 != 4 then tdLog.info('tdSql.checkData(0, 3, 4)') tdSql.checkData(0, 3, 4) + tdSql.query('select * from %s where tgcol5 = 5' % (mt)) + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1.000000000) + tdSql.checkData(0, 4, 5) # TSIM: return -1 # TSIM: endi # TSIM: diff --git a/tests/pytest/tag_lite/alter_tag.py b/tests/pytest/tag_lite/alter_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..9e5abb6c134840ecb4ab52c7d3a6ab623885e12b --- /dev/null +++ b/tests/pytest/tag_lite/alter_tag.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "CREATE TABLE IF NOT EXISTS ampere (ts TIMESTAMP(8),ampere DOUBLE(8)) TAGS (device_name BINARY(50),build_id BINARY(50),project_id BINARY(50),alias BINARY(50))") + tdSql.execute("insert into d1001 using ampere tags('test', '2', '2', '2') VALUES (now, 123)") + tdSql.execute("ALTER TABLE ampere ADD TAG variable_id BINARY(50)") + + print("==============step2") + + tdSql.execute("insert into d1002 using ampere tags('test', '2', '2', '2', 'test') VALUES (now, 124)") + + tdSql.query("select * from ampere") + tdSql.checkRows(2) + tdSql.checkData(0, 6, None) + tdSql.checkData(1, 6, 'test') + + # Test case for: https://jira.taosdata.com:18080/browse/TD-2423 + tdSql.execute("create table stb(ts timestamp, col1 int, col2 nchar(20)) tags(tg1 int, tg2 binary(20), tg3 nchar(25))") + tdSql.execute("insert into tb1 using stb(tg1, tg3) tags(1, 'test1') values(now, 1, 'test1')") + tdSql.query("select *, tg1, tg2, tg3 from tb1") + tdSql.checkRows(1) + tdSql.checkData(0, 3, 1) + tdSql.checkData(0, 4, None) + tdSql.checkData(0, 5, 'test1') + + tdSql.execute("create table tb2 using stb(tg3, tg2) tags('test3', 'test2')") + tdSql.query("select tg1, tg2, tg3 from tb2") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + tdSql.checkData(0, 1, 'test2') + tdSql.checkData(0, 2, 'test3') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/delete.py b/tests/pytest/tag_lite/delete.py index 17b4deb815e7a05aaa5eaf33685b95e144d3d59b..5e82a96bf41608782fca09d6aeb8b18aa1a727ef 100644 --- a/tests/pytest/tag_lite/delete.py +++ b/tests/pytest/tag_lite/delete.py @@ -869,6 +869,91 @@ class TDTestCase: # TSIM: sql alter table $mt drop tag tgcol6 tdLog.info('alter table %s drop tag tgcol6' % (mt)) tdSql.execute('alter table %s drop tag tgcol6' % (mt)) + tdLog.info('=============== step14') + # TSIM: $i = 14 + i = 14 + # TSIM: $mt = $mtPrefix . $i + mt = "%s%d" % (mtPrefix, i) + # TSIM: $tb = $tbPrefix . 
$i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 + # binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 + # double, tgcol6 binary(20)) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int unsigned, tgcol3 smallint unsigned, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20), tgcol7 tinyint unsigned, tgcol8 bigint unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int unsigned, tgcol3 smallint unsigned, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20), tgcol7 tinyint unsigned, tgcol8 bigint unsigned)' % + (mt)) + # TSIM: sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) + tdLog.info( + 'create table %s using %s tags( "1", 2, 3, "4", 5, "6", 7, 8 )' % + (tb, mt)) + tdSql.execute( + 'create table %s using %s tags( "1", 2, 3, "4", 5, "6", 7, 8 )' % + (tb, mt)) + # TSIM: sql insert into $tb values(now, 1) + tdLog.info('insert into %s values(now, 1)' % (tb)) + tdSql.execute('insert into %s values(now, 1)' % (tb)) + # TSIM: sql select * from $mt where tgcol1 = '1' + tdLog.info('select * from %s where tgcol1 = "1"' % (mt)) + tdSql.query('select * from %s where tgcol1 = "1"' % (mt)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data01 != 1 then + tdLog.info('tdSql.checkData(0, 1, 1)') + tdSql.checkData(0, 1, 1) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data02 != 1 then + tdLog.info('tdSql.checkData(0, 2, "1")') + tdSql.checkData(0, 2, "1") + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data03 != 2 then + tdLog.info('tdSql.checkData(0, 3, 2)') + tdSql.checkData(0, 3, 2) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data04 != 3 then + tdLog.info('tdSql.checkData(0, 4, 3)') + tdSql.checkData(0, 4, 3) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data05 != 4 then + tdLog.info('tdSql.checkData(0, 5, "4")') + tdSql.checkData(0, 5, "4") + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data06 != 5.000000000 then + tdLog.info('tdSql.checkData(0, 6, 5.000000000)') + tdSql.checkData(0, 6, 5.000000000) + # TSIM: return -1 + # TSIM: endi + # TSIM: if $data07 != 6 then + tdLog.info('tdSql.checkData(0, 7, "6")') + tdSql.checkData(0, 7, "6") + tdLog.info('tdSql.checkData(0, 8, 7)') + tdSql.checkData(0, 8, 7) + tdLog.info('tdSql.checkData(0, 9, 8)') + tdSql.checkData(0, 9, 8) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql alter table $mt drop tag tgcol3 + tdLog.info('alter table %s drop tag tgcol3' % (mt)) + tdSql.execute('alter table %s drop tag tgcol3' % (mt)) + # TSIM: sql alter table $mt drop tag tgcol4 + tdLog.info('alter table %s drop tag tgcol4' % (mt)) + tdSql.execute('alter table %s drop tag tgcol4' % (mt)) + # TSIM: sql alter table $mt drop tag tgcol6 + tdLog.info('alter table %s drop tag tgcol6' % (mt)) + tdSql.execute('alter table %s drop tag tgcol6' % (mt)) + tdLog.info('alter table %s drop tag tgcol8' % (mt)) + tdSql.execute('alter table %s drop tag tgcol8' % (mt)) # TSIM: # TSIM: sleep 5000 # TSIM: @@ -1556,7 +1641,7 @@ class TDTestCase: # TSIM: print =============== step14 tdLog.info('=============== step14') # TSIM: $i = 14 - i = 14 + i = 20 # TSIM: $mt = $mtPrefix . $i mt = "%s%d" % (mtPrefix, i) # TSIM: $tb = $tbPrefix . 
$i diff --git a/tests/pytest/tag_lite/unsignedBigint.py b/tests/pytest/tag_lite/unsignedBigint.py new file mode 100644 index 0000000000000000000000000000000000000000..032dacd366437c378b0b8c9b553d6495966cd563 --- /dev/null +++ b/tests/pytest/tag_lite/unsignedBigint.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + # TSIM: system sh/stop_dnodes.sh + # TSIM: + # TSIM: + # TSIM: system sh/deploy.sh -n dnode1 -i 1 + # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 + # TSIM: system sh/exec.sh -n dnode1 -s start + # TSIM: + # TSIM: sleep 3000 + # TSIM: sql connect + # TSIM: + # TSIM: print ======================== dnode1 start + tdLog.info('======================== dnode1 start') + # TSIM: + # TSIM: $dbPrefix = ta_sm_db + # TSIM: $tbPrefix = ta_sm_tb + tbPrefix = "ta_sm_tb" + # TSIM: $mtPrefix = ta_sm_mt + mtPrefix = "ta_sm_mt" + # TSIM: $tbNum = 10 + tbNum = 10 + # TSIM: $rowNum = 20 + rowNum = 20 + # TSIM: $totalNum = 200 + totalNum = 200 + # TSIM: + # TSIM: print =============== step1 + tdLog.info('=============== step1') + # TSIM: $i = 0 + i = 0 + # TSIM: $db = $dbPrefix . $i + # TSIM: $mt = $mtPrefix . $i + mt = "%s%d" % (mtPrefix, i) + # TSIM: + # TSIM: sql create database $db + # TSIM: sql use $db + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol + # bigint unsigned) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol bigint unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol bigint unsigned)' % + (mt)) + # TSIM: + # TSIM: $i = 0 + i = 0 + # TSIM: while $i < 5 + while (i < 5): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 0 ) + tdLog.info('create table %s using %s tags( 0 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 0 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: while $i < 10 + while (i < 10): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 1 ) + tdLog.info('create table %s using %s tags( 1 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . 
m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: + # TSIM: print =============== step2 + tdLog.info('=============== step2') + # TSIM: sleep 100 + # TSIM: sql select * from $tb + tdLog.info('select * from %s' % (tb)) + tdSql.query('select * from %s' % (tb)) + # TSIM: if $rows != $rowNum then + tdLog.info('tdSql.checkRow($rowNum)') + tdSql.checkRows(rowNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (tb)) + tdSql.query('select * from %s where ts < now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts <= now + 4m + tdLog.info('select * from %s where ts <= now + 4m' % (tb)) + tdSql.query('select * from %s where ts <= now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (tb)) + tdSql.query('select * from %s where ts > now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts >= now + 4m + tdLog.info('select * from %s where ts >= now + 4m' % (tb)) + tdSql.query('select * from %s where ts >= now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m and ts > now + 5m + tdLog.info( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > 100000 and ts < 100000 + tdLog.info('select * from %s where ts > 100000 and ts < 100000' % (tb)) + tdSql.query( + 'select * from %s where ts > 100000 and ts < 100000' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 3m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts > now + 5m and + # ts < now + 6m + tdLog.info( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + 
(tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step3 + tdLog.info('=============== step3') + # TSIM: sql select * from $mt + tdLog.info('select * from %s' % (mt)) + tdSql.query('select * from %s' % (mt)) + # TSIM: if $rows != $totalNum then + tdLog.info('tdSql.checkRow($totalNum)') + tdSql.checkRows(totalNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (mt)) + tdSql.query('select * from %s where ts < now + 4m' % (mt)) + # TSIM: if $rows != 50 then + tdLog.info('tdSql.checkRow(50)') + tdSql.checkRows(50) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (mt)) + tdSql.query('select * from %s where ts > now + 4m' % (mt)) + # TSIM: if $rows != 150 then + tdLog.info('tdSql.checkRow(150)') + tdSql.checkRows(150) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts = now + 4m + tdLog.info('select * from %s where ts = now + 4m' % (mt)) + tdSql.query('select * from %s where ts = now + 4m' % (mt)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 10 then + tdLog.info('tdSql.checkRow(10)') + tdSql.checkRows(10) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step4 + tdLog.info('=============== step4') + # TSIM: sql select * from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * 
from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step5 + tdLog.info('=============== step5') + # TSIM: sql select * from $mt where ts > now + 4m and tgcol = 1 + tdLog.info('select * from %s where ts > now + 4m and tgcol = 1' % (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol = 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 1 + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol = 0 + tdLog.info('select * from %s where ts < now + 4m and tgcol = 0' % (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol = 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m and + # tgcol <> 0 + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts + # < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: 
print =============== step6 + tdLog.info('=============== step6') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 200 then + tdLog.info('tdSql.checkData(0, 0, 200)') + tdSql.checkData(0, 0, 200) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step7 + tdLog.info('=============== step7') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step8 + tdLog.info('=============== step8') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 50 then + tdLog.info('tdSql.checkData(0, 0, 50)') + tdSql.checkData(0, 0, 50) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step9 + tdLog.info('=============== step9') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step10 + tdLog.info('=============== step10') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group + # by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + 
(mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step11 + tdLog.info('=============== step11') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + # group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 25 then + tdLog.info('tdSql.checkData(0, 0, 25)') + tdSql.checkData(0, 0, 25) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: + # TSIM: print =============== step12 + tdLog.info('=============== step12') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d) group by + # tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data01 != 100 then + tdLog.info('tdSql.checkData(0, 1, 100)') + tdSql.checkData(0, 1, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== clear + tdLog.info('=============== clear') + # TSIM: sql drop database $db + tdLog.info('drop database db') + tdSql.execute('drop database db') + # TSIM: sql show databases + tdLog.info('show databases') + tdSql.query('show databases') + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT +# convert end + tdSql.execute("create database db") + tdSql.execute("use db") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev bigint unsigned)") + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 64) - 1)) + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1)) + + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 64) - 2)) + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (0)) + tdSql.execute( + 'CREATE TABLE if not exists dev_003 using st tags(%s)' % ('NULL')) + print("==============step2") + tdSql.query("show tables") + tdSql.checkRows(3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/tag_lite/unsignedInt.py b/tests/pytest/tag_lite/unsignedInt.py new file mode 100644 index 0000000000000000000000000000000000000000..6efe12edf110f70d9c2c0187429a0696372bae49 --- /dev/null +++ b/tests/pytest/tag_lite/unsignedInt.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + # TSIM: system sh/stop_dnodes.sh + # TSIM: + # TSIM: + # TSIM: system sh/deploy.sh -n dnode1 -i 1 + # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 + # TSIM: system sh/exec.sh -n dnode1 -s start + # TSIM: + # TSIM: sleep 3000 + # TSIM: sql connect + # TSIM: + # TSIM: print ======================== dnode1 start + tdLog.info('======================== dnode1 start') + # TSIM: + # TSIM: $dbPrefix = ta_sm_db + # TSIM: $tbPrefix = ta_sm_tb + tbPrefix = "ta_sm_tb" + # TSIM: $mtPrefix = ta_sm_mt + mtPrefix = "ta_sm_mt" + # TSIM: $tbNum = 10 + tbNum = 10 + # TSIM: $rowNum = 20 + rowNum = 20 + # TSIM: $totalNum = 200 + totalNum = 200 + # TSIM: + # TSIM: print =============== step1 + tdLog.info('=============== step1') + # TSIM: $i = 0 + i = 0 + # TSIM: $db = $dbPrefix . $i + # TSIM: $mt = $mtPrefix . $i + mt = "%s%d" % (mtPrefix, i) + # TSIM: + # TSIM: sql create database $db + # TSIM: sql use $db + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol + # int unsigned) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol int unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol int unsigned)' % + (mt)) + # TSIM: + # TSIM: $i = 0 + i = 0 + # TSIM: while $i < 5 + while (i < 5): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 0 ) + tdLog.info('create table %s using %s tags( 0 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 0 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: while $i < 10 + while (i < 10): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 1 ) + tdLog.info('create table %s using %s tags( 1 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . 
m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: + # TSIM: print =============== step2 + tdLog.info('=============== step2') + # TSIM: sleep 100 + # TSIM: sql select * from $tb + tdLog.info('select * from %s' % (tb)) + tdSql.query('select * from %s' % (tb)) + # TSIM: if $rows != $rowNum then + tdLog.info('tdSql.checkRow($rowNum)') + tdSql.checkRows(rowNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (tb)) + tdSql.query('select * from %s where ts < now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts <= now + 4m + tdLog.info('select * from %s where ts <= now + 4m' % (tb)) + tdSql.query('select * from %s where ts <= now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (tb)) + tdSql.query('select * from %s where ts > now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts >= now + 4m + tdLog.info('select * from %s where ts >= now + 4m' % (tb)) + tdSql.query('select * from %s where ts >= now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m and ts > now + 5m + tdLog.info( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > 100000 and ts < 100000 + tdLog.info('select * from %s where ts > 100000 and ts < 100000' % (tb)) + tdSql.query( + 'select * from %s where ts > 100000 and ts < 100000' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 3m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts > now + 5m and + # ts < now + 6m + tdLog.info( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + 
(tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step3 + tdLog.info('=============== step3') + # TSIM: sql select * from $mt + tdLog.info('select * from %s' % (mt)) + tdSql.query('select * from %s' % (mt)) + # TSIM: if $rows != $totalNum then + tdLog.info('tdSql.checkRow($totalNum)') + tdSql.checkRows(totalNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (mt)) + tdSql.query('select * from %s where ts < now + 4m' % (mt)) + # TSIM: if $rows != 50 then + tdLog.info('tdSql.checkRow(50)') + tdSql.checkRows(50) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (mt)) + tdSql.query('select * from %s where ts > now + 4m' % (mt)) + # TSIM: if $rows != 150 then + tdLog.info('tdSql.checkRow(150)') + tdSql.checkRows(150) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts = now + 4m + tdLog.info('select * from %s where ts = now + 4m' % (mt)) + tdSql.query('select * from %s where ts = now + 4m' % (mt)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 10 then + tdLog.info('tdSql.checkRow(10)') + tdSql.checkRows(10) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step4 + tdLog.info('=============== step4') + # TSIM: sql select * from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * 
from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step5 + tdLog.info('=============== step5') + # TSIM: sql select * from $mt where ts > now + 4m and tgcol = 1 + tdLog.info('select * from %s where ts > now + 4m and tgcol = 1' % (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol = 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 1 + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol = 0 + tdLog.info('select * from %s where ts < now + 4m and tgcol = 0' % (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol = 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m and + # tgcol <> 0 + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts + # < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: 
print =============== step6 + tdLog.info('=============== step6') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 200 then + tdLog.info('tdSql.checkData(0, 0, 200)') + tdSql.checkData(0, 0, 200) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step7 + tdLog.info('=============== step7') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step8 + tdLog.info('=============== step8') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 50 then + tdLog.info('tdSql.checkData(0, 0, 50)') + tdSql.checkData(0, 0, 50) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step9 + tdLog.info('=============== step9') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step10 + tdLog.info('=============== step10') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group + # by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + 
(mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step11 + tdLog.info('=============== step11') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + # group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 25 then + tdLog.info('tdSql.checkData(0, 0, 25)') + tdSql.checkData(0, 0, 25) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: + # TSIM: print =============== step12 + tdLog.info('=============== step12') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d) group by + # tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data01 != 100 then + tdLog.info('tdSql.checkData(0, 1, 100)') + tdSql.checkData(0, 1, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== clear + tdLog.info('=============== clear') + # TSIM: sql drop database $db + tdLog.info('drop database db') + tdSql.execute('drop database db') + # TSIM: sql show databases + tdLog.info('show databases') + tdSql.query('show databases') + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT +# convert end + tdSql.execute("create database db") + tdSql.execute("use db") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev int unsigned)") + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 32) - 1)) + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1)) + + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 32) - 2)) + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (0)) + tdSql.execute( + 'CREATE TABLE if not exists dev_003 using st tags(%s)' % ('NULL')) + print("==============step2") + tdSql.query("show tables") + tdSql.checkRows(3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/tag_lite/unsignedSmallint.py b/tests/pytest/tag_lite/unsignedSmallint.py new file mode 100644 index 0000000000000000000000000000000000000000..59cb33dc18eb74b27e9a916e921e10cbabf1b4eb --- /dev/null +++ b/tests/pytest/tag_lite/unsignedSmallint.py @@ -0,0 +1,603 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + # TSIM: system sh/stop_dnodes.sh + # TSIM: + # TSIM: + # TSIM: system sh/deploy.sh -n dnode1 -i 1 + # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 + # TSIM: system sh/exec.sh -n dnode1 -s start + # TSIM: + # TSIM: sleep 3000 + # TSIM: sql connect + # TSIM: + # TSIM: print ======================== dnode1 start + tdLog.info('======================== dnode1 start') + # TSIM: + # TSIM: $dbPrefix = ta_sm_db + # TSIM: $tbPrefix = ta_sm_tb + tbPrefix = "ta_sm_tb" + # TSIM: $mtPrefix = ta_sm_mt + mtPrefix = "ta_sm_mt" + # TSIM: $tbNum = 10 + tbNum = 10 + # TSIM: $rowNum = 20 + rowNum = 20 + # TSIM: $totalNum = 200 + totalNum = 200 + # TSIM: + # TSIM: print =============== step1 + tdLog.info('=============== step1') + # TSIM: $i = 0 + i = 0 + # TSIM: $db = $dbPrefix . $i + # TSIM: $mt = $mtPrefix . $i + mt = "%s%d" % (mtPrefix, i) + # TSIM: + # TSIM: sql create database $db + # TSIM: sql use $db + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol + # smallint unsigned) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol smallint unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol smallint unsigned)' % + (mt)) + # TSIM: + # TSIM: $i = 0 + i = 0 + # TSIM: while $i < 5 + while (i < 5): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 0 ) + tdLog.info('create table %s using %s tags( 0 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 0 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: while $i < 10 + while (i < 10): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 1 ) + tdLog.info('create table %s using %s tags( 1 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . 
m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: + # TSIM: print =============== step2 + tdLog.info('=============== step2') + # TSIM: sleep 100 + # TSIM: sql select * from $tb + tdLog.info('select * from %s' % (tb)) + tdSql.query('select * from %s' % (tb)) + # TSIM: if $rows != $rowNum then + tdLog.info('tdSql.checkRow($rowNum)') + tdSql.checkRows(rowNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (tb)) + tdSql.query('select * from %s where ts < now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts <= now + 4m + tdLog.info('select * from %s where ts <= now + 4m' % (tb)) + tdSql.query('select * from %s where ts <= now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (tb)) + tdSql.query('select * from %s where ts > now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts >= now + 4m + tdLog.info('select * from %s where ts >= now + 4m' % (tb)) + tdSql.query('select * from %s where ts >= now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m and ts > now + 5m + tdLog.info( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > 100000 and ts < 100000 + tdLog.info('select * from %s where ts > 100000 and ts < 100000' % (tb)) + tdSql.query( + 'select * from %s where ts > 100000 and ts < 100000' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 3m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts > now + 5m and + # ts < now + 6m + tdLog.info( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + 
(tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step3 + tdLog.info('=============== step3') + # TSIM: sql select * from $mt + tdLog.info('select * from %s' % (mt)) + tdSql.query('select * from %s' % (mt)) + # TSIM: if $rows != $totalNum then + tdLog.info('tdSql.checkRow($totalNum)') + tdSql.checkRows(totalNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (mt)) + tdSql.query('select * from %s where ts < now + 4m' % (mt)) + # TSIM: if $rows != 50 then + tdLog.info('tdSql.checkRow(50)') + tdSql.checkRows(50) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (mt)) + tdSql.query('select * from %s where ts > now + 4m' % (mt)) + # TSIM: if $rows != 150 then + tdLog.info('tdSql.checkRow(150)') + tdSql.checkRows(150) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts = now + 4m + tdLog.info('select * from %s where ts = now + 4m' % (mt)) + tdSql.query('select * from %s where ts = now + 4m' % (mt)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 10 then + tdLog.info('tdSql.checkRow(10)') + tdSql.checkRows(10) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step4 + tdLog.info('=============== step4') + # TSIM: sql select * from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * 
from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step5 + tdLog.info('=============== step5') + # TSIM: sql select * from $mt where ts > now + 4m and tgcol = 1 + tdLog.info('select * from %s where ts > now + 4m and tgcol = 1' % (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol = 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 1 + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol = 0 + tdLog.info('select * from %s where ts < now + 4m and tgcol = 0' % (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol = 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m and + # tgcol <> 0 + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts + # < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: 
print =============== step6 + tdLog.info('=============== step6') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 200 then + tdLog.info('tdSql.checkData(0, 0, 200)') + tdSql.checkData(0, 0, 200) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step7 + tdLog.info('=============== step7') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step8 + tdLog.info('=============== step8') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 50 then + tdLog.info('tdSql.checkData(0, 0, 50)') + tdSql.checkData(0, 0, 50) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step9 + tdLog.info('=============== step9') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step10 + tdLog.info('=============== step10') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group + # by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + 
(mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step11 + tdLog.info('=============== step11') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + # group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 25 then + tdLog.info('tdSql.checkData(0, 0, 25)') + tdSql.checkData(0, 0, 25) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: + # TSIM: print =============== step12 + tdLog.info('=============== step12') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d) group by + # tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data01 != 100 then + tdLog.info('tdSql.checkData(0, 1, 100)') + tdSql.checkData(0, 1, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== clear + tdLog.info('=============== clear') + # TSIM: sql drop database $db + tdLog.info('drop database db') + tdSql.execute('drop database db') + # TSIM: sql show databases + tdLog.info('show databases') + tdSql.query('show databases') + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT +# convert end + tdSql.execute("create database db") + tdSql.execute("use db") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev smallint unsigned)") + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 16)-1)) + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1)) + + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 16) - 2)) + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (0)) + tdSql.execute( + 'CREATE TABLE if not exists dev_003 using st tags(%s)' % ('NULL')) + + print("==============step2") + tdSql.query("show tables") + tdSql.checkRows(3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/tag_lite/unsignedTinyint.py b/tests/pytest/tag_lite/unsignedTinyint.py new file mode 100644 index 0000000000000000000000000000000000000000..1250b08d1b8dd4d3cb5f61af424576146ab70967 --- /dev/null +++ b/tests/pytest/tag_lite/unsignedTinyint.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + # TSIM: system sh/stop_dnodes.sh + # TSIM: + # TSIM: + # TSIM: system sh/deploy.sh -n dnode1 -i 1 + # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 + # TSIM: system sh/exec.sh -n dnode1 -s start + # TSIM: + # TSIM: sleep 3000 + # TSIM: sql connect + # TSIM: + # TSIM: print ======================== dnode1 start + tdLog.info('======================== dnode1 start') + # TSIM: + # TSIM: $dbPrefix = ta_sm_db + # TSIM: $tbPrefix = ta_sm_tb + tbPrefix = "ta_sm_tb" + # TSIM: $mtPrefix = ta_sm_mt + mtPrefix = "ta_sm_mt" + # TSIM: $tbNum = 10 + tbNum = 10 + # TSIM: $rowNum = 20 + rowNum = 20 + # TSIM: $totalNum = 200 + totalNum = 200 + # TSIM: + # TSIM: print =============== step1 + tdLog.info('=============== step1') + # TSIM: $i = 0 + i = 0 + # TSIM: $db = $dbPrefix . $i + # TSIM: $mt = $mtPrefix . $i + mt = "%s%d" % (mtPrefix, i) + # TSIM: + # TSIM: sql create database $db + # TSIM: sql use $db + # TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol + # tinyint unsigned) + tdLog.info( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol tinyint unsigned)' % + (mt)) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol tinyint unsigned)' % + (mt)) + # TSIM: + # TSIM: $i = 0 + i = 0 + # TSIM: while $i < 5 + while (i < 5): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 0 ) + tdLog.info('create table %s using %s tags( 0 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 0 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: while $i < 10 + while (i < 10): + # TSIM: $tb = $tbPrefix . $i + tb = "%s%d" % (tbPrefix, i) + # TSIM: sql create table $tb using $mt tags( 1 ) + tdLog.info('create table %s using %s tags( 1 )' % (tb, mt)) + tdSql.execute('create table %s using %s tags( 1 )' % (tb, mt)) + # TSIM: $x = 0 + x = 0 + # TSIM: while $x < $rowNum + while (x < rowNum): + # TSIM: $ms = $x . 
m + ms = "%dm" % x + # TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + tdSql.execute( + 'insert into %s values (now + %s , %d )' % + (tb, ms, x)) + # TSIM: $x = $x + 1 + x = x + 1 + # TSIM: endw + # TSIM: $i = $i + 1 + i = i + 1 + # TSIM: endw + # TSIM: + # TSIM: print =============== step2 + tdLog.info('=============== step2') + # TSIM: sleep 100 + # TSIM: sql select * from $tb + tdLog.info('select * from %s' % (tb)) + tdSql.query('select * from %s' % (tb)) + # TSIM: if $rows != $rowNum then + tdLog.info('tdSql.checkRow($rowNum)') + tdSql.checkRows(rowNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (tb)) + tdSql.query('select * from %s where ts < now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts <= now + 4m + tdLog.info('select * from %s where ts <= now + 4m' % (tb)) + tdSql.query('select * from %s where ts <= now + 4m' % (tb)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (tb)) + tdSql.query('select * from %s where ts > now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts >= now + 4m + tdLog.info('select * from %s where ts >= now + 4m' % (tb)) + tdSql.query('select * from %s where ts >= now + 4m' % (tb)) + # TSIM: if $rows != 15 then + tdLog.info('tdSql.checkRow(15)') + tdSql.checkRows(15) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts < now + 4m and ts > now + 5m + tdLog.info( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + tdSql.query( + 'select * from %s where ts < now + 4m and ts > now + 5m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > 100000 and ts < 100000 + tdLog.info('select * from %s where ts > 100000 and ts < 100000' % (tb)) + tdSql.query( + 'select * from %s where ts > 100000 and ts < 100000' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts < now + 3m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 3m' % + (tb)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $tb where ts > now + 4m and ts > now + 5m and + # ts < now + 6m + tdLog.info( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + (tb)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts > now + 5m and ts < now + 6m' % + 
(tb)) + # TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step3 + tdLog.info('=============== step3') + # TSIM: sql select * from $mt + tdLog.info('select * from %s' % (mt)) + tdSql.query('select * from %s' % (mt)) + # TSIM: if $rows != $totalNum then + tdLog.info('tdSql.checkRow($totalNum)') + tdSql.checkRows(totalNum) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: sql select * from $mt where ts < now + 4m + tdLog.info('select * from %s where ts < now + 4m' % (mt)) + tdSql.query('select * from %s where ts < now + 4m' % (mt)) + # TSIM: if $rows != 50 then + tdLog.info('tdSql.checkRow(50)') + tdSql.checkRows(50) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m + tdLog.info('select * from %s where ts > now + 4m' % (mt)) + tdSql.query('select * from %s where ts > now + 4m' % (mt)) + # TSIM: if $rows != 150 then + tdLog.info('tdSql.checkRow(150)') + tdSql.checkRows(150) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts = now + 4m + tdLog.info('select * from %s where ts = now + 4m' % (mt)) + tdSql.query('select * from %s where ts = now + 4m' % (mt)) + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 10 then + tdLog.info('tdSql.checkRow(10)') + tdSql.checkRows(10) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step4 + tdLog.info('=============== step4') + # TSIM: sql select * from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol = 1 + tdLog.info('select * from %s where tgcol = 1' % (mt)) + tdSql.query('select * from %s where tgcol = 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 1 + tdLog.info('select * from %s where tgcol <> 1' % (mt)) + tdSql.query('select * from %s where tgcol <> 1' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * 
from $mt where tgcol = 0 + tdLog.info('select * from %s where tgcol = 0' % (mt)) + tdSql.query('select * from %s where tgcol = 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where tgcol <> 0 + tdLog.info('select * from %s where tgcol <> 0' % (mt)) + tdSql.query('select * from %s where tgcol <> 0' % (mt)) + # TSIM: if $rows != 100 then + tdLog.info('tdSql.checkRow(100)') + tdSql.checkRows(100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step5 + tdLog.info('=============== step5') + # TSIM: sql select * from $mt where ts > now + 4m and tgcol = 1 + tdLog.info('select * from %s where ts > now + 4m and tgcol = 1' % (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol = 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 1 + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 1' % + (mt)) + # TSIM: if $rows != 75 then + tdLog.info('tdSql.checkRow(75)') + tdSql.checkRows(75) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol = 0 + tdLog.info('select * from %s where ts < now + 4m and tgcol = 0' % (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts < now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts < now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol = 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol = 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts <= now + 4m and tgcol <> 0 + tdLog.info( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts <= now + 4m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 25 then + tdLog.info('tdSql.checkRow(25)') + tdSql.checkRows(25) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and ts < now + 5m and + # tgcol <> 0 + tdLog.info( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and ts < now + 5m and tgcol <> 0' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts + # < now + 5m + tdLog.info( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + tdSql.query( + 'select * from %s where ts > now + 4m and tgcol <> 0 and ts < now + 5m' % + (mt)) + # TSIM: if $rows != 5 then + tdLog.info('tdSql.checkRow(5)') + tdSql.checkRows(5) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: 
print =============== step6 + tdLog.info('=============== step6') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 200 then + tdLog.info('tdSql.checkData(0, 0, 200)') + tdSql.checkData(0, 0, 200) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step7 + tdLog.info('=============== step7') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step8 + tdLog.info('=============== step8') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 50 then + tdLog.info('tdSql.checkData(0, 0, 50)') + tdSql.checkData(0, 0, 50) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step9 + tdLog.info('=============== step9') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step10 + tdLog.info('=============== step10') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group + # by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + 
(mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = 1 group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== step11 + tdLog.info('=============== step11') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m + # group by tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where ts < now + 4m group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data00 != 25 then + tdLog.info('tdSql.checkData(0, 0, 25)') + tdSql.checkData(0, 0, 25) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: + # TSIM: print =============== step12 + tdLog.info('=============== step12') + # TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), + # max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d) group by + # tgcol + tdLog.info( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + tdSql.query( + 'select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s interval(1d) group by tgcol' % + (mt)) + # TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + # TSIM: if $data01 != 100 then + tdLog.info('tdSql.checkData(0, 1, 100)') + tdSql.checkData(0, 1, 100) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: print =============== clear + tdLog.info('=============== clear') + # TSIM: sql drop database $db + tdLog.info('drop database db') + tdSql.execute('drop database db') + # TSIM: sql show databases + tdLog.info('show databases') + tdSql.query('show databases') + # TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + # TSIM: return -1 + # TSIM: endi + # TSIM: + # TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT +# convert end + tdSql.execute("create database db") + tdSql.execute("use db") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev tinyint unsigned)") + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 8) - 1)) + tdSql.error( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1)) + + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 8) - 2)) + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (0)) + tdSql.execute( + 'CREATE TABLE if not exists dev_003 using st tags(%s)' % ('NULL')) + print("==============step2") + tdSql.query("show tables") + tdSql.checkRows(3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/tools/lowaTest.py b/tests/pytest/tools/lowaTest.py index 2b65dcf3eff1ed9ed7275fd774807cfa0318ec81..ad8b5925bd99b9c5918421eb277cea6e5ed100a7 100644 --- a/tests/pytest/tools/lowaTest.py +++ b/tests/pytest/tools/lowaTest.py @@ -51,7 +51,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("yes | %staosdemox -f tools/insert.json" % binPath) + os.system("yes | %staosdemo -f tools/insert.json" % binPath) tdSql.execute("use db01") tdSql.query("select count(*) from stb01") diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 2a4a552c8fb87f9c5a3169afcad71793462b924f..c450570d2446a236ad8fa98c0817d2e31bcae795 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -23,9 +23,10 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - - self.numberOfTables = 10000 + + self.numberOfTables = 1000 self.numberOfRecords = 100 + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -38,9 +39,10 @@ class TDTestCase: if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + buildPath = root[:len(root) - len("/build/bin")] break return buildPath + def run(self): tdSql.prepare() buildPath = self.getBuildPath() @@ -48,18 +50,21 @@ class TDTestCase: tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - os.system("yes | %staosdemo -t %d -n %d -x" % (binPath,self.numberOfTables, self.numberOfRecords)) + binPath = buildPath + "/build/bin/" + os.system("%staosdemo -y -M -t %d -n %d -x" % + (binPath, self.numberOfTables, self.numberOfRecords)) + tdSql.execute("use test") tdSql.query("select count(*) from meters") tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) - tdSql.query("select sum(f1) from test.meters interval(1h) sliding(30m)") + tdSql.query( + "select sum(col1) from test.meters interval(1h) sliding(30m)") tdSql.checkRows(2) - tdSql.query("select apercentile(f1, 1) from test.meters interval(10s)") - tdSql.checkRows(11) + tdSql.query( + "select apercentile(col1, 1) from test.meters interval(10s)") + tdSql.checkRows(1) tdSql.error("select loc, count(loc) from test.meters") diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTest2.py index 4d7e871e66b87c4c3ff8c46f68f00426d117d956..75a79d0585e766718151ca9b0e3e195c03732e16 100644 --- a/tests/pytest/tools/taosdemoTest2.py +++ b/tests/pytest/tools/taosdemoTest2.py @@ -31,31 +31,41 @@ class TDTestCase: def insertDataAndAlterTable(self, threadID): if(threadID == 0): - os.system("yes | taosdemo -t %d -n %d -x" % (self.numberOfTables, self.numberOfRecords)) + os.system("taosdemo -M -y -t %d -n %d -x" % + (self.numberOfTables, self.numberOfRecords)) if(threadID == 1): + time.sleep(2) print("use test") tdSql.execute("use test") + # check if all the tables have been created + while True: + tdSql.query("show tables") + rows = tdSql.queryRows + print("number of tables: %d" % rows) + if(rows == self.numberOfTables): + break + time.sleep(1) + # check if there are any records in the last created table while True: print("query started") tdSql.query("select * from test.t9") rows = tdSql.queryRows - print("rows %d" % rows) + print("number of records: %d" % rows) if(rows > 0): break - time.sleep(1) - print("alter 
table test.meters add column f4 int") - tdSql.execute("alter table test.meters add column f4 int") - print("insert into test.t0 values (now, 1, 2, 3, 4)") - tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4)") + time.sleep(1) + print("alter table test.meters add column col10 int") + tdSql.execute("alter table test.meters add column col10 int") + print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") - def run(self): + def run(self): tdSql.prepare() t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, )) t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, )) - t1.start() - time.sleep(2) + t1.start() t2.start() t1.join() t2.join() @@ -69,4 +79,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim index 9d4dedfce0e081345d8659abdcdebf3cce2efb74..170ba21c28b7ff4a79a8f2375dc1d36ba91e6120 100644 --- a/tests/script/general/db/alter_option.sim +++ b/tests/script/general/db/alter_option.sim @@ -209,7 +209,7 @@ sql alter database db wal 1 sql alter database db wal 2 sql alter database db wal 1 sql alter database db wal 2 -sql_error alter database db wal 0 +sql alter database db wal 0 sql_error alter database db wal 3 sql_error alter database db wal 4 sql_error alter database db wal -1 diff --git a/tests/script/general/http/restful.sim b/tests/script/general/http/restful.sim index fdde975238778176e447d01564851acc95b00c95..6ebf5644e7a99593ebac0302c3dcdbc0a27fc8c1 100644 --- a/tests/script/general/http/restful.sim +++ b/tests/script/general/http/restful.sim @@ -39,14 +39,14 @@ print =============== step3 - query data system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql print curl 127.0.0.1:7111/rest/sql -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then +if $system_content != @{"status":"succ","head":["ts","i"],"column_meta":[["ts",9,8],["i",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then return -1 endi print =============== step4 - insert data system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql print curl 127.0.0.1:7111/rest/sql -----> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then return -1 endi @@ -54,7 
+54,7 @@ print =============== step5 - query data system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql print curl 127.0.0.1:7111/rest/sql -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then +if $system_content != @{"status":"succ","head":["ts","i"],"column_meta":[["ts",9,8],["i",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi @@ -79,4 +79,4 @@ if $system_content != @{"status":"error","code":3,"desc":"Authentication failure return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim index 17ee0ea23243c484854aef463dd9f7efa5698edd..8094a943f507a702bb56a7d026da672134188b83 100644 --- a/tests/script/general/http/restful_full.sim +++ b/tests/script/general/http/restful_full.sim @@ -88,13 +88,13 @@ print =============== step2 - no db #11 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql print 11-> $system_content -if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"data":[],"rows":0}@ then +if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[],"rows":0}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql print 12-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then return -1 endi @@ -160,26 +160,26 @@ endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 22-> $system_content -if $system_content != 
@{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 23-> $system_content -if $system_content != @{"status":"succ","head":["ts","speed"],"data":[],"rows":0}@ then +if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[],"rows":0}@ then return -1 endi #24 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 24-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 25-> $system_content -if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 endi @@ -208,32 +208,32 @@ system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl #27 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql print 27-> $system_content -if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then +if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:7111/rest/sql print 28-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 29-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then +if $system_content != 
@{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then return -1 endi #30 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql print 30-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:7111/rest/sql print 31-> $system_content -if $system_content != @{"status":"succ","head":["ts","speed"],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then return -1 endi diff --git a/tests/script/general/http/telegraf.sim b/tests/script/general/http/telegraf.sim index 6825e5c479774f2a8eca5df42d55910e12a975fd..f3426971864e6cf510ff6600b7731cf81f64f952 100644 --- a/tests/script/general/http/telegraf.sim +++ b/tests/script/general/http/telegraf.sim @@ -285,8 +285,8 @@ system_content curl -u root:taosdata -d 'select count(*) from db.win_cpu' 127.0 print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"data":[[3]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/between_and.sim b/tests/script/general/parser/between_and.sim new file mode 100644 index 0000000000000000000000000000000000000000..2e031c4917cf396044a399493ef0be2849baa830 --- /dev/null +++ b/tests/script/general/parser/between_and.sim @@ -0,0 +1,165 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10)) + +sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1"); +sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2"); +sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3"); +sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4"); + +sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1") +sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2") +sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3") +sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4") + +sql select tbname,id1 from st2; + +if $rows != 4 then + return -1 +endi + + 
+sql select * from st2; + +if $rows != 8 then + return -1 +endi + +sql select * from st2 where ts between now-50s and now+450s + +if $rows != 5 then + return -1 +endi + +sql select tbname,id1 from st2 where id1 between 2 and 3; + +if $rows != 2 then + return -1 +endi + +if $data00 != tb2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi + +sql select tbname,id2 from st2 where id2 between 2.0 and 3.0; + +if $rows != 2 then + return -1 +endi + +if $data00 != tb2 then + return -1 +endi +if $data01 != 2.00000 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data11 != 3.00000 then + return -1 +endi + + +sql select tbname,id4 from st2 where id4 between 2.0 and 3.0; + +if $rows != 2 then + return -1 +endi + +if $data00 != tb2 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data11 != 3.000000000 then + return -1 +endi + + +sql select tbname,id5 from st2 where id5 between 2.0 and 3.0; + +if $rows != 2 then + return -1 +endi + +if $data00 != tb2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi + +sql select tbname,id6 from st2 where id6 between 2.0 and 3.0; + +if $rows != 2 then + return -1 +endi + +if $data00 != tb2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi + +sql select * from st2 where f1 between 2 and 3 and f2 between 2.0 and 3.0 and f3 between 2.0 and 3.0 and f4 between 2.0 and 3.0 and f5 between 2.0 and 3.0 and f6 between 2.0 and 3.0; + +if $rows != 2 then + return -1 +endi + +if $data01 != 2 then + return -1 +endi +if $data11 != 3 then + return -1 +endi + +sql_error select * from st2 where f7 between 2.0 and 3.0; +sql_error select * from st2 where f8 between 2.0 and 3.0; +sql_error select * from st2 where f9 between 2.0 and 3.0; + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim index 51014a56c137931ea1fee3e024340c150085178d..02faa5db17fa2fb34b0bd86da2717c8f79e91f1b 100644 --- a/tests/script/general/parser/col_arithmetic_operation.sim +++ b/tests/script/general/parser/col_arithmetic_operation.sim @@ -111,7 +111,7 @@ system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 500c +sleep 5000 run general/parser/col_arithmetic_query.sim diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 0588a3a60b640841218d3e531e3a901c55face4e..2c56c6445fa8134ab28837940a11ff6f4127b7c7 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -41,10 +41,10 @@ if $data00 != 0.000000000 then return -1 endi -if $data01 != -nan then - print expect -nan, actual: $data01 - return -1 -endi +#if $data01 != -nan then +# print expect -nan, actual: $data01 +# return -1 +#endi if $data10 != 0.666666667 then return -1 @@ -185,9 +185,9 @@ if $data00 != 0.000000000 then return -1 endi -if $data01 != -nan then - return -1 -endi +#if $data01 != -nan then +# return -1 +#endi if $data02 != 0.000000000 then return -1 diff --git a/tests/script/general/parser/columnValue_unsign.sim b/tests/script/general/parser/columnValue_unsign.sim index 
6e9a37fdb60f3ddbb847a092c4a7745316c68b01..8e44ccb5facf074691f3aeeb7c60099ab6ef691f 100644 --- a/tests/script/general/parser/columnValue_unsign.sim +++ b/tests/script/general/parser/columnValue_unsign.sim @@ -83,7 +83,7 @@ if $data03 != NULL then endi sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now, 1, 2, 3, 4, 5, 6, 7, 8, 9); +sql insert into mt_unsigned_1 values(now+1s, 1, 2, 3, 4, 5, 6, 7, 8, 9); sql_error insert into mt_unsigned_1 values(now, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); sql_error insert into mt_unsigned_1 values(now, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL); diff --git a/tests/script/general/parser/create_db.sim b/tests/script/general/parser/create_db.sim index 6cf08a5ac4231ea715c1892842b0e4d122d2520c..ea1cc17a6a779391c8dfa064bf98bf70715d9e70 100644 --- a/tests/script/general/parser/create_db.sim +++ b/tests/script/general/parser/create_db.sim @@ -169,8 +169,8 @@ sql_error create database $db cache 0 sql_error create database $db ctime 29 sql_error create database $db ctime 40961 -# wal {1, 2} -sql_error create database $db wal 0 +# wal {0, 2} +#sql_error create database $db wal 0 sql_error create database $db wal -1 sql_error create database $db wal 3 diff --git a/tests/script/general/parser/create_tb_with_tag_name.sim b/tests/script/general/parser/create_tb_with_tag_name.sim new file mode 100644 index 0000000000000000000000000000000000000000..bbd5fc11e1eb662053860bca4f0d4210c1d1fbbc --- /dev/null +++ b/tests/script/general/parser/create_tb_with_tag_name.sim @@ -0,0 +1,162 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int) tags (id int, t1 int, t2 nchar(4), t3 double) + + +sql insert into tb1 using st2 (id, t1) tags(1,2) values (now, 1) + +sql select id,t1,t2,t3 from tb1 + +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != NULL then + return -1 +endi +if $data03 != NULL then + return -1 +endi + +sql create table tb2 using st2 (t2,t3) tags ("12",22.0) + +sql select id,t1,t2,t3 from tb2; + +if $rows != 1 then + return -1 +endi + +if $data00 != NULL then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data02 != 12 then + return -1 +endi +if $data03 != 22.000000000 then + return -1 +endi + + +sql create table tb3 using st2 tags (1,2,"3",33.0); + +sql select id,t1,t2,t3 from tb3; + + +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 3 then + return -1 +endi +if $data03 != 33.000000000 then + return -1 +endi + +sql insert into tb4 using st2 tags(1,2,"33",44.0) values (now, 1); + +sql select id,t1,t2,t3 from tb4; + +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 33 then + return -1 +endi +if $data03 != 44.000000000 then + return -1 +endi + +sql_error create table tb5 using st2() tags (3,3,"3",33.0); + +sql_error create table tb6 using st2 (id,t1) tags (3,3,"3",33.0); + +sql_error create table tb7 using st2 (id,t1) tags (3); + +sql_error create table tb8 using st2 (ide) tags (3); 
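+# Malformed tag-name lists must also be rejected: a missing tags
+# clause, a missing comma, and stray or trailing commas; reordered
+# (tb12) and quoted (tb13) tag names, by contrast, are accepted.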
+ +sql_error create table tb9 using st2 (id); + +sql_error create table tb10 using st2 (id t1) tags (1,1); + +sql_error create table tb10 using st2 (id,,t1) tags (1,1,1); + +sql_error create table tb11 using st2 (id,t1,) tags (1,1,1); + +sql create table tb12 using st2 (t1,id) tags (2,1); + +sql select id,t1,t2,t3 from tb12; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != NULL then + return -1 +endi +if $data03 != NULL then + return -1 +endi + +sql create table tb13 using st2 ("t1",'id') tags (2,1); + +sql select id,t1,t2,t3 from tb13; + +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != NULL then + return -1 +endi +if $data03 != NULL then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index ca020c40631c5ecb77ef0d808fa8251e6014865b..af16bfd4f18a40d9964b6f4f7d4f6ea0840937a2 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -763,3 +763,20 @@ endi if $data01 != 1.414213562 then return -1 endi + +sql create stable st1 (ts timestamp, f1 int, f2 int) tags (id int); +sql create table tb1 using st1 tags(1); + +sql insert into tb1 values (now, 1, 1); + +sql select stddev(f1) from st1 group by f1; + +if $rows != 1 then + return -1 +endi + + +if $data00 != 0.000000000 then + return -1 +endi + diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 59e03c55a068d57d77ede3c26bb60eba1e747fa7..6d730ba7619508a1780a17002a224bbc315c74eb 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -83,7 +83,7 @@ #run general/parser/join.sim #sleep 100 #run general/parser/join_multivnode.sim -#sleep 100 +sleep 100 run general/parser/projection_limit_offset.sim sleep 100 run general/parser/select_with_tags.sim diff --git a/tests/script/general/user/monitor.sim b/tests/script/general/user/monitor.sim index fe12df9baaf1f6ae3810e51d1fdc0020b67cbb40..016848c06d1a5a08f8e78a2e2ea8cdc5acc68744 100644 --- a/tests/script/general/user/monitor.sim +++ b/tests/script/general/user/monitor.sim @@ -15,18 +15,18 @@ print ========== step2 # return -1 #step21: sql drop table log.dn -x step22 - return -1 +# return -1 step22: sql drop user log -x step23 - return -1 +# return -1 step23: print ========== step3 sleep 2000 -sql select * from log.dn -if $rows == 0 then - return -1 -endi +#sql select * from log.dn +#if $rows == 0 then +# return -1 +#endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/jenkins/basic_2.txt b/tests/script/jenkins/basic_2.txt index 20e574711a8b2989ab017611b49aa2c1f4b52027..5a2a6f4062e9f35f9ef77222383ecfc800ea7574 100644 --- a/tests/script/jenkins/basic_2.txt +++ b/tests/script/jenkins/basic_2.txt @@ -72,4 +72,3 @@ cd ../../../debug; make ./test.sh -f unique/cluster/cache.sim ./test.sh -f unique/cluster/vgroup100.sim -./test.sh -f unique/column/replica3.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_4.txt b/tests/script/jenkins/basic_4.txt index 895281f218717af7dac3b8e1f890ce3e03b81609..5a7d23df719f737d0c5a3c85acfd3875b87872ab 100644 --- a/tests/script/jenkins/basic_4.txt +++ b/tests/script/jenkins/basic_4.txt @@ -1,14 +1,3 @@ -./test.sh -f 
unique/dnode/alternativeRole.sim -./test.sh -f unique/dnode/balance1.sim -./test.sh -f unique/dnode/balance2.sim -./test.sh -f unique/dnode/balance3.sim -./test.sh -f unique/dnode/balancex.sim -./test.sh -f unique/dnode/offline1.sim -./test.sh -f unique/dnode/offline2.sim -./test.sh -f unique/dnode/reason.sim -./test.sh -f unique/dnode/remove1.sim -./test.sh -f unique/dnode/remove2.sim -./test.sh -f unique/dnode/vnode_clean.sim ./test.sh -f unique/http/admin.sim ./test.sh -f unique/http/opentsdb.sim @@ -46,4 +35,12 @@ ./test.sh -f general/stable/refcount.sim ./test.sh -f general/stable/show.sim ./test.sh -f general/stable/values.sim -./test.sh -f general/stable/vnode3.sim \ No newline at end of file +./test.sh -f general/stable/vnode3.sim + +./test.sh -f unique/column/replica3.sim +./test.sh -f issue/TD-2713.sim +./test.sh -f general/parser/select_distinct_tag.sim +./test.sh -f unique/mnode/mgmt30.sim +./test.sh -f issue/TD-2677.sim +./test.sh -f issue/TD-2680.sim +./test.sh -f unique/dnode/lossdata.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_5.txt b/tests/script/jenkins/basic_5.txt index 66c2ce36b2c60458471fe9c8c56c752c9cb641d4..f89be9499e7a672a3c72646614552a43d1537463 100644 --- a/tests/script/jenkins/basic_5.txt +++ b/tests/script/jenkins/basic_5.txt @@ -1,3 +1,11 @@ +./test.sh -f unique/dnode/alternativeRole.sim +./test.sh -f unique/dnode/balance1.sim +./test.sh -f unique/dnode/balance2.sim +./test.sh -f unique/dnode/balance3.sim +./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/offline1.sim +./test.sh -f unique/dnode/offline2.sim + ./test.sh -f general/stream/metrics_del.sim ./test.sh -f general/stream/metrics_replica1_vnoden.sim ./test.sh -f general/stream/restart_stream.sim diff --git a/tests/script/jenkins/basic_6.txt b/tests/script/jenkins/basic_6.txt index 893346e6ca70529aeb9f37e17f6bf4edfb125183..9156360a9f548ba17d9b96d297e839e6b74aaa55 100644 --- a/tests/script/jenkins/basic_6.txt +++ b/tests/script/jenkins/basic_6.txt @@ -1,3 +1,8 @@ +./test.sh -f unique/dnode/reason.sim +./test.sh -f unique/dnode/remove1.sim +./test.sh -f unique/dnode/remove2.sim +./test.sh -f unique/dnode/vnode_clean.sim + ./test.sh -f unique/db/commit.sim ./test.sh -f unique/db/delete.sim ./test.sh -f unique/db/delete_part.sim diff --git a/tests/script/sh/deploy.bat b/tests/script/sh/deploy.bat index 04c7b8a660ee48c02a0d97583ac2b22d06323e7c..921f1611fb5ea80bbeb746b693d7c529c421ef27 100644 --- a/tests/script/sh/deploy.bat +++ b/tests/script/sh/deploy.bat @@ -65,24 +65,24 @@ echo serverPort %NODE% >> %TAOS_CFG% echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo debugFlag 0 >> %TAOS_CFG% -echo mDebugFlag 143 >> %TAOS_CFG% -echo sdbDebugFlag 143 >> %TAOS_CFG% -echo dDebugFlag 143 >> %TAOS_CFG% -echo vDebugFlag 143 >> %TAOS_CFG% -echo tsdbDebugFlag 143 >> %TAOS_CFG% -echo cDebugFlag 143 >> %TAOS_CFG% -echo jnidebugFlag 143 >> %TAOS_CFG% -echo odbcdebugFlag 143 >> %TAOS_CFG% -echo httpDebugFlag 143 >> %TAOS_CFG% -echo monDebugFlag 143 >> %TAOS_CFG% -echo mqttDebugFlag 143 >> %TAOS_CFG% -echo qdebugFlag 143 >> %TAOS_CFG% -echo rpcDebugFlag 143 >> %TAOS_CFG% +echo mDebugFlag 135 >> %TAOS_CFG% +echo sdbDebugFlag 135 >> %TAOS_CFG% +echo dDebugFlag 135 >> %TAOS_CFG% +echo vDebugFlag 135 >> %TAOS_CFG% +echo tsdbDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 135 >> %TAOS_CFG% +echo jnidebugFlag 135 >> %TAOS_CFG% +echo odbcdebugFlag 135 >> %TAOS_CFG% +echo httpDebugFlag 135 >> %TAOS_CFG% +echo monDebugFlag 135 >> %TAOS_CFG% +echo mqttDebugFlag 135 >> 
%TAOS_CFG% +echo qdebugFlag 135 >> %TAOS_CFG% +echo rpcDebugFlag 135 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo udebugFlag 143 >> %TAOS_CFG% -echo sdebugFlag 143 >> %TAOS_CFG% -echo wdebugFlag 143 >> %TAOS_CFG% -echo cqdebugFlag 143 >> %TAOS_CFG% +echo udebugFlag 135 >> %TAOS_CFG% +echo sdebugFlag 135 >> %TAOS_CFG% +echo wdebugFlag 135 >> %TAOS_CFG% +echo cqdebugFlag 135 >> %TAOS_CFG% echo monitor 0 >> %TAOS_CFG% echo monitorInterval 1 >> %TAOS_CFG% echo http 0 >> %TAOS_CFG% diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim index 3d8e5a8c4411f1ae92d04d95596b052c26fe6889..7d1e6b03d4547a6b0b2a6a7857000a8a6518a002 100644 --- a/tests/script/unique/http/opentsdb.sim +++ b/tests/script/unique/http/opentsdb.sim @@ -169,7 +169,7 @@ endi system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then return -1 endi @@ -186,7 +186,7 @@ system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_w print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then +if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then return -1 endi @@ -194,7 +194,7 @@ system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"data":[[3]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then return -1 endi @@ -211,7 +211,7 @@ system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_w print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then +if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then return -1 endi @@ -219,7 +219,7 @@ system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"data":[[2]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then return -1 endi @@ -233,7 +233,7 @@ system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 134 system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"data":[[7]],"rows":1}@ then +if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then return -1 endi @@ -244,4 +244,4 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT system 
sh/exec.sh -n dnode6 -s stop -x SIGINT system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/test-all.sh b/tests/test-all.sh index 2374750be461e7f19e0e154a3ed4461506e6a35a..4f7afe7d17bc57d6f62a5d68cac3277914dedb50 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -7,6 +7,21 @@ GREEN_DARK='\033[0;32m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' +tests_dir=`pwd` +IN_TDINTERNAL="community" + +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + function dohavecore(){ corefile=`find $corepath -mmin 1` if [ -n "$corefile" ];then @@ -19,10 +34,9 @@ function dohavecore(){ function runSimCaseOneByOne { while read -r line; do if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then - case=`echo $line | grep sim$ |awk '{print $NF}'` - IN_TDINTERNAL="community" - start_time=`date +%s` - IN_TDINTERNAL="community" + case=`echo $line | grep sim$ |awk '{print $NF}'` + start_time=`date +%s` + date +%F\ %T | tee -a out.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then echo -n $case ./test.sh -f $case > /dev/null 2>&1 && \ @@ -51,8 +65,8 @@ function runSimCaseOneByOnefq { if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then case=`echo $line | grep sim$ |awk '{print $NF}'` - start_time=`date +%s` - IN_TDINTERNAL="community" + start_time=`date +%s` + date +%F\ %T | tee -a out.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then echo -n $case ./test.sh -f $case > /dev/null 2>&1 && \ @@ -94,6 +108,7 @@ function runPyCaseOneByOne { case=`echo $line|awk '{print $NF}'` fi start_time=`date +%s` + date +%F\ %T | tee -a pytest-out.log echo -n $case $line > /dev/null 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ @@ -122,6 +137,7 @@ function runPyCaseOneByOnefq { case=`echo $line|awk '{print $NF}'` fi start_time=`date +%s` + date +%F\ %T | tee -a pytest-out.log echo -n $case $line > /dev/null 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ @@ -140,12 +156,14 @@ function runPyCaseOneByOnefq { fi done < $1 } + totalFailed=0 totalPyFailed=0 +totalJDBCFailed=0 +totalUnitFailed=0 -tests_dir=`pwd` corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` -if [ "$2" != "python" ]; then +if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then echo "### run TSIM test case ###" cd $tests_dir/script @@ -214,11 +232,10 @@ if [ "$2" != "python" ]; then fi fi -if [ "$2" != "sim" ]; then +if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then echo "### run Python test case ###" cd $tests_dir - IN_TDINTERNAL="community" if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then cd ../.. 
@@ -261,6 +278,9 @@ if [ "$2" != "sim" ]; then elif [ "$1" == "p3" ]; then echo "### run Python_3 test ###" runPyCaseOneByOnefq pytest_3.sh + elif [ "$1" == "p4" ]; then + echo "### run Python_4 test ###" + runPyCaseOneByOnefq pytest_4.sh elif [ "$1" == "b2" ] || [ "$1" == "b3" ]; then exit $(($totalFailed + $totalPyFailed)) elif [ "$1" == "smoke" ] || [ -z "$1" ]; then @@ -280,4 +300,84 @@ if [ "$2" != "sim" ]; then fi fi -exit $(($totalFailed + $totalPyFailed)) + +if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then + echo "### run JDBC test cases ###" + + cd $tests_dir + + if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then + cd ../../ + else + cd ../ + fi + + pwd + cd debug/ + + stopTaosd + nohup build/bin/taosd -c /etc/taos/ > /dev/null 2>&1 & + sleep 30 + + cd $tests_dir/../src/connector/jdbc + + mvn test > jdbc-out.log 2>&1 + tail -n 20 jdbc-out.log + + cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'` + totalJDBCCases=`echo ${cases/%,}` + failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'` + JDBCFailed=`echo ${failed/%,}` + error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'` + JDBCError=`echo ${error/%,}` + + totalJDBCFailed=`expr $JDBCFailed + $JDBCError` + totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed` + + if [ "$totalJDBCSuccess" -gt "0" ]; then + echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}" + fi + + if [ "$totalJDBCFailed" -ne "0" ]; then + echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}" + fi + dohavecore 1 +fi + +if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" == "full" ]; then + echo "### run Unit tests ###" + + stopTaosd + cd $tests_dir + + if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then + cd ../../ + else + cd ../ + fi + + pwd + cd debug/build/bin + nohup ./taosd -c /etc/taos/ > /dev/null 2>&1 & + sleep 30 + + pwd + ./queryTest > unittest-out.log 2>&1 + tail -n 20 unittest-out.log + + totalUnitTests=`grep "Running" unittest-out.log | awk '{print $3}'` + totalUnitSuccess=`grep 'PASSED' unittest-out.log | awk '{print $4}'` + totalUnitFailed=`expr $totalUnitTests - $totalUnitSuccess` + + if [ "$totalUnitSuccess" -gt "0" ]; then + echo -e "\n${GREEN} ### Total $totalUnitSuccess Unit test succeed! ### ${NC}" + fi + + if [ "$totalUnitFailed" -ne "0" ]; then + echo -e "\n${RED} ### Total $totalUnitFailed Unit test failed! 
### ${NC}" + fi + dohavecore 1 +fi + + +exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed)) diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt index 33e1528b70b5525da23723dc9bf6309870d172ee..2eb8ee1614b286f3827705865cf073a7eded0c88 100644 --- a/tests/test/c/CMakeLists.txt +++ b/tests/test/c/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 83ca46599cbb7e219bf1c67123373d81159db4ec..dbda5f1bbdd01a451376f8afe7717b7290980796 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -146,7 +146,7 @@ char *simGetVariable(SScript *script, char *varName, int32_t varLen) { int32_t simExecuteExpression(SScript *script, char *exp) { char * op1, *op2, *var1, *var2, *var3, *rest; int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; - char t0[512], t1[512], t2[512], t3[1024]; + char t0[1024], t1[1024], t2[1024], t3[2048]; int32_t result; rest = paGetToken(exp, &var1, &var1Len); diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index 3a409ecbf900bfe05c423963020d7d3a37cf4771..40937e70532897fa6776965b861a8d678532d8aa 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -81,7 +81,9 @@ char *simParseHostName(char *varName) { } bool simSystemInit() { - taos_init(); + if (taos_init()) { + return false; + } taosGetFqdn(simHostName); simInitsimCmdList(); memset(simScriptList, 0, sizeof(SScript *) * MAX_MAIN_SCRIPT_NUM);
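
The simSystem.c hunk above now treats a non-zero return from taos_init() as fatal instead of ignoring it. A minimal standalone sketch of the same guard pattern (assuming, as the hunk implies, that taos_init() returns 0 on success, and that the client's usual taos_cleanup() counterpart is available for shutdown):

    #include <stdio.h>
    #include "taos.h"

    int main(void) {
      /* Initialize the TDengine client before any other taos_* call;
         a non-zero return means the environment is unusable. */
      if (taos_init() != 0) {
        fprintf(stderr, "taos_init failed, aborting\n");
        return 1;
      }

      /* ... connect, run scripts or queries here ... */

      taos_cleanup();  /* release client-library resources */
      return 0;
    }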