diff --git a/.gitignore b/.gitignore index 9772284ef1c29fe3005195ec394afef3aa8992e8..47eae4dc03e979cdfc23c71b98a44ee0c6b03812 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,9 @@ Target/ *.failed *.sql sim/ +psim/ +pysim/ +*.out *DS_Store # Doxygen Generated files diff --git a/.travis.yml b/.travis.yml index 41937853d671d1178608b9e216338171989f18c4..9bc576dcf9de328032037162f0c882e7ccbf4057 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ # matrix: - os: linux + dist: bionic language: c git: @@ -49,7 +50,7 @@ matrix: ./test-all.sh $TRAVIS_EVENT_TYPE || travis_terminate $? cd ${TRAVIS_BUILD_DIR}/tests/pytest - ./valgrind-test.sh -g 2>&1 | tee mem-error-out.txt + ./valgrind-test.sh 2>&1 > mem-error-out.txt sleep 1 # Color setting @@ -59,9 +60,9 @@ matrix: GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' - grep 'ERROR SUMMARY' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-mem-error-out.txt + grep 'start to execute\|ERROR SUMMARY' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-mem-error-out.txt - for memError in `cat uniq-mem-error-out.txt | awk '{print $4}'` + for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.txt | awk '{print $4}'` do if [ -n "$memError" ]; then if [ "$memError" -gt 12 ]; then @@ -72,8 +73,8 @@ matrix: fi done - grep 'definitely lost:' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.txt - for defiMemError in `cat uniq-definitely-lost-out.txt | awk '{print $7}'` + grep 'start to execute\|definitely lost:' mem-error-out.txt|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.txt + for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.txt | awk '{print $7}'` do if [ -n "$defiMemError" ]; then if [ "$defiMemError" -gt 13 ]; then diff --git a/README.md b/README.md index 0babfe77763812ae1251e6ac0773091e29724a01..d75680cb22204011b628f45990355982a097ef64 100644 --- a/README.md +++ b/README.md @@ -115,251 +115,6 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo - [RESTful API](https://www.taosdata.com/en/documentation/connector/#RESTful-Connector) - [Node.js](https://www.taosdata.com/en/documentation/connector/#Node.js-Connector) -# How to run the test cases and how to add a new test case? - -### Prepare development environment - -1. sudo apt install - build-essential cmake net-tools python-pip python-setuptools python3-pip - python3-setuptools valgrind psmisc curl - -2. git clone ; cd TDengine - -3. mkdir debug; cd debug; cmake ..; make ; sudo make install - -4. pip install src/connector/python/linux/python2 ; pip3 install - src/connector/python/linux/python3 - -### How to run TSIM test suite - -1. cd \/tests/script - -2. sudo ./test.sh - -### How to run Python test suite - -1. cd \/tests/pytest - -2. ./smoketest.sh \# for smoke test - -3. ./smoketest.sh -g \# for memory leak detection test with valgrind - -4. ./fulltest.sh \# for full test - -> Note1: TDengine daemon's configuration and data files are stored in -> \/sim directory. As a historical design, it's same place with -> TSIM script. So after the TSIM script ran with sudo privilege, the directory -> has been used by TSIM then the python script cannot write it by a normal -> user. You need to remove the directory completely first before running the -> Python test case. We should consider using two different locations to store -> for TSIM and Python script. - -> Note2: if you need to debug crash problem with a core dump, you need -> manually edit smoketest.sh or fulltest.sh to add "ulimit -c unlimited" -> before the script line. 
Then you can look for the core file in -> \/tests/pytest after the program crash. - -### How to add a new test case - -**1. add a new TSIM test cases:** - -TSIM test cases are now included in the new development branch and can be -added to the TDengine/tests/script/test.sh script based on the manual test -methods necessary to add test cases as described above. - -**2. add a new Python test cases:** - -**2.1 Please refer to \/tests/pytest/insert/basic.py to add a new -test case.** The new test case must implement 3 functions, where self.init() -and self.stop() simply copy the contents of insert/basic.py and the test -logic is implemented in self.run(). You can refer to the code in the util -directory for more information. - -**2.2 Edit smoketest.sh to add the path and filename of the new test case** - -Note: The Python test framework may continue to be improved in the future, -hopefully, to provide more functionality and ease of writing test cases. The -method of writing the test case above does not exclude that it will also be -affected. - -**2.3 What test.py does in detail:** - -test.py is the entry program for test case execution and monitoring. - -test.py has the following functions. - -\-f --file, Specifies the test case file name to be executed --p --path, Specifies deployment path - -\-m --master, Specifies the master server IP for cluster deployment --c--cluster, test cluster function --s--stop, terminates all running nodes - -\-g--valgrind, load valgrind for memory leak detection test - -\-h--help, display help - -**2.4 What util/log.py does in detail:** - -log.py is quite simple, the main thing is that you can print the output in -different colors as needed. The success() should be called for successful -test case execution and the success() will print green text. The exit() will -print red text and exit the program, exit() should be called for test -failure. - -**util/log.py** - -... - -    def info(self, info): - -        printf("%s %s" % (datetime.datetime.now(), info)) - -  - -    def sleep(self, sec): - -        printf("%s sleep %d seconds" % (datetime.datetime.now(), sec)) - -        time.sleep(sec) - -  - -    def debug(self, err): - -        printf("\\033[1;36m%s %s\\033[0m" % (datetime.datetime.now(), err)) - -  - -    def success(self, info): - -        printf("\\033[1;32m%s %s\\033[0m" % (datetime.datetime.now(), info)) - -  - -    def notice(self, err): - -        printf("\\033[1;33m%s %s\\033[0m" % (datetime.datetime.now(), err)) - -  - -    def exit(self, err): - -        printf("\\033[1;31m%s %s\\033[0m" % (datetime.datetime.now(), err)) - -        sys.exit(1) - -  - -    def printNoPrefix(self, info): - -        printf("\\033[1;36m%s\\033[0m" % (info) - -... - -**2.5 What util/sql.py does in detail:** - -SQL.py is mainly used to execute SQL statements to manipulate the database, -and the code is extracted and commented as follows: - -**util/sql.py** - -\# prepare() is mainly used to set up the environment for testing table and -data, and to set up the database db for testing. do not call prepare() if you -need to test the database operation command. - -def prepare(self): - -tdLog.info("prepare database:db") - -self.cursor.execute('reset query cache') - -self.cursor.execute('drop database if exists db') - -self.cursor.execute('create database db') - -self.cursor.execute('use db') - -... - -\# query() is mainly used to execute select statements for normal syntax input - -def query(self, sql): - -... 
- -\# error() is mainly used to execute the select statement with the wrong syntax -input, the error will be caught as a reasonable behavior, if not caught it will -prove that the test failed - -def error() - -... - -\# checkRows() is used to check the number of returned lines after calling -query(select ...) after calling the query(select ...) to check the number of -rows of returned results. - -def checkRows(self, expectRows): - -... - -\# checkData() is used to check the returned result data after calling -query(select ...) after the query(select ...) is called, failure to meet -expectation is - -def checkData(self, row, col, data): - -... - -\# getData() returns the result data after calling query(select ...) to return -the resulting data after calling query(select ...) - -def getData(self, row, col): - -... - -\# execute() used to execute sql and return the number of affected rows - -def execute(self, sql): - -... - -\# executeTimes() Multiple executions of the same sql statement - -def executeTimes(self, sql, times): - -... - -\# CheckAffectedRows() Check if the number of affected rows is as expected - -def checkAffectedRows(self, expectAffectedRows): - -... - -> Note: Both Python2 and Python3 are currently supported by the Python test -> case. Since Python2 is no longer officially supported by January 1, 2020, it -> is recommended that subsequent test case development be guaranteed to run -> correctly on Python3. For Python2, please consider being compatible if -> appropriate without additional -> burden.   - -### CI Covenant submission adoption principle. - -- Every commit / PR compilation must pass. Currently, the warning is treated - as an error, so the warning must also be resolved. - -- Test cases that already exist must pass. - -- Because CI is very important to support build and automatically test - procedure, it is necessary to manually test the test case before adding it - and do as many iterations as possible to ensure that the test case provides - stable and reliable test results when added. - -> Note: In the future, according to the requirements and test development -> progress will add stress testing, performance testing, code style, -> and other features based on functional testing. - ### Third Party Connectors The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them. @@ -367,6 +122,10 @@ The TDengine community has also kindly built some of their own connectors! Follo - [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust) - [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos) +# How to run the test cases and how to add a new test case? + TDengine's test framework and all test cases are fully open source. + Please refer to [this document](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) for how to run test and develop new test case. 
+ # TDengine Roadmap - Support event-driven stream computing - Support user defined functions diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 1654f76f1c5bd5feef17e3f25c08a87f286608d9..715d76e072cae10cc266ec9182b9fda806962e83 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -43,7 +43,7 @@ extern "C" { typedef struct SParsedColElem { int16_t colIndex; - int16_t offset; + uint16_t offset; } SParsedColElem; typedef struct SParsedDataColInfo { @@ -264,6 +264,7 @@ bool hasMoreVnodesToTry(SSqlObj *pSql); void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()); +int tscSetMgmtIpListFromCfg(const char *first, const char *second); void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index a7eec31388d3a4fa389b164bb721316c9453b1ee..e6a37a274551cde9e1f24a90f57b47c281f33d3e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -49,7 +49,7 @@ typedef struct STableComInfo { uint8_t numOfTags; uint8_t precision; int16_t numOfColumns; - int16_t rowSize; + int32_t rowSize; } STableComInfo; typedef struct STableMeta { diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 5675416e6b999c4be8efaf75586fda9a712892e3..c6e8499426450cbb2046d69efb2876a85bdf9937 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -3620,299 +3620,6 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } -static void getStatics_i8(int64_t *primaryKey, int32_t type, int8_t *data, int32_t numOfRow, int64_t *min, int64_t *max, - int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = INT64_MAX; - *max = INT64_MIN; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - // int64_t lastKey = 0; - // int8_t lastVal = TSDB_DATA_TINYINT_NULL; - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((char *)&data[i], type)) { - (*numOfNull) += 1; - continue; - } - - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; - } - - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; - } - - // if (type != TSDB_DATA_TYPE_BOOL) { // ignore the bool data type pre-calculation - // if (isNull((char *)&lastVal, type)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } - // } - } -} - -static void getStatics_i16(int64_t *primaryKey, int16_t *data, int32_t numOfRow, int64_t *min, int64_t *max, - int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = INT64_MAX; - *max = INT64_MIN; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - // int64_t lastKey = 0; - // int16_t lastVal = TSDB_DATA_SMALLINT_NULL; - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) { - (*numOfNull) += 1; - continue; - } - - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; - } - - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; - } - - // if (isNull(&lastVal, TSDB_DATA_TYPE_SMALLINT)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; 
- // } - } -} - -static void getStatics_i32(int64_t *primaryKey, int32_t *data, int32_t numOfRow, int64_t *min, int64_t *max, - int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = INT64_MAX; - *max = INT64_MIN; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - // int64_t lastKey = 0; - // int32_t lastVal = TSDB_DATA_INT_NULL; - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) { - (*numOfNull) += 1; - continue; - } - - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; - } - - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; - } - - // if (isNull(&lastVal, TSDB_DATA_TYPE_INT)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } - } -} - -static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow, int64_t *min, int64_t *max, - int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - *min = INT64_MAX; - *max = INT64_MIN; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) { - (*numOfNull) += 1; - continue; - } - - *sum += data[i]; - if (*min > data[i]) { - *min = data[i]; - *minIndex = i; - } - - if (*max < data[i]) { - *max = data[i]; - *maxIndex = i; - } - - // if (isNull(&lastVal, TSDB_DATA_TYPE_BIGINT)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } - } -} - -static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, double *min, double *max, double *sum, - int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - float fmin = DBL_MAX; - float fmax = -DBL_MAX; - double dsum = 0; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((const char*) &data[i], TSDB_DATA_TYPE_FLOAT)) { - (*numOfNull) += 1; - continue; - } - - float fv = 0; - fv = GET_FLOAT_VAL(&(data[i])); - dsum += fv; - if (fmin > fv) { - fmin = fv; - *minIndex = i; - } - - if (fmax < fv) { - fmax = fv; - *maxIndex = i; - } - - // if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } - } - - double csum = 0; - csum = GET_DOUBLE_VAL(sum); - csum += dsum; -#ifdef _TD_ARM_32_ - SET_DOUBLE_VAL_ALIGN(sum, &csum); - SET_DOUBLE_VAL_ALIGN(max, &fmax); - SET_DOUBLE_VAL_ALIGN(min, &fmin); -#else - *sum = csum; - *max = fmax; - *min = fmin; -#endif -} - -static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, double *min, double *max, double *sum, - int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - double dmin = DBL_MAX; - double dmax = -DBL_MAX; - double dsum = 0; - *minIndex = 0; - *maxIndex = 0; - - assert(numOfRow <= INT16_MAX); - - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull((const char*) &data[i], TSDB_DATA_TYPE_DOUBLE)) { - (*numOfNull) += 1; - continue; - } - - double dv = 0; - dv = GET_DOUBLE_VAL(&(data[i])); - dsum += dv; - if (dmin > dv) { - dmin = dv; - *minIndex = i; - } - - if (dmax < dv) { - dmax = dv; - *maxIndex = i; - } - - // if (isNull(&lastVal, 
TSDB_DATA_TYPE_DOUBLE)) { - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } else { - // *wsum = lastVal * (primaryKey[i] - lastKey); - // lastKey = primaryKey[i]; - // lastVal = data[i]; - // } - } - - double csum = 0; - csum = GET_DOUBLE_VAL(sum); - csum += dsum; - - -#ifdef _TD_ARM_32_ - SET_DOUBLE_VAL_ALIGN(sum, &csum); - SET_DOUBLE_VAL_ALIGN(max, &dmax); - SET_DOUBLE_VAL_ALIGN(min, &dmin); -#else - *sum = csum; - *max = dmax; - *min = dmin; -#endif -} - -void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max, - int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int32_t *numOfNull) { - int64_t *primaryKey = (int64_t *)priData; - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - for (int32_t i = 0; i < numOfRow; ++i) { - if (isNull(data + i * size, type)) { - (*numOfNull) += 1; - continue; - } - } - } else { - if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) { - getStatics_i8(primaryKey, type, (int8_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); - } else if (type == TSDB_DATA_TYPE_SMALLINT) { - getStatics_i16(primaryKey, (int16_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); - } else if (type == TSDB_DATA_TYPE_INT) { - getStatics_i32(primaryKey, (int32_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); - } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { - getStatics_i64(primaryKey, (int64_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull); - } else if (type == TSDB_DATA_TYPE_DOUBLE) { - getStatics_d(primaryKey, (double *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); - } else if (type == TSDB_DATA_TYPE_FLOAT) { - getStatics_f(primaryKey, (float *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull); - } - } -} /** * param[1]: start time diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 7e67ff82e9bac2a19d393bcb89949ac1983a384c..36b1ab59931851c2f2e1fc4a5f9bea89d3b1767a 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1016,7 +1016,9 @@ int doParseInsertSql(SSqlObj *pSql, char *str) { pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); } - if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { + // TODO: 2048 is added because TSDB_MAX_TAGS_LEN now is 65536 + // but TSDB_PAYLOAD_SIZE is 65380 + if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE + 2048)) != TSDB_CODE_SUCCESS) { return code; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index bc717ed88cda2a06ce8ce055e8aeb59b5b335f62..7324363c9d0a9004999883f13b3c8136b6b0ad8e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -27,6 +27,7 @@ #include "ttimer.h" #include "tutil.h" #include "tscLog.h" +#include "qsqltype.h" #define TSC_MGMT_VNODE 999 @@ -67,7 +68,7 @@ void tscPrintMgmtIp() { } } -void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) { +void tscSetMgmtIpList(SRpcIpSet *pIpList) { tscMgmtIpSet.numOfIps = pIpList->numOfIps; tscMgmtIpSet.inUse = pIpList->inUse; for (int32_t i = 0; i < tscMgmtIpSet.numOfIps; ++i) { @@ -75,16 +76,6 @@ void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) { } } -void tscSetMgmtIpListFromEdge() { - if (tscMgmtIpSet.numOfIps != 1) { - tscMgmtIpSet.numOfIps = 1; - tscMgmtIpSet.inUse = 0; - taosGetFqdnPortFromEp(tsFirst, tscMgmtIpSet.fqdn[0], &tscMgmtIpSet.port[0]); - 
tscTrace("edge mgmt IP list:"); - tscPrintMgmtIp(); - } -} - void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) { tscMgmtIpSet = *pIpSet; tscTrace("mgmt IP list is changed for ufp is called, numOfIps:%d inUse:%d", tscMgmtIpSet.numOfIps, tscMgmtIpSet.inUse); @@ -93,18 +84,6 @@ void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) { } } -void tscSetMgmtIpList(SRpcIpSet *pIpList) { - /* - * The iplist returned by the cluster edition is the current management nodes - * and the iplist returned by the edge edition is empty - */ - if (pIpList->numOfIps != 0) { - tscSetMgmtIpListFromCluster(pIpList); - } else { - tscSetMgmtIpListFromEdge(); - } -} - /* * For each management node, try twice at least in case of poor network situation. * If the client start to connect to a non-management node from the client, and the first retry may fail due to @@ -132,7 +111,8 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (code == 0) { SCMHeartBeatRsp *pRsp = (SCMHeartBeatRsp *)pRes->pRsp; SRpcIpSet * pIpList = &pRsp->ipList; - tscSetMgmtIpList(pIpList); + if (pIpList->numOfIps > 0) + tscSetMgmtIpList(pIpList); if (pRsp->killConnection) { tscKillConnection(pObj); @@ -207,7 +187,7 @@ int tscSendMsgToServer(SSqlObj *pSql) { memcpy(pMsg, pSql->cmd.payload, pSql->cmd.payloadLen); } - tscTrace("%p msg:%s is sent to server", pSql, taosMsg[pSql->cmd.msgType]); + // tscTrace("%p msg:%s is sent to server", pSql, taosMsg[pSql->cmd.msgType]); SRpcMsg rpcMsg = { .msgType = pSql->cmd.msgType, @@ -235,7 +215,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; STscObj *pObj = pSql->pTscObj; - tscTrace("%p msg:%p is received from server", pSql, rpcMsg->pCont); + // tscTrace("%p msg:%s is received from server", pSql, taosMsg[rpcMsg->msgType]); if (pSql->freed || pObj->signature != pObj) { tscTrace("%p sql is already released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed, @@ -340,10 +320,10 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) { pMsg->numOfFailedBlocks = htonl(pMsg->numOfFailedBlocks); pRes->numOfRows += pMsg->affectedRows; - tscTrace("%p cmd:%d code:%s, inserted rows:%d, rsp len:%d", pSql, pCmd->command, tstrerror(pRes->code), - pMsg->affectedRows, pRes->rspLen); + tscTrace("%p SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql, sqlCmd[pCmd->command], + tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen); } else { - tscTrace("%p cmd:%d code:%s rsp len:%d", pSql, pCmd->command, tstrerror(pRes->code), pRes->rspLen); + tscTrace("%p SQL cmd:%s, code:%s rspLen:%d", pSql, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen); } } @@ -426,7 +406,7 @@ int tscProcessSql(SSqlObj *pSql) { assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0); } - tscTrace("%p SQL cmd:%d will be processed, name:%s, type:%d", pSql, pCmd->command, name, type); + tscTrace("%p SQL cmd:%s will be processed, name:%s, type:%d", pSql, sqlCmd[pCmd->command], name, type); if (pCmd->command < TSDB_SQL_MGMT) { // the pTableMetaInfo cannot be NULL if (pTableMetaInfo == NULL) { pSql->res.code = TSDB_CODE_OTHERS; @@ -1487,15 +1467,16 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char * pMsg; int msgLen = 0; - char *tmpData = 0; - if (pSql->cmd.allocSize > 0) { - tmpData = calloc(1, pSql->cmd.allocSize); + char *tmpData = NULL; + uint32_t len = pSql->cmd.payloadLen; + if (len > 0) { + tmpData = calloc(1, len); if (NULL == 
tmpData) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } // STagData is in binary format, strncpy is not available - memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); + memcpy(tmpData, pSql->cmd.payload, len); } SSqlCmd * pCmd = &pSql->cmd; @@ -1509,9 +1490,9 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg); - if (pSql->cmd.autoCreated) { - memcpy(pInfoMsg->tags, tmpData, sizeof(STagData)); - pMsg += sizeof(STagData); + if (pSql->cmd.autoCreated && len > 0) { + memcpy(pInfoMsg->tags, tmpData, len); + pMsg += len; } pCmd->payloadLen = pMsg - (char*)pInfoMsg;; @@ -2224,7 +2205,8 @@ int tscProcessConnectRsp(SSqlObj *pSql) { assert(len <= tListLen(pObj->db)); strncpy(pObj->db, temp, tListLen(pObj->db)); - tscSetMgmtIpList(&pConnect->ipList); + if (pConnect->ipList.numOfIps > 0) + tscSetMgmtIpList(&pConnect->ipList); strcpy(pObj->sversion, pConnect->serverVersion); pObj->writeAuth = pConnect->writeAuth; @@ -2394,7 +2376,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo); pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists - if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { + if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) { tscError("%p malloc failed for payload to get table meta", pSql); free(pNew); @@ -2405,7 +2387,8 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1); strncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, tListLen(pNewMeterMetaInfo->name)); - memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); // tag information if table does not exists. + memcpy(pNew->cmd.payload, pSql->cmd.payload, pSql->cmd.payloadLen); // tag information if table does not exists. + pNew->cmd.payloadLen = pSql->cmd.payloadLen; tscTrace("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated); pNew->fp = tscTableMetaCallBack; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 87292f4fe659adf2c0d54ad08143fbcf2ba36126..d8ec104a5072cd9331e8dfa088f3cd05f0ed1e6e 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -65,32 +65,18 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con terrno = TSDB_CODE_INVALID_PASS; return NULL; } - + + if (ip) { + if (tscSetMgmtIpListFromCfg(ip, NULL) < 0) return NULL; + if (port) tscMgmtIpSet.port[0] = port; + } + void *pDnodeConn = NULL; if (tscInitRpc(user, pass, &pDnodeConn) != 0) { terrno = TSDB_CODE_NETWORK_UNAVAIL; return NULL; } - - tscMgmtIpSet.numOfIps = 0; - - if (ip && ip[0]) { - tscMgmtIpSet.inUse = 0; - tscMgmtIpSet.numOfIps = 1; - strcpy(tscMgmtIpSet.fqdn[0], ip); - tscMgmtIpSet.port[0] = port? 
port: tsDnodeShellPort; - } else { - if (tsFirst[0] != 0) { - taosGetFqdnPortFromEp(tsFirst, tscMgmtIpSet.fqdn[tscMgmtIpSet.numOfIps], &tscMgmtIpSet.port[tscMgmtIpSet.numOfIps]); - tscMgmtIpSet.numOfIps++; - } - - if (tsSecond[0] != 0) { - taosGetFqdnPortFromEp(tsSecond, tscMgmtIpSet.fqdn[tscMgmtIpSet.numOfIps], &tscMgmtIpSet.port[tscMgmtIpSet.numOfIps]); - tscMgmtIpSet.numOfIps++; - } - } - + STscObj *pObj = (STscObj *)calloc(1, sizeof(STscObj)); if (NULL == pObj) { terrno = TSDB_CODE_CLI_OUT_OF_MEMORY; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 5d8652a63140aa6039008c58ac83af9f7588ca4d..5d56fef1e9c9e3ea1a8f4ba6215fcd816b931011 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -23,6 +23,7 @@ #include "tutil.h" #include "tsched.h" #include "tscLog.h" +#include "tscUtil.h" #include "tsclient.h" #include "tglobal.h" #include "tconfig.h" @@ -114,14 +115,10 @@ void taos_init_imp() { taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); } - tscMgmtIpSet.inUse = 0; - tscMgmtIpSet.numOfIps = 1; - taosGetFqdnPortFromEp(tsFirst, tscMgmtIpSet.fqdn[0], &tscMgmtIpSet.port[0]); - - if (tsSecond[0] && strcmp(tsSecond, tsFirst) != 0) { - tscMgmtIpSet.numOfIps = 2; - taosGetFqdnPortFromEp(tsSecond, tscMgmtIpSet.fqdn[1], &tscMgmtIpSet.port[1]); - } + if (tscSetMgmtIpListFromCfg(tsFirst, tsSecond) < 0) { + tscError("failed to init mgmt IP list"); + return; + } tscInitMsgsFp(); int queueSize = tsMaxVnodeConnections + tsMaxMeterConnections + tsMaxMgmtConnections + tsMaxMgmtConnections; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 0062c4dfc12857c175b82d274b79af32ece7b9ee..8570c2b304b3522342ee6a5ee6b53707a4f588f2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1842,6 +1842,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void pNew->fp = fp; pNew->param = param; + pNew->maxRetry = TSDB_MAX_REPLICA_NUM; char* name = pTableMetaInfo->name; STableMetaInfo* pFinalInfo = NULL; @@ -2068,47 +2069,14 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { tscResetForNextRetrieve(pRes); - // in case of async query, set the callback function -// void* fp1 = pSql->fp; + // set the callback function pSql->fp = fp; - -// if (fp1 != NULL) { -// assert(fp != NULL); -// } - int32_t ret = tscProcessSql(pSql); if (ret == TSDB_CODE_SUCCESS) { return; } else {// todo check for failure } - // in case of async query, return now -// if (fp != NULL) { -// return; -// } -// -// if (ret != TSDB_CODE_SUCCESS) { -// pSql->res.code = ret; -// return; -// } -// -// // retrieve data -// assert(pCmd->command == TSDB_SQL_SELECT); -// pCmd->command = TSDB_SQL_FETCH; -// -// if ((ret = tscProcessSql(pSql)) != TSDB_CODE_SUCCESS) { -// pSql->res.code = ret; -// return; -// } -// -// // if the result from current virtual node are empty, try next if exists. otherwise, return the results. -// if (pRes->numOfRows > 0) { -// break; -// } } - -// if (pRes->numOfRows == 0) { -// tscTrace("%p all vnodes exhausted, prj query completed. 
total res:%d", pSql, totalVnode, pRes->numOfTotal); -// } } void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) { @@ -2195,3 +2163,33 @@ char* strdup_throw(const char* str) { } return p; } + +int tscSetMgmtIpListFromCfg(const char *first, const char *second) { + tscMgmtIpSet.numOfIps = 0; + tscMgmtIpSet.inUse = 0; + + if (first && first[0] != 0) { + if (strlen(first) >= TSDB_EP_LEN) { + terrno = TSDB_CODE_INVALID_FQDN; + return -1; + } + taosGetFqdnPortFromEp(first, tscMgmtIpSet.fqdn[tscMgmtIpSet.numOfIps], &tscMgmtIpSet.port[tscMgmtIpSet.numOfIps]); + tscMgmtIpSet.numOfIps++; + } + + if (second && second[0] != 0) { + if (strlen(second) >= TSDB_EP_LEN) { + terrno = TSDB_CODE_INVALID_FQDN; + return -1; + } + taosGetFqdnPortFromEp(second, tscMgmtIpSet.fqdn[tscMgmtIpSet.numOfIps], &tscMgmtIpSet.port[tscMgmtIpSet.numOfIps]); + tscMgmtIpSet.numOfIps++; + } + + if ( tscMgmtIpSet.numOfIps == 0) { + terrno = TSDB_CODE_INVALID_FQDN; + return -1; + } + + return 0; +} diff --git a/src/common/inc/qsqltype.h b/src/common/inc/qsqltype.h new file mode 100644 index 0000000000000000000000000000000000000000..d1e5bcaa523bfceb3f89b870acabf152c0f8ebf1 --- /dev/null +++ b/src/common/inc/qsqltype.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_QSQLCMD_H +#define TDENGINE_QSQLCMD_H + +#ifdef __cplusplus +extern "C" { +#endif + +// sql type + +#ifdef TSDB_SQL_C +#define TSDB_DEFINE_SQL_TYPE( name, msg ) msg, +char *sqlCmd[] = { + "null", +#else +#define TSDB_DEFINE_SQL_TYPE( name, msg ) name, +enum { + TSDB_SQL_NULL = 0, +#endif + + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SELECT, "select" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_FETCH, "fetch" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_INSERT, "insert" ) + + // the SQL below is for mgmt node + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_TABLE, "create-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_DB, "drop-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_TABLE, "drop-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_ACCT, "create-acct" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_USER, "create-user" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_ACCT, "drop-acct" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_USER, "drop-user" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_USER, "alter-user" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DNODE, "create-dnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_DNODE, "drop-dnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_DNODE, "cfg-dnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_MNODE, "cfg-mnode" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW, "show" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE, "retrieve" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_QUERY, "kill-query" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_STREAM, "kill-stream" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_CONNECTION, "kill-connection" ) + + // SQL below is for read operation + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_READ, "read" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CONNECT, "connect" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_USE_DB, "use-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_META, "meta" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_STABLEVGROUP, "stable-vgroup" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MULTI_META, "multi-meta" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_HB, "heart-beat" ) + + // SQL below for client local + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_LOCAL, "local" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DESCRIBE_TABLE, "describe-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_LOCALMERGE, "retrieve-localmerge" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" ) + + /* + * build empty result instead of accessing dnode to fetch result + * reset the client cache + */ + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" ) + + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_VERSION, "serv-version" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_USER, "current-user ") + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" ) + + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MAX, "max" ) +}; + +// create table operation type +enum TSQL_TYPE { + TSQL_CREATE_TABLE = 0x1, + TSQL_CREATE_STABLE = 0x2, + TSQL_CREATE_TABLE_FROM_STABLE = 0x3, + TSQL_CREATE_STREAM = 0x4, +}; + +extern char *sqlCmd[]; + +#ifdef __cplusplus +} 
+#endif + +#endif // TDENGINE_QSQLCMD_H diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index b077f40945bc3661a214d344249d083b2f2ced2e..da8f3cd1e1971518d744cbe69ac05b187a6aa412 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -175,7 +175,7 @@ void taosInitGlobalCfg(); bool taosCheckGlobalCfg(); void taosSetAllDebugFlag(); bool taosCfgDynamicOptions(char *msg); -int taosGetFqdnPortFromEp(char *ep, char *fqdn, uint16_t *port); +int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port); #ifdef __cplusplus } diff --git a/src/common/src/sqlcmdstr.c b/src/common/src/sqlcmdstr.c new file mode 100644 index 0000000000000000000000000000000000000000..8584ba79761835989ab7a3e24d88824c14d107c5 --- /dev/null +++ b/src/common/src/sqlcmdstr.c @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define TSDB_SQL_C + +#include "qsqltype.h" diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 90637265b322b6d429c2c39f6b2f2935acee10ca..324edb422b653cb9c0734a1af8c405df6377c3f6 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -61,10 +61,10 @@ int32_t tscEmbedded = 0; */ int64_t tsMsPerDay[] = {86400000L, 86400000000L}; -char tsFirst[TSDB_FQDN_LEN] = {0}; -char tsSecond[TSDB_FQDN_LEN] = {0}; -char tsArbitrator[TSDB_FQDN_LEN] = {0}; -char tsLocalEp[TSDB_FQDN_LEN] = {0}; // Local End Point, hostname:port +char tsFirst[TSDB_EP_LEN] = {0}; +char tsSecond[TSDB_EP_LEN] = {0}; +char tsArbitrator[TSDB_EP_LEN] = {0}; +char tsLocalEp[TSDB_EP_LEN] = {0}; // Local End Point, hostname:port uint16_t tsServerPort = 6030; uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035] uint16_t tsDnodeDnodePort = 6035; // udp/tcp @@ -284,7 +284,7 @@ static void doInitGlobalConfig() { cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; - cfg.ptrLength = TSDB_FQDN_LEN; + cfg.ptrLength = TSDB_EP_LEN; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -294,7 +294,7 @@ static void doInitGlobalConfig() { cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; - cfg.ptrLength = TSDB_FQDN_LEN; + cfg.ptrLength = TSDB_EP_LEN; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -356,7 +356,7 @@ static void doInitGlobalConfig() { cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; - cfg.ptrLength = TSDB_FQDN_LEN; + cfg.ptrLength = TSDB_EP_LEN; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -1252,7 +1252,7 @@ bool taosCheckGlobalCfg() { return true; } -int taosGetFqdnPortFromEp(char *ep, char *fqdn, uint16_t *port) { +int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { *port = 0; strcpy(fqdn, ep); diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 626fde3293d1710530ad401ade3412e40c0a2aed..a972881a41a054f92ae34608bb1f038a5d52680d 100644 --- a/src/common/src/ttypes.c +++ 
b/src/common/src/ttypes.c @@ -32,18 +32,280 @@ const int32_t TYPE_BYTES[11] = { sizeof(VarDataOffsetT) // TSDB_DATA_TYPE_NCHAR }; +static void getStatics_i8(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + int8_t *data = (int8_t *)pData; + *min = INT64_MAX; + *max = INT64_MIN; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + // int64_t lastKey = 0; + // int8_t lastVal = TSDB_DATA_TINYINT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((char *)&data[i], TSDB_DATA_TYPE_TINYINT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + *minIndex = i; + } + + if (*max < data[i]) { + *max = data[i]; + *maxIndex = i; + } + } +} + +static void getStatics_i16(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + int16_t *data = (int16_t *)pData; + *min = INT64_MAX; + *max = INT64_MIN; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + // int64_t lastKey = 0; + // int16_t lastVal = TSDB_DATA_SMALLINT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + *minIndex = i; + } + + if (*max < data[i]) { + *max = data[i]; + *maxIndex = i; + } + + // if (isNull(&lastVal, TSDB_DATA_TYPE_SMALLINT)) { + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } else { + // *wsum = lastVal * (primaryKey[i] - lastKey); + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } + } +} + +static void getStatics_i32(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + int32_t *data = (int32_t *)pData; + *min = INT64_MAX; + *max = INT64_MIN; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + // int64_t lastKey = 0; + // int32_t lastVal = TSDB_DATA_INT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + *minIndex = i; + } + + if (*max < data[i]) { + *max = data[i]; + *maxIndex = i; + } + + // if (isNull(&lastVal, TSDB_DATA_TYPE_INT)) { + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } else { + // *wsum = lastVal * (primaryKey[i] - lastKey); + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } + } +} + +static void getStatics_i64(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + int64_t *data = (int64_t *)pData; + *min = INT64_MAX; + *max = INT64_MIN; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + *minIndex = i; + } + + if (*max < data[i]) { + *max = data[i]; + *maxIndex = i; + } + + // if (isNull(&lastVal, TSDB_DATA_TYPE_BIGINT)) { + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } else { + // *wsum = lastVal * (primaryKey[i] - lastKey); + // lastKey = 
primaryKey[i]; + // lastVal = data[i]; + // } + } +} + +static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + float *data = (float *)pData; + float fmin = DBL_MAX; + float fmax = -DBL_MAX; + double dsum = 0; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_FLOAT)) { + (*numOfNull) += 1; + continue; + } + + float fv = 0; + fv = GET_FLOAT_VAL(&(data[i])); + dsum += fv; + if (fmin > fv) { + fmin = fv; + *minIndex = i; + } + + if (fmax < fv) { + fmax = fv; + *maxIndex = i; + } + + // if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) { + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } else { + // *wsum = lastVal * (primaryKey[i] - lastKey); + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } + } + + double csum = 0; + csum = GET_DOUBLE_VAL(sum); + csum += dsum; +#ifdef _TD_ARM_32_ + SET_DOUBLE_VAL_ALIGN(sum, &csum); + SET_DOUBLE_VAL_ALIGN(max, &fmax); + SET_DOUBLE_VAL_ALIGN(min, &fmin); +#else + *sum = csum; + *max = fmax; + *min = fmin; +#endif +} + +static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { + double *data = (double *)pData; + double dmin = DBL_MAX; + double dmax = -DBL_MAX; + double dsum = 0; + *minIndex = 0; + *maxIndex = 0; + + ASSERT(numOfRow <= INT16_MAX); + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((const char*) &data[i], TSDB_DATA_TYPE_DOUBLE)) { + (*numOfNull) += 1; + continue; + } + + double dv = 0; + dv = GET_DOUBLE_VAL(&(data[i])); + dsum += dv; + if (dmin > dv) { + dmin = dv; + *minIndex = i; + } + + if (dmax < dv) { + dmax = dv; + *maxIndex = i; + } + + // if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) { + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } else { + // *wsum = lastVal * (primaryKey[i] - lastKey); + // lastKey = primaryKey[i]; + // lastVal = data[i]; + // } + } + + double csum = 0; + csum = GET_DOUBLE_VAL(sum); + csum += dsum; + + +#ifdef _TD_ARM_32_ + SET_DOUBLE_VAL_ALIGN(sum, &csum); + SET_DOUBLE_VAL_ALIGN(max, &dmax); + SET_DOUBLE_VAL_ALIGN(min, &dmin); +#else + *sum = csum; + *max = dmax; + *min = dmin; +#endif +} + tDataTypeDescriptor tDataTypeDesc[11] = { - {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL}, - {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool}, - {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint}, - {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint}, - {TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt}, - {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint}, - {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat}, - {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble}, - {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString}, - {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp}, - {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString}, + {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", NULL, NULL, NULL}, + {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, NULL}, + 
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8}, + {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", tsCompressSmallint, tsDecompressSmallint, getStatics_i16}, + {TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT", tsCompressInt, tsDecompressInt, getStatics_i32}, + {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", tsCompressBigint, tsDecompressBigint, getStatics_i64}, + {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", tsCompressFloat, tsDecompressFloat, getStatics_f}, + {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", tsCompressDouble, tsDecompressDouble, getStatics_d}, + {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", tsCompressString, tsDecompressString, NULL}, + {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", tsCompressTimestamp, tsDecompressTimestamp, NULL}, + {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", tsCompressString, tsDecompressString, NULL}, }; char tTokenTypeSwitcher[13] = { diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 2b409268d95d4f2014359114d3717ad0ce8bbd7a..d076beb8c048e885365e82f976de9f135a0d1c27 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -241,17 +241,12 @@ function CTaosInterface (config = null, pass = false) { 'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]], // Subscription - //TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, long time, int mseconds) - ////TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds); - 'taos_subscribe': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int64, ref.types.int] ], - //TAOS_ROW taos_consume(TAOS_SUB *tsub); - 'taos_consume': [ ref.refType(ref.types.void_ptr2), [ref.types.void_ptr] ], + //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) + 'taos_subscribe': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int] ], + // TAOS_RES *taos_consume(TAOS_SUB *tsub) + 'taos_consume': [ ref.types.void_ptr, [ref.types.void_ptr] ], //void taos_unsubscribe(TAOS_SUB *tsub); 'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ], - //int taos_subfields_count(TAOS_SUB *tsub); - 'taos_subfields_count': [ ref.types.int, [ref.types.void_ptr ] ], - //TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub); - 'taos_fetch_subfields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ], // Continuous Query //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), @@ -362,7 +357,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { blocks.fill(null); num_of_rows = Math.abs(num_of_rows); let offset = 0; - pblock = pblock.deref() + pblock = pblock.deref(); for (let i = 0; i < fields.length; i++) { if (!convertFunctions[fields[i]['type']] ) { @@ -472,64 +467,40 @@ CTaosInterface.prototype.getClientInfo = function getClientInfo() { } // Subscription -CTaosInterface.prototype.subscribe = function subscribe(host=null, user="root", password="taosdata", db=null, table=null, time=null, mseconds=null) { - let dbOrig = db; - let tableOrig = table; - try { - host = host != null ? 
ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL); - } - catch(err) { - throw "Attribute Error: host is expected as a str"; - } - try { - user = ref.allocCString(user) - } - catch(err) { - throw "Attribute Error: user is expected as a str"; - } +CTaosInterface.prototype.subscribe = function subscribe(connection, restart, topic, sql, interval) { + let topicOrig = topic; + let sqlOrig = sql; try { - password = ref.allocCString(password); + sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL); } catch(err) { - throw "Attribute Error: password is expected as a str"; + throw "Attribute Error: sql is expected as a str"; } try { - db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL); + topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL); } catch(err) { - throw "Attribute Error: db is expected as a str"; - } - try { - table = table != null ? ref.allocCString(table) : ref.alloc(ref.types.char_ptr, ref.NULL); - } - catch(err) { - throw TypeError("table is expected as a str"); - } - try { - mseconds = ref.alloc(ref.types.int, mseconds); + throw TypeError("topic is expected as a str"); } - catch(err) { - throw TypeError("mseconds is expected as an int"); - } - //TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds); - let subscription = this.libtaos.taos_subscribe(host, user, password, db, table, time, mseconds); + + restart = ref.alloc(ref.types.int, restart); + + let subscription = this.libtaos.taos_subscribe(connection, restart, topic, sql, null, null, interval); if (ref.isNull(subscription)) { throw new errors.TDError('Failed to subscribe to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig); } else { - console.log('Successfully subscribed to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig); + console.log('Successfully subscribed to TDengine - Topic: ' + topicOrig); } return subscription; } -CTaosInterface.prototype.subFieldsCount = function subFieldsCount(subscription) { - return this.libtaos.taos_subfields_count(subscription); -} -CTaosInterface.prototype.fetchSubFields = function fetchSubFields(subscription) { - let pfields = this.libtaos.taos_fetch_subfields(subscription); - let pfieldscount = this.subFieldsCount(subscription); + +CTaosInterface.prototype.consume = function consume(subscription) { + let result = this.libtaos.taos_consume(subscription); let fields = []; + let pfields = this.fetchFields(result); if (ref.isNull(pfields) == false) { - pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0); + pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0); for (let i = 0; i < pfields.length; i += 68) { //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type fields.push( { @@ -539,27 +510,23 @@ CTaosInterface.prototype.fetchSubFields = function fetchSubFields(subscription) }) } } - return fields; -} -CTaosInterface.prototype.consume = function consume(subscription) { - let row = this.libtaos.taos_consume(subscription); - let fields = this.fetchSubFields(subscription); - //let isMicro = (cti.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO); - let isMicro = false; //no supported function for determining precision? 
- let blocks = new Array(fields.length); - blocks.fill(null); - let numOfRows2 = 1; //Math.abs(numOfRows2); - let offset = 0; - if (numOfRows2 > 0){ - for (let i = 0; i < fields.length; i++) { - if (!convertFunctions[fields[i]['type']] ) { - throw new errors.DatabaseError("Invalid data type returned from database"); + + let data = []; + while(true) { + let { blocks, num_of_rows } = this.fetchBlock(result, fields); + if (num_of_rows == 0) { + break; + } + for (let i = 0; i < num_of_rows; i++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let j = 0; j < fields.length; j++) { + rowBlock[j] = blocks[j][i]; } - blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro); - offset += fields[i]['bytes'] * numOfRows2; + data[data.length-1] = (rowBlock); } } - return {blocks:blocks, fields:fields}; + return { data: data, fields: fields, result: result }; } CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) { //void taos_unsubscribe(TAOS_SUB *tsub); diff --git a/src/connector/nodejs/nodetaos/cursor.js b/src/connector/nodejs/nodetaos/cursor.js index 092c19dfd4af715749510736b45493483cb12d14..acfe96dfbcb94f5e12e26f903ae134aa4ad75f9a 100644 --- a/src/connector/nodejs/nodetaos/cursor.js +++ b/src/connector/nodejs/nodetaos/cursor.js @@ -405,18 +405,16 @@ TDengineCursor.prototype.getClientInfo = function getClientInfo() { /** * Subscribe to a table from a database in TDengine. * @param {Object} config - A configuration object containing the configuration options for the subscription - * @param {string} config.host - The host to subscribe to - * @param {string} config.user - The user to subscribe as - * @param {string} config.password - The password for the said user - * @param {string} config.db - The db containing the table to subscribe to - * @param {string} config.table - The name of the table to subscribe to - * @param {number} config.time - The start time to start a subscription session - * @param {number} config.mseconds - The pulling period of the subscription session + * @param {string} config.restart - whether or not to continue a subscription if it already exits, otherwise start from beginning + * @param {string} config.topic - The unique identifier of a subscription + * @param {string} config.sql - A sql statement for data query + * @param {string} config.interval - The pulling interval * @return {Buffer} A buffer pointing to the subscription session handle * @since 1.3.0 */ TDengineCursor.prototype.subscribe = function subscribe(config) { - return this._chandle.subscribe(config.host, config.user, config.password, config.db, config.table, config.time, config.mseconds); + let restart = config.restart ? 1 : 0; + return this._chandle.subscribe(this._connection._conn, restart, config.topic, config.sql, config.interval); }; /** * An infinite loop that consumes the latest data and calls a callback function that is provided. 
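For reference, the reworked Node.js bindings above simply forward to the native subscription API whose signatures are quoted in the ffi table (`taos_subscribe`, `taos_consume`, `taos_unsubscribe`). A minimal C sketch of the same polling flow, with the connection details and `all_Types` topic borrowed from test/testSubscribe.js and result-cleanup details glossed over, might look roughly like this; it illustrates the API shape and is not part of this patch:

```c
#include <stdio.h>
#include <unistd.h>
#include "taos.h"  // assumed to declare TAOS, TAOS_SUB, taos_subscribe, taos_consume, taos_unsubscribe

int main(void) {
  // port 0 keeps the configured default (see taosConnectImpl above: the port is only overridden if non-zero)
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "td_connector_test", 0);
  if (taos == NULL) return 1;

  // restart = 1 discards any saved progress for this topic and starts from the beginning;
  // NULL callback and param mean results are pulled synchronously via taos_consume().
  TAOS_SUB *sub = taos_subscribe(taos, 1, "all_Types",
                                 "select AVG(_int) from td_connector_test.all_Types;",
                                 NULL, NULL, 1000);
  if (sub == NULL) { taos_close(taos); return 1; }

  for (int i = 0; i < 10; ++i) {
    TAOS_RES *res = taos_consume(sub);           // a regular result set, as in the new binding
    int rows = 0;
    while (taos_fetch_row(res) != NULL) ++rows;  // iterate it like any other query result
    printf("poll %d: %d new row(s)\n", i, rows);
    sleep(1);
  }

  taos_unsubscribe(sub);
  taos_close(taos);
  return 0;
}
```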
@@ -426,18 +424,8 @@ TDengineCursor.prototype.subscribe = function subscribe(config) { */ TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) { while (true) { - let res = this._chandle.consume(subscription); - let data = []; - let num_of_rows = res.blocks[0].length; - for (let j = 0; j < num_of_rows; j++) { - data.push([]); - let rowBlock = new Array(res.fields.length); - for (let k = 0; k < res.fields.length; k++) { - rowBlock[k] = res.blocks[k][j]; - } - data[data.length-1] = rowBlock; - } - callback(data, res.fields, subscription); + let { data, fields, result} = this._chandle.consume(subscription); + callback(data, fields, result); } } /** diff --git a/src/connector/nodejs/package-lock.json b/src/connector/nodejs/package-lock.json index ab7789f7c68b0f647012a22df5881ada29c64c47..1137e351064e3e2540399e5f23accd2d073fde30 100644 --- a/src/connector/nodejs/package-lock.json +++ b/src/connector/nodejs/package-lock.json @@ -1,6 +1,6 @@ { "name": "td-connector", - "version": "1.5.0", + "version": "1.6.1", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index 7f42d40a915144086da2eb0a035386ba35c0d328..8d7a971aa318844b8052cc17ae4becfab74cebab 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td-connector", - "version": "1.5.0", + "version": "1.6.1", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "scripts": { diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js index 67f0a783b95a0e375e73a9da7bee38f5812d2dbb..5d96e798d8b295dd986eb50de3e7954b923098c9 100644 --- a/src/connector/nodejs/test/test.js +++ b/src/connector/nodejs/test/test.js @@ -33,12 +33,12 @@ for (let i = 0; i < 10000; i++) { parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt parseFloat( R(-3.4E38, 3.4E38) ), // Float - parseFloat( R(-1.7E308, 1.7E308) ), // Double + parseFloat( R(-1.7E30, 1.7E30) ), // Double "\"Long Binary\"", // Binary parseInt( R(-32767, 32767) ), // Small Int parseInt( R(-127, 127) ), // Tiny Int randomBool(), - "\"Nchars 一些中文字幕\""]; // Bool + "\"Nchars\""]; // Bool c1.execute('insert into td_connector_test.all_types values(' + insertData.join(',') + ' );', {quiet:true}); if (i % 1000 == 0) { console.log("Insert # " , i); diff --git a/src/connector/nodejs/test/testSubscribe.js b/src/connector/nodejs/test/testSubscribe.js new file mode 100644 index 0000000000000000000000000000000000000000..30fb3f425683f0113873534f2b67255db811edcc --- /dev/null +++ b/src/connector/nodejs/test/testSubscribe.js @@ -0,0 +1,16 @@ +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10}); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; +c1.execute('use td_connector_test'); +let sub = c1.subscribe({ + restart: true, + sql: "select AVG(_int) from td_connector_test.all_Types;", + topic: 'all_Types', + interval: 1000 +}); + +c1.consumeData(sub, (data, fields) => { + console.log(data); +}); \ No newline at end of file diff --git a/src/rpc/inc/rpcHaship.h b/src/dnode/inc/dnodeMain.h similarity index 67% rename from src/rpc/inc/rpcHaship.h rename to src/dnode/inc/dnodeMain.h index d3ed48997a0b2798e8d89942d8a16f4b3de81f31..df7698ffc3246fbb419c554123b98ed74dbef9a7 100644 --- a/src/rpc/inc/rpcHaship.h +++ 
b/src/dnode/inc/dnodeMain.h @@ -13,18 +13,15 @@ * along with this program. If not, see . */ -#ifndef _rpc_hash_ip_header_ -#define _rpc_hash_ip_header_ +#ifndef TDENGINE_DNODE_MAIN_H +#define TDENGINE_DNODE_MAIN_H #ifdef __cplusplus extern "C" { #endif -void *rpcOpenIpHash(int maxSessions); -void rpcCloseIpHash(void *handle); -void *rpcAddIpHash(void *handle, void *pData, uint32_t ip, uint16_t port); -void rpcDeleteIpHash(void *handle, uint32_t ip, uint16_t port); -void *rpcGetIpHash(void *handle, uint32_t ip, uint16_t port); +int32_t dnodeInitSystem(); +void dnodeCleanUpSystem(); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 7949a629663455e528449c748bbd992106c4e43d..2f693c61fbf5df441b57a130f7beab1cc77cd81e 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -16,8 +16,6 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taos.h" -#include "tglobal.h" -#include "trpc.h" #include "tutil.h" #include "tconfig.h" #include "tglobal.h" @@ -29,112 +27,14 @@ #include "dnodeVRead.h" #include "dnodeShell.h" #include "dnodeVWrite.h" -#include "tgrant.h" -static int32_t dnodeInitSystem(); static int32_t dnodeInitStorage(); -extern void grantParseParameter(); static void dnodeCleanupStorage(); -static void dnodeCleanUpSystem(); static void dnodeSetRunStatus(SDnodeRunStatus status); -static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); static void dnodeCheckDataDirOpenned(char *dir); static SDnodeRunStatus tsDnodeRunStatus = TSDB_DNODE_RUN_STATUS_STOPPED; -int32_t main(int32_t argc, char *argv[]) { - // Set global configuration file - for (int32_t i = 1; i < argc; ++i) { - if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { - strcpy(configDir, argv[++i]); - } else { - printf("'-c' requires a parameter, default:%s\n", configDir); - exit(EXIT_FAILURE); - } - } else if (strcmp(argv[i], "-V") == 0) { -#ifdef _SYNC - char *versionStr = "enterprise"; -#else - char *versionStr = "community"; -#endif - printf("%s version: %s compatible_version: %s\n", versionStr, version, compatible_version); - printf("gitinfo: %s\n", gitinfo); - printf("gitinfoI: %s\n", gitinfoOfInternal); - printf("buildinfo: %s\n", buildinfo); - exit(EXIT_SUCCESS); - } else if (strcmp(argv[i], "-k") == 0) { - grantParseParameter(); - exit(EXIT_SUCCESS); - } -#ifdef TAOS_MEM_CHECK - else if (strcmp(argv[i], "--alloc-random-fail") == 0) { - if ((i < argc - 1) && (argv[i + 1][0] != '-')) { - taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, argv[++i], true); - } else { - taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, NULL, true); - } - } else if (strcmp(argv[i], "--detect-mem-leak") == 0) { - if ((i < argc - 1) && (argv[i + 1][0] != '-')) { - taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, argv[++i], true); - } else { - taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true); - } - } -#endif - } - - /* Set termination handler. */ - struct sigaction act = {{0}}; - act.sa_flags = SA_SIGINFO; - act.sa_sigaction = signal_handler; - sigaction(SIGTERM, &act, NULL); - sigaction(SIGHUP, &act, NULL); - sigaction(SIGINT, &act, NULL); - sigaction(SIGUSR1, &act, NULL); - sigaction(SIGUSR2, &act, NULL); - - // Open /var/log/syslog file to record information. 
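With main() and the signal/syslog plumbing being pulled out of dnodeMain.c, dnodeMain.h now only exports the lifecycle pair dnodeInitSystem()/dnodeCleanUpSystem(); the process entry point reappears in the new dnodeSystem.c further down. As a rough sketch (not the actual file, which also parses -c/-V/-k, installs signal handlers and logs to syslog), a driver built on just those two exports would look roughly like this:

```c
#include <stdio.h>
#include <unistd.h>
#include "dnodeMain.h"  /* int32_t dnodeInitSystem(); void dnodeCleanUpSystem(); */

/* Minimal driver sketch: initialize the dnode, idle, clean up on failure.
 * The real entry point in dnodeSystem.c additionally handles command-line
 * flags, syslog and POSIX signals. */
int main(void) {
  if (dnodeInitSystem() < 0) {
    fprintf(stderr, "failed to initialize TDengine dnode\n");
    dnodeCleanUpSystem();
    return 1;
  }
  while (1) sleep(1000);  /* all work happens in the dnode's own threads */
}
```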
- openlog("TDengine:", LOG_PID | LOG_CONS | LOG_NDELAY, LOG_LOCAL1); - syslog(LOG_INFO, "Starting TDengine service..."); - - // Initialize the system - if (dnodeInitSystem() < 0) { - syslog(LOG_ERR, "Error initialize TDengine system"); - closelog(); - - dnodeCleanUpSystem(); - exit(EXIT_FAILURE); - } - - syslog(LOG_INFO, "Started TDengine service successfully."); - - while (1) { - sleep(1000); - } -} - -static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context) { - if (signum == SIGUSR1) { - taosCfgDynamicOptions("debugFlag 135"); - return; - } - if (signum == SIGUSR2) { - taosCfgDynamicOptions("resetlog"); - return; - } - syslog(LOG_INFO, "Shut down signal is %d", signum); - syslog(LOG_INFO, "Shutting down TDengine service..."); - // clean the system. - dPrint("shut down signal is %d, sender PID:%d", signum, sigInfo->si_pid); - dnodeCleanUpSystem(); - // close the syslog - syslog(LOG_INFO, "Shut down TDengine service successfully"); - dPrint("TDengine is shut down!"); - closelog(); - exit(EXIT_SUCCESS); -} - -static int32_t dnodeInitSystem() { +int32_t dnodeInitSystem() { dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_INITIALIZE); tscEmbedded = 1; taosResolveCRC(); @@ -180,7 +80,7 @@ static int32_t dnodeInitSystem() { return 0; } -static void dnodeCleanUpSystem() { +void dnodeCleanUpSystem() { if (dnodeGetRunStatus() != TSDB_DNODE_RUN_STATUS_STOPPED) { dnodeSetRunStatus(TSDB_DNODE_RUN_STATUS_STOPPED); dnodeCleanupShell(); diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 81d426b49637c5028ad415d2ea7d7b37ad3e9e8b..4b28992aa492f41edfbbf6bd2ac4a795140b6f45 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -411,7 +411,7 @@ static bool dnodeReadMnodeInfos() { dError("failed to read mnode mgmtIpList.json, nodeName not found"); goto PARSE_OVER; } - strncpy(tsMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_FQDN_LEN); + strncpy(tsMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN); } ret = true; diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 52407dc7a451f743a40bda8c48b44bac77e63eab..fbed16483996304471a0dbad1df5638bead01920 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -157,8 +157,8 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char if (rpcRsp.code != 0) { dError("user:%s, auth msg received from mnode, error:%s", user, tstrerror(rpcRsp.code)); } else { - dTrace("user:%s, auth msg received from mnode", user); SDMAuthRsp *pRsp = rpcRsp.pCont; + dTrace("user:%s, auth msg received from mnode", user); memcpy(secret, pRsp->secret, TSDB_KEY_LEN); memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN); *spi = pRsp->spi; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c new file mode 100644 index 0000000000000000000000000000000000000000..683328db29e4a4e072549d91e5e9cc7da9e6e53d --- /dev/null +++ b/src/dnode/src/dnodeSystem.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tgrant.h" +#include "tutil.h" +#include "tglobal.h" +#include "dnodeInt.h" +#include "dnodeMain.h" + +static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); + +int32_t main(int32_t argc, char *argv[]) { + // Set global configuration file + for (int32_t i = 1; i < argc; ++i) { + if (strcmp(argv[i], "-c") == 0) { + if (i < argc - 1) { + strcpy(configDir, argv[++i]); + } else { + printf("'-c' requires a parameter, default:%s\n", configDir); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-V") == 0) { +#ifdef _SYNC + char *versionStr = "enterprise"; +#else + char *versionStr = "community"; +#endif + printf("%s version: %s compatible_version: %s\n", versionStr, version, compatible_version); + printf("gitinfo: %s\n", gitinfo); + printf("gitinfoI: %s\n", gitinfoOfInternal); + printf("buildinfo: %s\n", buildinfo); + exit(EXIT_SUCCESS); + } else if (strcmp(argv[i], "-k") == 0) { + grantParseParameter(); + exit(EXIT_SUCCESS); + } +#ifdef TAOS_MEM_CHECK + else if (strcmp(argv[i], "--alloc-random-fail") == 0) { + if ((i < argc - 1) && (argv[i + 1][0] != '-')) { + taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, argv[++i], true); + } else { + taosSetAllocMode(TAOS_ALLOC_MODE_RANDOM_FAIL, NULL, true); + } + } else if (strcmp(argv[i], "--detect-mem-leak") == 0) { + if ((i < argc - 1) && (argv[i + 1][0] != '-')) { + taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, argv[++i], true); + } else { + taosSetAllocMode(TAOS_ALLOC_MODE_DETECT_LEAK, NULL, true); + } + } +#endif + } + + /* Set termination handler. */ + struct sigaction act = {{0}}; + act.sa_flags = SA_SIGINFO; + act.sa_sigaction = signal_handler; + sigaction(SIGTERM, &act, NULL); + sigaction(SIGHUP, &act, NULL); + sigaction(SIGINT, &act, NULL); + sigaction(SIGUSR1, &act, NULL); + sigaction(SIGUSR2, &act, NULL); + + // Open /var/log/syslog file to record information. + openlog("TDengine:", LOG_PID | LOG_CONS | LOG_NDELAY, LOG_LOCAL1); + syslog(LOG_INFO, "Starting TDengine service..."); + + // Initialize the system + if (dnodeInitSystem() < 0) { + syslog(LOG_ERR, "Error initialize TDengine system"); + closelog(); + + dnodeCleanUpSystem(); + exit(EXIT_FAILURE); + } + + syslog(LOG_INFO, "Started TDengine service successfully."); + + while (1) { + sleep(1000); + } +} + +static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context) { + if (signum == SIGUSR1) { + taosCfgDynamicOptions("debugFlag 135"); + return; + } + if (signum == SIGUSR2) { + taosCfgDynamicOptions("resetlog"); + return; + } + syslog(LOG_INFO, "Shut down signal is %d", signum); + syslog(LOG_INFO, "Shutting down TDengine service..."); + // clean the system. 
+ dPrint("shut down signal is %d, sender PID:%d", signum, sigInfo->si_pid); + dnodeCleanUpSystem(); + // close the syslog + syslog(LOG_INFO, "Shut down TDengine service successfully"); + dPrint("TDengine is shut down!"); + closelog(); + exit(EXIT_SUCCESS); +} diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index aaf71838bfe2cc3916f10c579bd89e34acb89553..aa8cd997858c7ad27a02d751b00e765fc176d327 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -92,8 +92,6 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) { char *pCont = (char *) pMsg->pCont; void *pVnode; - dTrace("dnode %s msg incoming, thandle:%p", taosMsg[pMsg->msgType], pMsg->handle); - while (leftLen > 0) { SMsgHead *pHead = (SMsgHead *) pCont; pHead->vgId = htonl(pHead->vgId); @@ -214,6 +212,7 @@ static void *dnodeProcessReadQueue(void *param) { continue; } + dTrace("%p, msg:%s will be processed", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]); int32_t code = vnodeProcessRead(pVnode, pReadMsg->rpcMsg.msgType, pReadMsg->pCont, pReadMsg->contLen, &pReadMsg->rspRet); dnodeSendRpcReadRsp(pVnode, pReadMsg, code); taosFreeQitem(pReadMsg); diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 5de4c16c500258fe5f3d5d7588bc8782530d8791..879082f223e27537f8f0dbd8a4e6cc53fce5117b 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -200,6 +200,7 @@ static void *dnodeProcessWriteQueue(void *param) { pHead->msgType = pWrite->rpcMsg.msgType; pHead->version = 0; pHead->len = pWrite->contLen; + dTrace("%p, msg:%s will be processed", pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType]); } else { pHead = (SWalHead *)item; } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 23436fe6a589a805a165da4d1260b5c97dc3fa84..2393654f796666c481e85fbeff81cd7e92e836a8 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -147,6 +147,8 @@ typedef struct tDataTypeDescriptor { char algorithm, char *const buffer, int bufferSize); int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize); + void (*getStatisFunc)(const TSKEY *primaryKey, const void *pData, int32_t numofrow, int64_t *min, int64_t *max, + int64_t *sum, int16_t *minindex, int16_t *maxindex, int16_t *numofnull); } tDataTypeDescriptor; extern tDataTypeDescriptor tDataTypeDesc[11]; @@ -191,20 +193,20 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_ACCT_LEN TSDB_UNI_LEN #define TSDB_PASSWORD_LEN TSDB_UNI_LEN -#define TSDB_MAX_COLUMNS 256 +#define TSDB_MAX_COLUMNS 1024 #define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns #define TSDB_NODE_NAME_LEN 64 #define TSDB_TABLE_NAME_LEN 192 #define TSDB_DB_NAME_LEN 32 #define TSDB_COL_NAME_LEN 64 -#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16 +#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64 #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE #define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 6mb -#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16 -#define TSDB_MAX_TAGS_LEN 512 -#define TSDB_MAX_TAGS 32 +#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 64 +#define TSDB_MAX_TAGS_LEN 65536 +#define TSDB_MAX_TAGS 128 #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 @@ -216,7 +218,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_LOCALE_LEN 64 #define TSDB_TIMEZONE_LEN 64 -#define TSDB_FQDN_LEN 72 +#define 
TSDB_FQDN_LEN 128 +#define TSDB_EP_LEN (TSDB_FQDN_LEN+6) #define TSDB_IPv4ADDR_LEN 16 #define TSDB_FILENAME_LEN 128 #define TSDB_METER_VNODE_BITS 20 @@ -232,9 +235,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); #define TSDB_DEFAULT_PKT_SIZE 65480 //same as RPC_MAX_UDP_SIZE #define TSDB_PAYLOAD_SIZE (TSDB_DEFAULT_PKT_SIZE - 100) -#define TSDB_DEFAULT_PAYLOAD_SIZE 1024 // default payload size +#define TSDB_DEFAULT_PAYLOAD_SIZE 2048 // default payload size #define TSDB_EXTRA_PAYLOAD_SIZE 128 // extra bytes for auth -#define TSDB_SQLCMD_SIZE 1024 +#define TSDB_CQ_SQL_SIZE 1024 #define TSDB_MAX_VNODES 256 #define TSDB_MIN_VNODES 50 #define TSDB_INVALID_VNODE_NUM 0 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index f9bc1404fe2895ac26b43ec08ffb0a8bc21df67d..1390d66113ac9bd5f184947dd27ec4ebf4baaac6 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -170,6 +170,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_QHANDLE, 0, 459, "invalid handle" TAOS_DEFINE_ERROR(TSDB_CODE_QUERY_CANCELLED, 0, 460, "query cancelled") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_IE, 0, 461, "invalid ie") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_VALUE, 0, 462, "invalid value") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_FQDN, 0, 463, "invalid FQDN") // others TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_FILE_FORMAT, 0, 500, "invalid file format") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index e58bcf52371084ad254ce9d69c1ab87d99382bfa..3aa75523eab5b67b19654712a2fed0854f6cd965 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -187,13 +187,13 @@ typedef struct SMsgHead { // Submit message for one table typedef struct SSubmitBlk { - int64_t uid; // table unique id - int32_t tid; // table id - int32_t padding; // TODO just for padding here - int32_t sversion; // data schema version - int32_t len; // data part length, not including the SSubmitBlk head - int16_t numOfRows; // total number of rows in current submit block - char data[]; + uint64_t uid; // table unique id + int32_t tid; // table id + int32_t padding; // TODO just for padding here + int32_t sversion; // data schema version + int32_t len; // data part length, not including the SSubmitBlk head + int16_t numOfRows; // total number of rows in current submit block + char data[]; } SSubmitBlk; // Submit message for this TSDB @@ -236,6 +236,7 @@ typedef struct { int16_t numOfTags; int32_t sid; int32_t sversion; + int32_t tversion; int32_t tagDataLen; int32_t sqlDataLen; uint64_t uid; @@ -327,9 +328,9 @@ typedef struct { } SMDDropTableMsg; typedef struct { - int32_t contLen; - int32_t vgId; - int64_t uid; + int32_t contLen; + int32_t vgId; + uint64_t uid; char tableId[TSDB_TABLE_ID_LEN + 1]; } SMDDropSTableMsg; @@ -404,9 +405,9 @@ typedef struct SColumnInfo { } SColumnInfo; typedef struct STableIdInfo { - int64_t uid; - int32_t tid; - TSKEY key; // last accessed ts, for subscription + uint64_t uid; + int32_t tid; + TSKEY key; // last accessed ts, for subscription } STableIdInfo; typedef struct STimeWindow { @@ -529,7 +530,7 @@ typedef struct { typedef struct { int32_t nodeId; - char nodeEp[TSDB_FQDN_LEN]; + char nodeEp[TSDB_EP_LEN]; } SDMMnodeInfo; typedef struct { @@ -541,7 +542,7 @@ typedef struct { typedef struct { uint32_t version; int32_t dnodeId; - char dnodeEp[TSDB_FQDN_LEN]; + char dnodeEp[TSDB_EP_LEN]; uint32_t moduleStatus; uint32_t lastReboot; // time stamp for last reboot uint16_t numOfTotalVnodes; // from config file @@ -583,7 +584,7 @@ typedef struct { typedef struct { int32_t nodeId; - char nodeEp[TSDB_FQDN_LEN]; + char 
nodeEp[TSDB_EP_LEN]; } SMDVnodeDesc; typedef struct { @@ -632,6 +633,7 @@ typedef struct STableMetaMsg { uint8_t tableType; int16_t numOfColumns; int16_t sversion; + int16_t tversion; int32_t sid; uint64_t uid; SCMVgroupInfo vgroup; @@ -667,7 +669,7 @@ typedef struct SCMShowRsp { } SCMShowRsp; typedef struct { - char ep[TSDB_FQDN_LEN]; // end point, hostname:port + char ep[TSDB_EP_LEN]; // end point, hostname:port } SCMCreateDnodeMsg, SCMDropDnodeMsg; typedef struct { @@ -682,7 +684,7 @@ typedef struct { } SDMConfigVnodeMsg; typedef struct { - char ep[TSDB_FQDN_LEN]; // end point, hostname:port + char ep[TSDB_EP_LEN]; // end point, hostname:port char config[64]; } SMDCfgDnodeMsg, SCMCfgDnodeMsg; diff --git a/src/inc/trpc.h b/src/inc/trpc.h index eff210433f7d7bcc2f4a5ad1d12bd88ed59581be..16223b813a1a738f6b0213e8d6c4f62cd0c2290c 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -48,6 +48,7 @@ typedef struct { int contLen; int32_t code; void *handle; + void *ahandle; //app handle set by client, for debug purpose } SRpcMsg; typedef struct { diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 32e35416927d144cdf3dabaa51e29ada6d390ac9..729ed17053479ee509f613d01ea3e2b11bcafd73 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -72,13 +72,13 @@ typedef void TsdbRepoT; // use void to hide implementation details from outside int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg, void *limiter); int32_t tsdbDropRepo(TsdbRepoT *repo); TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH); -int32_t tsdbCloseRepo(TsdbRepoT *repo); +int32_t tsdbCloseRepo(TsdbRepoT *repo, int toCommit); int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg); // --------- TSDB TABLE DEFINITION typedef struct { - int64_t uid; // the unique table ID - int32_t tid; // the table ID in the repository. + uint64_t uid; // the unique table ID + int32_t tid; // the table ID in the repository. 
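taosdef.h raises TSDB_FQDN_LEN to 128 and adds TSDB_EP_LEN = TSDB_FQDN_LEN + 6, and the endpoint fields (nodeEp, dnodeEp, ep) in taosmsg.h and the mnode structs switch from TSDB_FQDN_LEN to TSDB_EP_LEN. The +6 presumably reserves space for the ':' separator plus a port of up to five digits, so a full "host:port" endpoint fits. A quick sketch with the macro values taken from this diff (the buildEp helper is hypothetical, not an existing API):

```c
#include <stdint.h>
#include <stdio.h>

#define TSDB_FQDN_LEN 128
#define TSDB_EP_LEN   (TSDB_FQDN_LEN + 6)  /* ':' + up to 5 port digits */

/* Hypothetical helper: compose "fqdn:port" the way dnodeEp/nodeEp are used.
 * Buffers such as dnodeEp are declared as ep[TSDB_EP_LEN + 1], leaving room
 * for a full-length FQDN, the separator, a five-digit port and the NUL. */
static void buildEp(char *ep, size_t size, const char *fqdn, uint16_t port) {
  snprintf(ep, size, "%s:%u", fqdn, (unsigned)port);
}

int main(void) {
  char ep[TSDB_EP_LEN + 1];
  buildEp(ep, sizeof(ep), "node1.example.com", 6030);
  printf("%s\n", ep);  /* node1.example.com:6030 */
  return 0;
}
```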
} STableId; // --------- TSDB TABLE configuration @@ -88,14 +88,14 @@ typedef struct { STableId tableId; int32_t sversion; char * sname; // super table name - int64_t superUid; + uint64_t superUid; STSchema * schema; STSchema * tagSchema; SDataRow tagValues; } STableCfg; -int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t tid); -int tsdbTableSetSuperUid(STableCfg *config, int64_t uid); +int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid); +int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid); int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup); int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup); int tsdbTableSetTagValue(STableCfg *config, SDataRow row, bool dup); @@ -109,7 +109,7 @@ char* tsdbGetTableName(TsdbRepoT *repo, const STableId* id, int16_t* bytes); int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg); int tsdbDropTable(TsdbRepoT *pRepo, STableId tableId); int tsdbAlterTable(TsdbRepoT *repo, STableCfg *pCfg); -TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, int64_t uid); +TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid); uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *size); @@ -140,7 +140,7 @@ STableInfo *tsdbGetTableInfo(TsdbRepoT *pRepo, STableId tid); * * @return the number of points inserted, -1 for failure and the error number is set */ -int32_t tsdbInsertData(TsdbRepoT *pRepo, SSubmitMsg *pMsg); +int32_t tsdbInsertData(TsdbRepoT *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg * pRsp) ; // -- FOR QUERY TIME SERIES DATA @@ -278,9 +278,9 @@ SArray *tsdbGetTableList(TsdbQueryHandleT *pQueryHandle); * @param stableid. super table sid * @param pTagCond. tag query condition */ -int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, int64_t uid, const char *pTagCond, size_t len, - int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo *pGroupList, SColIndex *pColIndex, int32_t numOfCols); - +int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, uint64_t uid, const char *pTagCond, size_t len, + int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupList, + SColIndex *pColIndex, int32_t numOfCols); /** * create the table group result including only one table, used to handle the normal table query @@ -290,7 +290,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, int64_t uid, const char *pTagC * @param pGroupInfo the generated result * @return */ -int32_t tsdbGetOneTableGroup(TsdbRepoT *tsdb, int64_t uid, STableGroupInfo *pGroupInfo); +int32_t tsdbGetOneTableGroup(TsdbRepoT *tsdb, uint64_t uid, STableGroupInfo *pGroupInfo); /** * clean up the query handle diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 0d6004bba55f1bff3c4f4e17e14fb65b126dc209..137b97e28797d0d6dbc8654ccfc23f3fa99b7760 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -57,7 +57,7 @@ typedef struct { // if name is empty(name[0] is zero), get the file from index or after, used by master // if name is provided(name[0] is not zero), get the named file at the specified index, used by unsynced node // it returns the file magic number and size, if file not there, magic shall be 0. -typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, int32_t *size); +typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion); // get the wal file from index or after // return value, -1: error, 1:more wal files, 0:last WAL. 
if name[0]==0, no WAL file @@ -73,7 +73,7 @@ typedef void (*FConfirmForward)(void *ahandle, void *mhandle, int32_t code); typedef void (*FNotifyRole)(void *ahandle, int8_t role); // when data file is synced successfully, notity app -typedef void (*FNotifyFileSynced)(void *ahandle); +typedef void (*FNotifyFileSynced)(void *ahandle, uint64_t fversion); typedef struct { int32_t vgId; // vgroup ID diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 381c6f1dbfcc9f4351919b7602a26be206248f0b..549b0ef9775c073847c9aa7da8054509894cad3a 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -41,14 +41,13 @@ // dynamic config timestamp width according to maximum time precision extern int32_t TIMESTAMP_OUTPUT_LENGTH; -typedef struct History History; -struct History { +typedef struct SShellHistory { char* hist[MAX_HISTORY_SIZE]; int hstart; int hend; -}; +} SShellHistory; -struct arguments { +typedef struct SShellArguments { char* host; char* password; char* user; @@ -62,11 +61,11 @@ struct arguments { char* commands; int abort; int port; -}; +} SShellArguments; /**************** Function declarations ****************/ -extern void shellParseArgument(int argc, char* argv[], struct arguments* arguments); -extern TAOS* shellInit(struct arguments* args); +extern void shellParseArgument(int argc, char* argv[], SShellArguments* arguments); +extern TAOS* shellInit(SShellArguments* args); extern void* shellLoopQuery(void* arg); extern void taos_error(TAOS* con); extern int regex_match(const char* s, const char* reg, int cflags); @@ -76,7 +75,7 @@ void shellRunCommandOnServer(TAOS* con, char command[]); void read_history(); void write_history(); void source_file(TAOS* con, char* fptr); -void source_dir(TAOS* con, struct arguments* args); +void source_dir(TAOS* con, SShellArguments* args); void get_history_path(char* history); void cleanup_handler(void* arg); void exitShell(); @@ -89,12 +88,12 @@ int isCommentLine(char *line); extern char PROMPT_HEADER[]; extern char CONTINUE_PROMPT[]; extern int prompt_size; -extern History history; +extern SShellHistory history; extern struct termios oldtio; extern void set_terminal_mode(); extern int get_old_terminal_mode(struct termios* tio); -extern void reset_terminal_mode(); -extern struct arguments args; -extern TAOS_RES* result; +extern void reset_terminal_mode(); +extern SShellArguments args; +extern TAOS_RES* result; #endif diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 69475371dcf1fee1ae1296eec9b344cdc92290a5..dbb05f6a359fc501d7561444dc032ffa37462877 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -33,12 +33,12 @@ char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; TAOS_RES *result = NULL; -History history; +SShellHistory history; /* * FUNCTION: Initialize the shell. 
*/ -TAOS *shellInit(struct arguments *args) { +TAOS *shellInit(SShellArguments *args) { printf("\n"); printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); fflush(stdout); diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index b29b96379b5def3ddb60e58400b58c721dc27b7b..2cbd07db4bdee34a52edd1573dd0aa923aec35ad 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -221,7 +221,7 @@ void* shellImportThreadFp(void *arg) return NULL; } -static void shellRunImportThreads(struct arguments* args) +static void shellRunImportThreads(SShellArguments* args) { pthread_attr_t thattr; ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj)); @@ -254,7 +254,7 @@ static void shellRunImportThreads(struct arguments* args) free(threadObj); } -void source_dir(TAOS* con, struct arguments* args) { +void source_dir(TAOS* con, SShellArguments* args) { shellGetDirectoryFileList(args->dir); int64_t start = taosGetTimestampMs(); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index f5a1145cf8994c01dded1685b9e8e70406e51013..da2bd94814da4dfd2b83318e61a0dfaffdf4116d 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -50,7 +50,7 @@ static struct argp_option options[] = { static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Get the input argument from argp_parse, which we know is a pointer to our arguments structure. */ - struct arguments *arguments = state->input; + SShellArguments *arguments = state->input; wordexp_t full_path; switch (key) { @@ -129,7 +129,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; -void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { +void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { static char verType[32] = {0}; sprintf(verType, "version: %s\n", version); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index b39fba285ff09a5e83632e17299565fcd1b4a943..f8010b84cd24d3c0282b0366bb38315463428a71 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -62,7 +62,7 @@ int checkVersion() { } // Global configurations -struct arguments args = { +SShellArguments args = { .host = NULL, .password = NULL, .user = NULL, diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 33b76cea2deaa8b54d844b0002fbf648d1822599..3cacafa50501cfd0cf677c92327df499bff7a220 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -67,7 +67,7 @@ static struct argp_option options[] = { {0}}; /* Used by main to communicate with parse_opt. */ -struct arguments { +typedef struct DemoArguments { char *host; uint16_t port; char *user; @@ -87,13 +87,13 @@ struct arguments { int num_of_DPT; int abort; char **arg_list; -}; +} SDemoArguments; /* Parse a single option. */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Get the input argument from argp_parse, which we know is a pointer to our arguments structure. 
*/ - struct arguments *arguments = state->input; + SDemoArguments *arguments = state->input; wordexp_t full_path; char **sptr; switch (key) { @@ -269,7 +269,7 @@ double getCurrentTime(); void callBack(void *param, TAOS_RES *res, int code); int main(int argc, char *argv[]) { - struct arguments arguments = {NULL, // host + SDemoArguments arguments = {NULL, // host 0, // port "root", // user "taosdata", // password diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index ed98a9b92c517b4387b8144cf5a8082e91384ad2..adba0911369731624934cacf9be350c4e1972745 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -168,7 +168,7 @@ static struct argp_option options[] = { {0}}; /* Used by main to communicate with parse_opt. */ -struct arguments { +typedef struct SDumpArguments { // connection option char *host; char *user; @@ -193,13 +193,13 @@ struct arguments { char **arg_list; int arg_list_len; bool isDumpIn; -}; +} SDumpArguments; /* Parse a single option. */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Get the input argument from argp_parse, which we know is a pointer to our arguments structure. */ - struct arguments *arguments = state->input; + SDumpArguments *arguments = state->input; wordexp_t full_path; switch (key) { @@ -296,31 +296,31 @@ char *command = NULL; char *lcommand = NULL; char *buffer = NULL; -int taosDumpOut(struct arguments *arguments); +int taosDumpOut(SDumpArguments *arguments); -int taosDumpIn(struct arguments *arguments); +int taosDumpIn(SDumpArguments *arguments); void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); -int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp); +int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp); -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, struct arguments *arguments, FILE *fp); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp); -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, struct arguments *arguments, +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, FILE *fp); -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp); +int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp); -int32_t taosDumpMetric(char *metric, struct arguments *arguments, FILE *fp); +int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp); -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments); +int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments); -int taosCheckParam(struct arguments *arguments); +int taosCheckParam(SDumpArguments *arguments); void taosFreeDbInfos(); int main(int argc, char *argv[]) { - struct arguments arguments = { + SDumpArguments arguments = { // connection option NULL, "root", "taosdata", 0, // output file @@ -424,7 +424,7 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) { return -1; } -int taosDumpOut(struct arguments *arguments) { +int taosDumpOut(SDumpArguments *arguments) { TAOS_ROW row; char *temp = NULL; FILE *fp = NULL; @@ -602,7 +602,7 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { fprintf(fp, "%s\n\n", buffer); } -int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp) { +int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) { 
TAOS_ROW row; int fd = -1; STableRecord tableRecord; @@ -660,7 +660,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp) { return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, struct arguments *arguments, FILE *fp) { +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, SDumpArguments *arguments, FILE *fp) { char *pstr = NULL; pstr = buffer; int counter = 0; @@ -703,7 +703,7 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, struct argume fprintf(fp, "%s\n\n", buffer); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, struct arguments *arguments, +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, SDumpArguments *arguments, FILE *fp) { char *pstr = NULL; pstr = buffer; @@ -786,7 +786,7 @@ int taosGetTableDes(char *table, STableDef *tableDes) { return count; } -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp) { +int32_t taosDumpTable(char *table, char *metric, SDumpArguments *arguments, FILE *fp) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -828,7 +828,7 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI return taosDumpTableData(fp, table, arguments); } -int32_t taosDumpMetric(char *metric, struct arguments *arguments, FILE *fp) { +int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) { TAOS_ROW row = NULL; int fd = -1; STableRecord tableRecord; @@ -877,7 +877,7 @@ int32_t taosDumpMetric(char *metric, struct arguments *arguments, FILE *fp) { return 0; } -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { +int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) { /* char temp[MAX_COMMAND_SIZE] = "\0"; */ int count = 0; char *pstr = NULL; @@ -987,7 +987,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { return 0; } -int taosCheckParam(struct arguments *arguments) { +int taosCheckParam(SDumpArguments *arguments) { if (arguments->all_databases && arguments->databases) { fprintf(stderr, "conflict option --all-databases and --databases\n"); return -1; @@ -1072,7 +1072,7 @@ void taosReplaceCtrlChar(char *str) { *pstr = '\0'; } -int taosDumpIn(struct arguments *arguments) { +int taosDumpIn(SDumpArguments *arguments) { assert(arguments->isDumpIn); int tsize = 0; diff --git a/src/mnode/inc/mgmtDef.h b/src/mnode/inc/mgmtDef.h index f33531583a9e4891dae9e35ff0b76d755344bf2e..fac342901aaadebaa825a206dba97c3af5be8e62 100644 --- a/src/mnode/inc/mgmtDef.h +++ b/src/mnode/inc/mgmtDef.h @@ -33,7 +33,7 @@ typedef struct SDnodeObj { int32_t dnodeId; uint16_t dnodePort; char dnodeFqdn[TSDB_FQDN_LEN + 1]; - char dnodeEp[TSDB_FQDN_LEN + 1]; + char dnodeEp[TSDB_EP_LEN + 1]; int64_t createdTime; uint32_t lastAccess; int32_t openVnodes; @@ -68,7 +68,7 @@ typedef struct SMnodeObj { // todo use dynamic length string typedef struct { - char tableId[TSDB_TABLE_ID_LEN + 1]; + char *tableId; int8_t type; } STableObj; @@ -77,6 +77,7 @@ typedef struct SSuperTableObj { uint64_t uid; int64_t createdTime; int32_t sversion; + int32_t tversion; int32_t numOfColumns; int32_t numOfTags; int8_t reserved[15]; @@ -122,7 +123,6 @@ typedef struct SVgObj { int32_t numOfVnodes; int32_t lbDnodeId; int32_t lbTime; - int8_t status; int8_t inUse; int8_t reserved[13]; int8_t updateEnd[1]; diff --git a/src/mnode/inc/mgmtSdb.h b/src/mnode/inc/mgmtSdb.h index 
a97b1219a983b2d8dc722ea182446782a6de47d2..975206d52e50dbdcb17ad272ef792143e88f6c98 100644 --- a/src/mnode/inc/mgmtSdb.h +++ b/src/mnode/inc/mgmtSdb.h @@ -35,7 +35,8 @@ typedef enum { typedef enum { SDB_KEY_STRING, SDB_KEY_INT, - SDB_KEY_AUTO + SDB_KEY_AUTO, + SDB_KEY_VAR_STRING, } ESdbKey; typedef enum { diff --git a/src/mnode/inc/mgmtVgroup.h b/src/mnode/inc/mgmtVgroup.h index 7acf7112a4fe069e109d8e2d791b4b26520b9ac0..ce4cfed5323d209c7cb80ea2c452455234543915 100644 --- a/src/mnode/inc/mgmtVgroup.h +++ b/src/mnode/inc/mgmtVgroup.h @@ -22,11 +22,6 @@ extern "C" { #include "mgmtDef.h" -enum _TSDB_VG_STATUS { - TSDB_VG_STATUS_READY, - TSDB_VG_STATUS_UPDATE -}; - int32_t mgmtInitVgroups(); void mgmtCleanUpVgroups(); SVgObj *mgmtGetVgroup(int32_t vgId); diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index 463f5dde478c8e126a579d8db6540c24deca5786..c3ae8b5ab1cbb16233a4a89173ada241710c9108 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -74,6 +74,7 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) { SDnodeObj *pDnode = pOper->pObj; #ifndef _SYNC + //TODO: drop dnode local mgmtDropAllDnodeVgroups(pDnode); #endif mgmtDropMnodeLocal(pDnode->dnodeId); diff --git a/src/mnode/src/mgmtSdb.c b/src/mnode/src/mgmtSdb.c index 0520bf8493bf14d4939a7cb4d52cc81676636bb9..087c84effd63e8735a30cc9e738f20050046a3bf 100644 --- a/src/mnode/src/mgmtSdb.c +++ b/src/mnode/src/mgmtSdb.c @@ -104,6 +104,14 @@ bool sdbIsServing() { return tsSdbObj.status == SDB_STATUS_SERVING; } +static void *sdbGetObjKey(SSdbTable *pTable, void *key) { + if (pTable->keyType == SDB_KEY_VAR_STRING) { + return *(char **)key; + } + + return key; +} + static char *sdbGetActionStr(int32_t action) { switch (action) { case SDB_ACTION_INSERT: @@ -116,20 +124,25 @@ static char *sdbGetActionStr(int32_t action) { return "invalid"; } -static char *sdbGetkeyStr(SSdbTable *pTable, void *row) { +static char *sdbGetKeyStr(SSdbTable *pTable, void *key) { static char str[16]; switch (pTable->keyType) { case SDB_KEY_STRING: - return (char *)row; + case SDB_KEY_VAR_STRING: + return (char *)key; case SDB_KEY_INT: case SDB_KEY_AUTO: - sprintf(str, "%d", *(int32_t *)row); + sprintf(str, "%d", *(int32_t *)key); return str; default: return "invalid"; } } +static char *sdbGetKeyStrFromObj(SSdbTable *pTable, void *key) { + return sdbGetKeyStr(pTable, sdbGetObjKey(pTable, key)); +} + static void *sdbGetTableFromId(int32_t tableId) { return tsSdbObj.tableList[tableId]; } @@ -185,7 +198,7 @@ void sdbUpdateMnodeRoles() { } } -static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size) { +static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) { sdbUpdateMnodeRoles(); return 0; } @@ -332,50 +345,48 @@ void sdbCleanUp() { pthread_mutex_destroy(&tsSdbObj.mutex); } -void sdbIncRef(void *handle, void *pRow) { - if (pRow) { - SSdbTable *pTable = handle; - int32_t * pRefCount = (int32_t *)(pRow + pTable->refCountPos); - atomic_add_fetch_32(pRefCount, 1); - if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) { - sdbTrace("table:%s, add ref to record:%s:%s:%d", pTable->tableName, pTable->tableName, sdbGetkeyStr(pTable, pRow), - *pRefCount); - } +void sdbIncRef(void *handle, void *pObj) { + if (pObj == NULL) return; + + SSdbTable *pTable = handle; + int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); + atomic_add_fetch_32(pRefCount, 1); + if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == 
SDB_TABLE_DNODE)) { + sdbTrace("table:%s, add ref to record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } } -void sdbDecRef(void *handle, void *pRow) { - if (pRow) { - SSdbTable *pTable = handle; - int32_t * pRefCount = (int32_t *)(pRow + pTable->refCountPos); - int32_t refCount = atomic_sub_fetch_32(pRefCount, 1); - if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) { - sdbTrace("table:%s, def ref of record:%s:%s:%d", pTable->tableName, pTable->tableName, sdbGetkeyStr(pTable, pRow), - *pRefCount); - } - int8_t *updateEnd = pRow + pTable->refCountPos - 1; - if (refCount <= 0 && *updateEnd) { - sdbTrace("table:%s, record:%s:%s:%d is destroyed", pTable->tableName, pTable->tableName, - sdbGetkeyStr(pTable, pRow), *pRefCount); - SSdbOper oper = {.pObj = pRow}; - (*pTable->destroyFp)(&oper); - } +void sdbDecRef(void *handle, void *pObj) { + if (pObj == NULL) return; + + SSdbTable *pTable = handle; + int32_t * pRefCount = (int32_t *)(pObj + pTable->refCountPos); + int32_t refCount = atomic_sub_fetch_32(pRefCount, 1); + if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) { + sdbTrace("table:%s, def ref of record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); } -} -static SSdbRow *sdbGetRowMeta(void *handle, void *key) { - SSdbTable *pTable = (SSdbTable *)handle; - SSdbRow * pMeta; + int8_t *updateEnd = pObj + pTable->refCountPos - 1; + if (refCount <= 0 && *updateEnd) { + sdbTrace("table:%s, record:%s:%d is destroyed", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount); + SSdbOper oper = {.pObj = pObj}; + (*pTable->destroyFp)(&oper); + } +} - if (handle == NULL) return NULL; +static SSdbRow *sdbGetRowMeta(SSdbTable *pTable, void *key) { + if (pTable == NULL) return NULL; int32_t keySize = sizeof(int32_t); - if (pTable->keyType == SDB_KEY_STRING) { + if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { keySize = strlen((char *)key); } - pMeta = taosHashGet(pTable->iHandle, key, keySize); + + return taosHashGet(pTable->iHandle, key, keySize); +} - return pMeta; +static SSdbRow *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) { + return sdbGetRowMeta(pTable, sdbGetObjKey(pTable, key)); } void *sdbGetRow(void *handle, void *key) { @@ -387,7 +398,7 @@ void *sdbGetRow(void *handle, void *key) { pthread_mutex_lock(&pTable->mutex); int32_t keySize = sizeof(int32_t); - if (pTable->keyType == SDB_KEY_STRING) { + if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { keySize = strlen((char *)key); } pMeta = taosHashGet(pTable->iHandle, key, keySize); @@ -400,6 +411,10 @@ void *sdbGetRow(void *handle, void *key) { return pMeta->row; } +static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) { + return sdbGetRow(pTable, sdbGetObjKey(pTable, key)); +} + static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { SSdbRow rowMeta; rowMeta.rowSize = pOper->rowSize; @@ -407,11 +422,14 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { pthread_mutex_lock(&pTable->mutex); + void * key = sdbGetObjKey(pTable, pOper->pObj); int32_t keySize = sizeof(int32_t); - if (pTable->keyType == SDB_KEY_STRING) { - keySize = strlen((char *)pOper->pObj); + + if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { + keySize = strlen((char *)key); } - taosHashPut(pTable->iHandle, pOper->pObj, keySize, &rowMeta, sizeof(SSdbRow)); + + taosHashPut(pTable->iHandle, key, keySize, 
&rowMeta, sizeof(SSdbRow)); sdbIncRef(pTable, pOper->pObj); pTable->numOfRows++; @@ -425,7 +443,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) { pthread_mutex_unlock(&pTable->mutex); sdbTrace("table:%s, insert record:%s to hash, numOfRows:%d version:%" PRIu64, pTable->tableName, - sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); (*pTable->insertFp)(pOper); return TSDB_CODE_SUCCESS; @@ -436,17 +454,20 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { pthread_mutex_lock(&pTable->mutex); + void * key = sdbGetObjKey(pTable, pOper->pObj); int32_t keySize = sizeof(int32_t); - if (pTable->keyType == SDB_KEY_STRING) { - keySize = strlen((char *)pOper->pObj); + + if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) { + keySize = strlen((char *)key); } - taosHashRemove(pTable->iHandle, pOper->pObj, keySize); + + taosHashRemove(pTable->iHandle, key, keySize); pTable->numOfRows--; pthread_mutex_unlock(&pTable->mutex); sdbTrace("table:%s, delete record:%s from hash, numOfRows:%d version:%" PRIu64, pTable->tableName, - sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1; *updateEnd = 1; @@ -457,7 +478,7 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) { static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) { sdbTrace("table:%s, update record:%s in hash, numOfRows:%d version:%" PRIu64, pTable->tableName, - sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); + sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion()); (*pTable->updateFp)(pOper); return TSDB_CODE_SUCCESS; @@ -488,7 +509,7 @@ static int sdbWrite(void *param, void *data, int type) { } else if (pHead->version != tsSdbObj.version + 1) { pthread_mutex_unlock(&tsSdbObj.mutex); sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64, - pTable->tableName, sdbGetActionStr(action), sdbGetkeyStr(pTable, pHead->cont), pHead->version, + pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, tsSdbObj.version); return TSDB_CODE_OTHERS; } else { @@ -540,8 +561,8 @@ int32_t sdbInsertRow(SSdbOper *pOper) { SSdbTable *pTable = (SSdbTable *)pOper->table; if (pTable == NULL) return -1; - if (sdbGetRow(pTable, pOper->pObj)) { - sdbError("table:%s, failed to insert record:%s, already exist", pTable->tableName, sdbGetkeyStr(pTable, pOper->pObj)); + if (sdbGetRowFromObj(pTable, pOper->pObj)) { + sdbError("table:%s, failed to insert record:%s, already exist", pTable->tableName, sdbGetKeyStrFromObj(pTable, pOper->pObj)); sdbDecRef(pTable, pOper->pObj); return TSDB_CODE_ALREADY_THERE; } @@ -580,7 +601,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) { SSdbTable *pTable = (SSdbTable *)pOper->table; if (pTable == NULL) return -1; - SSdbRow *pMeta = sdbGetRowMeta(pTable, pOper->pObj); + SSdbRow *pMeta = sdbGetRowMetaFromObj(pTable, pOper->pObj); if (pMeta == NULL) { sdbTrace("table:%s, record is not there, delete failed", pTable->tableName); return -1; @@ -590,25 +611,27 @@ int32_t sdbDeleteRow(SSdbOper *pOper) { assert(pMetaRow != NULL); if (pOper->type == SDB_OPER_GLOBAL) { - int32_t rowSize = 0; + void * key = sdbGetObjKey(pTable, pOper->pObj); + int32_t keySize = 0; switch 
(pTable->keyType) { case SDB_KEY_STRING: - rowSize = strlen((char *)pOper->pObj) + 1; + case SDB_KEY_VAR_STRING: + keySize = strlen((char *)key) + 1; break; case SDB_KEY_INT: case SDB_KEY_AUTO: - rowSize = sizeof(uint64_t); + keySize = sizeof(uint32_t); break; default: return -1; } - int32_t size = sizeof(SWalHead) + rowSize; + int32_t size = sizeof(SWalHead) + keySize; SWalHead *pHead = taosAllocateQitem(size); pHead->version = 0; - pHead->len = rowSize; + pHead->len = keySize; pHead->msgType = pTable->tableId * 10 + SDB_ACTION_DELETE; - memcpy(pHead->cont, pOper->pObj, rowSize); + memcpy(pHead->cont, key, keySize); int32_t code = sdbWrite(pOper, pHead, pHead->msgType); taosFreeQitem(pHead); @@ -622,7 +645,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) { SSdbTable *pTable = (SSdbTable *)pOper->table; if (pTable == NULL) return -1; - SSdbRow *pMeta = sdbGetRowMeta(pTable, pOper->pObj); + SSdbRow *pMeta = sdbGetRowMetaFromObj(pTable, pOper->pObj); if (pMeta == NULL) { sdbTrace("table:%s, record is not there, delete failed", pTable->tableName); return -1; diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c index 804572b9ffbf52259a333c0e45bf44a5a1c0f160..d8bcf6724236d6e7bfa1e4372797a78ab95aa1e3 100644 --- a/src/mnode/src/mgmtShell.c +++ b/src/mnode/src/mgmtShell.c @@ -124,6 +124,8 @@ void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) { void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) { + mTrace("%p, msg:%s will be processed", rpcMsg->ahandle, taosMsg[rpcMsg->msgType]); + if (rpcMsg->pCont == NULL) { mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN); return; diff --git a/src/mnode/src/mgmtTable.c b/src/mnode/src/mgmtTable.c index bd4f022bc17c16c6a3d01efe632f6f4195805814..30433069789c228e5f6d5591bc2592d21cae196e 100644 --- a/src/mnode/src/mgmtTable.c +++ b/src/mnode/src/mgmtTable.c @@ -84,6 +84,7 @@ static void mgmtProcessAlterTableRsp(SRpcMsg *rpcMsg); static int32_t mgmtFindSuperTableColumnIndex(SSuperTableObj *pStable, char *colName); static void mgmtDestroyChildTable(SChildTableObj *pTable) { + tfree(pTable->info.tableId); tfree(pTable->schema); tfree(pTable->sql); tfree(pTable); @@ -180,6 +181,7 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) { SChildTableObj *pNew = pOper->pObj; SChildTableObj *pTable = mgmtGetChildTable(pNew->info.tableId); if (pTable != pNew) { + void *oldTableId = pTable->info.tableId; void *oldSql = pTable->sql; void *oldSchema = pTable->schema; memcpy(pTable, pNew, pOper->rowSize); @@ -188,6 +190,7 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) { free(pNew); free(oldSql); free(oldSchema); + free(oldTableId); } mgmtDecTableRef(pTable); @@ -195,51 +198,66 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) { } static int32_t mgmtChildTableActionEncode(SSdbOper *pOper) { - const int32_t maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); SChildTableObj *pTable = pOper->pObj; assert(pTable != NULL && pOper->rowData != NULL); - if (pTable->info.type == TSDB_CHILD_TABLE) { - memcpy(pOper->rowData, pTable, tsChildTableUpdateSize); - pOper->rowSize = tsChildTableUpdateSize; - } else { + int32_t len = strlen(pTable->info.tableId); + if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID; + + memcpy(pOper->rowData, pTable->info.tableId, len); + memset(pOper->rowData + len, 0, 1); + len++; + + memcpy(pOper->rowData + len, (char*)pTable + sizeof(char *), tsChildTableUpdateSize); + len += tsChildTableUpdateSize; + + if (pTable->info.type != TSDB_CHILD_TABLE) { 
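The new SDB_KEY_VAR_STRING key type treats the first field of the cached object as a char * to the key: sdbGetObjKey() dereferences *(char **)key, key sizes come from strlen(), and the encode/decode routines in mgmtTable.c serialize the tableId as a NUL-terminated string followed by the fixed part of the struct starting at (char *)pObj + sizeof(char *). The following is a self-contained sketch of that layout trick with a simplified, hypothetical struct; the real encoders also append the schema and SQL blobs.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for SChildTableObj/SSuperTableObj: the variable-length
 * key is a pointer at offset 0, everything after it is fixed-size state. */
typedef struct {
  char   *tableId;  /* SDB_KEY_VAR_STRING key, heap-allocated */
  int8_t  type;
  int32_t sid;
  int32_t vgId;
} SDemoTableObj;

/* Encode: write "tableId\0" first, then the fixed part that starts right
 * after the pointer field, mirroring mgmtChildTableActionEncode above. */
static int encode(const SDemoTableObj *pObj, char *row) {
  int len = (int)strlen(pObj->tableId);
  memcpy(row, pObj->tableId, len);
  row[len++] = 0;
  int fixed = (int)(sizeof(SDemoTableObj) - sizeof(char *));
  memcpy(row + len, (const char *)pObj + sizeof(char *), fixed);
  return len + fixed;
}

/* Decode: duplicate the leading string back into the pointer field, then
 * copy the fixed part that follows it. */
static void decode(SDemoTableObj *pObj, const char *row) {
  int len = (int)strlen(row);
  pObj->tableId = strdup(row);
  memcpy((char *)pObj + sizeof(char *), row + len + 1,
         sizeof(SDemoTableObj) - sizeof(char *));
}

int main(void) {
  SDemoTableObj in = {.tableId = strdup("db.tb1"), .type = 1, .sid = 7, .vgId = 3};
  char row[256];
  int n = encode(&in, row);

  SDemoTableObj out = {0};
  decode(&out, row);
  printf("encoded %d bytes, decoded %s sid=%d vgId=%d\n", n, out.tableId, out.sid, out.vgId);

  free(in.tableId);
  free(out.tableId);
  return 0;
}
```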
int32_t schemaSize = pTable->numOfColumns * sizeof(SSchema); - if (maxRowSize < tsChildTableUpdateSize + schemaSize) { - return TSDB_CODE_INVALID_MSG_LEN; + memcpy(pOper->rowData + len, pTable->schema, schemaSize); + len += schemaSize; + + if (pTable->sqlLen != 0) { + memcpy(pOper->rowData + len, pTable->sql, pTable->sqlLen); + len += pTable->sqlLen; } - memcpy(pOper->rowData, pTable, tsChildTableUpdateSize); - memcpy(pOper->rowData + tsChildTableUpdateSize, pTable->schema, schemaSize); - memcpy(pOper->rowData + tsChildTableUpdateSize + schemaSize, pTable->sql, pTable->sqlLen); - pOper->rowSize = tsChildTableUpdateSize + schemaSize + pTable->sqlLen; } + pOper->rowSize = len; + return TSDB_CODE_SUCCESS; } static int32_t mgmtChildTableActionDecode(SSdbOper *pOper) { assert(pOper->rowData != NULL); SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj)); - if (pTable == NULL) { - return TSDB_CODE_SERV_OUT_OF_MEMORY; - } + if (pTable == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY; + + int32_t len = strlen(pOper->rowData); + if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID; + pTable->info.tableId = strdup(pOper->rowData); + len++; - memcpy(pTable, pOper->rowData, tsChildTableUpdateSize); + memcpy((char*)pTable + sizeof(char *), pOper->rowData + len, tsChildTableUpdateSize); + len += tsChildTableUpdateSize; if (pTable->info.type != TSDB_CHILD_TABLE) { int32_t schemaSize = pTable->numOfColumns * sizeof(SSchema); pTable->schema = (SSchema *)malloc(schemaSize); if (pTable->schema == NULL) { mgmtDestroyChildTable(pTable); - return TSDB_CODE_SERV_OUT_OF_MEMORY; + return TSDB_CODE_INVALID_TABLE_TYPE; } - memcpy(pTable->schema, pOper->rowData + tsChildTableUpdateSize, schemaSize); + memcpy(pTable->schema, pOper->rowData + len, schemaSize); + len += schemaSize; - pTable->sql = (char *)malloc(pTable->sqlLen); - if (pTable->sql == NULL) { - mgmtDestroyChildTable(pTable); - return TSDB_CODE_SERV_OUT_OF_MEMORY; + if (pTable->sqlLen != 0) { + pTable->sql = malloc(pTable->sqlLen); + if (pTable->sql == NULL) { + mgmtDestroyChildTable(pTable); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + memcpy(pTable->sql, pOper->rowData + len, pTable->sqlLen); } - memcpy(pTable->sql, pOper->rowData + tsChildTableUpdateSize + schemaSize, pTable->sqlLen); } pOper->pObj = pTable; @@ -311,15 +329,15 @@ static int32_t mgmtChildTableActionRestored() { static int32_t mgmtInitChildTables() { SChildTableObj tObj; - tsChildTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; + tsChildTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type; SSdbTableDesc tableDesc = { .tableId = SDB_TABLE_CTABLE, .tableName = "ctables", .hashSessions = tsMaxTables, - .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16), + .maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, - .keyType = SDB_KEY_STRING, + .keyType = SDB_KEY_VAR_STRING, .insertFp = mgmtChildTableActionInsert, .deleteFp = mgmtChildTableActionDelete, .updateFp = mgmtChildTableActionUpdate, @@ -372,6 +390,7 @@ static void mgmtDestroySuperTable(SSuperTableObj *pStable) { taosHashCleanup(pStable->vgHash); pStable->vgHash = NULL; } + tfree(pStable->info.tableId); tfree(pStable->schema); tfree(pStable); } @@ -408,11 +427,13 @@ static int32_t mgmtSuperTableActionUpdate(SSdbOper *pOper) { SSuperTableObj *pNew = pOper->pObj; SSuperTableObj *pTable = 
mgmtGetSuperTable(pNew->info.tableId); if (pTable != pNew) { + void *oldTableId = pTable->info.tableId; void *oldSchema = pTable->schema; memcpy(pTable, pNew, pOper->rowSize); pTable->schema = pNew->schema; free(pNew->vgHash); free(pNew); + free(oldTableId); free(oldSchema); } mgmtDecTableRef(pTable); @@ -420,40 +441,50 @@ static int32_t mgmtSuperTableActionUpdate(SSdbOper *pOper) { } static int32_t mgmtSuperTableActionEncode(SSdbOper *pOper) { - const int32_t maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); - SSuperTableObj *pStable = pOper->pObj; assert(pOper->pObj != NULL && pOper->rowData != NULL); - int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags); + int32_t len = strlen(pStable->info.tableId); + if (len > TSDB_TABLE_ID_LEN) len = TSDB_CODE_INVALID_TABLE_ID; - if (maxRowSize < tsSuperTableUpdateSize + schemaSize) { - return TSDB_CODE_INVALID_MSG_LEN; - } + memcpy(pOper->rowData, pStable->info.tableId, len); + memset(pOper->rowData + len, 0, 1); + len++; - memcpy(pOper->rowData, pStable, tsSuperTableUpdateSize); - memcpy(pOper->rowData + tsSuperTableUpdateSize, pStable->schema, schemaSize); - pOper->rowSize = tsSuperTableUpdateSize + schemaSize; + memcpy(pOper->rowData + len, (char*)pStable + sizeof(char *), tsSuperTableUpdateSize); + len += tsSuperTableUpdateSize; + + int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags); + memcpy(pOper->rowData + len, pStable->schema, schemaSize); + len += schemaSize; + + pOper->rowSize = len; return TSDB_CODE_SUCCESS; } static int32_t mgmtSuperTableActionDecode(SSdbOper *pOper) { assert(pOper->rowData != NULL); - SSuperTableObj *pStable = (SSuperTableObj *) calloc(1, sizeof(SSuperTableObj)); if (pStable == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY; - memcpy(pStable, pOper->rowData, tsSuperTableUpdateSize); + int32_t len = strlen(pOper->rowData); + if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID; + pStable->info.tableId = strdup(pOper->rowData); + len++; + + memcpy((char*)pStable + sizeof(char *), pOper->rowData + len, tsSuperTableUpdateSize); + len += tsSuperTableUpdateSize; int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags); pStable->schema = malloc(schemaSize); if (pStable->schema == NULL) { mgmtDestroySuperTable(pStable); - return -1; + return TSDB_CODE_NOT_SUPER_TABLE; } - memcpy(pStable->schema, pOper->rowData + tsSuperTableUpdateSize, schemaSize); + memcpy(pStable->schema, pOper->rowData + len, schemaSize); + pOper->pObj = pStable; return TSDB_CODE_SUCCESS; @@ -465,15 +496,15 @@ static int32_t mgmtSuperTableActionRestored() { static int32_t mgmtInitSuperTables() { SSuperTableObj tObj; - tsSuperTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj; + tsSuperTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type; SSdbTableDesc tableDesc = { .tableId = SDB_TABLE_STABLE, .tableName = "stables", .hashSessions = TSDB_MAX_SUPER_TABLES, - .maxRowSize = tsSuperTableUpdateSize + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16), + .maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN, .refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj, - .keyType = SDB_KEY_STRING, + .keyType = SDB_KEY_VAR_STRING, .insertFp = mgmtSuperTableActionInsert, .deleteFp = mgmtSuperTableActionDelete, .updateFp = mgmtSuperTableActionUpdate, @@ -546,6 +577,7 @@ static void *mgmtGetSuperTableByUid(uint64_t uid) { pIter = 
mgmtGetNextSuperTable(pIter, &pStable); if (pStable == NULL) break; if (pStable->uid == uid) { + sdbFreeIter(pIter); return pStable; } mgmtDecTableRef(pStable); @@ -719,18 +751,19 @@ static void mgmtProcessTableMetaMsg(SQueuedMsg *pMsg) { static void mgmtProcessCreateSuperTableMsg(SQueuedMsg *pMsg) { SCMCreateTableMsg *pCreate = pMsg->pCont; - SSuperTableObj *pStable = (SSuperTableObj *)calloc(1, sizeof(SSuperTableObj)); + SSuperTableObj *pStable = calloc(1, sizeof(SSuperTableObj)); if (pStable == NULL) { mError("table:%s, failed to create, no enough memory", pCreate->tableId); mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_SERV_OUT_OF_MEMORY); return; } - strcpy(pStable->info.tableId, pCreate->tableId); + pStable->info.tableId = strdup(pCreate->tableId); pStable->info.type = TSDB_SUPER_TABLE; pStable->createdTime = taosGetTimestampMs(); pStable->uid = (((uint64_t) pStable->createdTime) << 16) + (sdbGetVersion() & ((1ul << 16) - 1ul)); pStable->sversion = 0; + pStable->tversion = 0; pStable->numOfColumns = htons(pCreate->numOfColumns); pStable->numOfTags = htons(pCreate->numOfTags); @@ -850,7 +883,7 @@ static int32_t mgmtAddSuperTableTag(SSuperTableObj *pStable, SSchema schema[], i } pStable->numOfTags += ntags; - pStable->sversion++; + pStable->tversion++; SSdbOper oper = { .type = SDB_OPER_GLOBAL, @@ -877,7 +910,7 @@ static int32_t mgmtDropSuperTableTag(SSuperTableObj *pStable, char *tagName) { memmove(pStable->schema + pStable->numOfColumns + col, pStable->schema + pStable->numOfColumns + col + 1, sizeof(SSchema) * (pStable->numOfTags - col - 1)); pStable->numOfTags--; - pStable->sversion++; + pStable->tversion++; SSdbOper oper = { .type = SDB_OPER_GLOBAL, @@ -1203,6 +1236,7 @@ static void mgmtGetSuperTableMeta(SQueuedMsg *pMsg) { STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)); pMeta->uid = htobe64(pTable->uid); pMeta->sversion = htons(pTable->sversion); + pMeta->tversion = htons(pTable->tversion); pMeta->precision = pMsg->pDb->cfg.precision; pMeta->numOfTags = (uint8_t)pTable->numOfTags; pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns); @@ -1326,12 +1360,14 @@ static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableOb pCreate->numOfColumns = htons(pTable->superTable->numOfColumns); pCreate->numOfTags = htons(pTable->superTable->numOfTags); pCreate->sversion = htonl(pTable->superTable->sversion); + pCreate->tversion = htonl(pTable->superTable->tversion); pCreate->tagDataLen = htonl(tagDataLen); pCreate->superTableUid = htobe64(pTable->superTable->uid); } else { pCreate->numOfColumns = htons(pTable->numOfColumns); pCreate->numOfTags = 0; pCreate->sversion = htonl(pTable->sversion); + pCreate->tversion = 0; pCreate->tagDataLen = 0; pCreate->superTableUid = 0; } @@ -1357,7 +1393,7 @@ static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableOb } static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj *pVgroup, int32_t tid) { - SChildTableObj *pTable = (SChildTableObj *) calloc(1, sizeof(SChildTableObj)); + SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj)); if (pTable == NULL) { mError("table:%s, failed to alloc memory", pCreate->tableId); terrno = TSDB_CODE_SERV_OUT_OF_MEMORY; @@ -1370,16 +1406,16 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj pTable->info.type = TSDB_NORMAL_TABLE; } - strcpy(pTable->info.tableId, pCreate->tableId); - pTable->createdTime = taosGetTimestampMs(); - pTable->sid = tid; - 
pTable->vgId = pVgroup->vgId; + pTable->info.tableId = strdup(pCreate->tableId); + pTable->createdTime = taosGetTimestampMs(); + pTable->sid = tid; + pTable->vgId = pVgroup->vgId; if (pTable->info.type == TSDB_CHILD_TABLE) { char *pTagData = (char *) pCreate->schema; // it is a tag key SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData); if (pSuperTable == NULL) { - mError("table:%s, corresponding super table does not exist", pCreate->tableId); + mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData); free(pTable); terrno = TSDB_CODE_INVALID_TABLE; return NULL; @@ -1459,16 +1495,21 @@ static void mgmtProcessCreateChildTableMsg(SQueuedMsg *pMsg) { return; } - int32_t sid = taosAllocateId(pVgroup->idPool); - if (sid <= 0) { - mTrace("tables:%s, no enough sid in vgId:%d", pCreate->tableId, pVgroup->vgId); - mgmtCreateVgroup(mgmtCloneQueuedMsg(pMsg), pMsg->pDb); - return; - } - if (pMsg->retry == 0) { if (pMsg->pTable == NULL) { + int32_t sid = taosAllocateId(pVgroup->idPool); + if (sid <= 0) { + mTrace("tables:%s, no enough sid in vgId:%d", pCreate->tableId, pVgroup->vgId); + mgmtCreateVgroup(mgmtCloneQueuedMsg(pMsg), pMsg->pDb); + return; + } + pMsg->pTable = (STableObj *)mgmtDoCreateChildTable(pCreate, pVgroup, sid); + if (pMsg->pTable == NULL) { + mgmtSendSimpleResp(pMsg->thandle, terrno); + return; + } + mgmtIncTableRef(pMsg->pTable); } } else { @@ -1654,15 +1695,17 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) { pMeta->sid = htonl(pTable->sid); pMeta->precision = pDb->cfg.precision; pMeta->tableType = pTable->info.type; - strncpy(pMeta->tableId, pTable->info.tableId, tListLen(pTable->info.tableId)); + strncpy(pMeta->tableId, pTable->info.tableId, strlen(pTable->info.tableId)); if (pTable->info.type == TSDB_CHILD_TABLE) { pMeta->sversion = htons(pTable->superTable->sversion); + pMeta->tversion = htons(pTable->superTable->tversion); pMeta->numOfTags = (int8_t)pTable->superTable->numOfTags; pMeta->numOfColumns = htons((int16_t)pTable->superTable->numOfColumns); pMeta->contLen = sizeof(STableMetaMsg) + mgmtSetSchemaFromSuperTable(pMeta->schema, pTable->superTable); } else { pMeta->sversion = htons(pTable->sversion); + pMeta->tversion = 0; pMeta->numOfTags = 0; pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns); pMeta->contLen = sizeof(STableMetaMsg) + mgmtSetSchemaFromNormalTable(pMeta->schema, pTable); @@ -1704,7 +1747,12 @@ static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) { pCreateMsg->igExists = 1; pCreateMsg->getMeta = 1; pCreateMsg->contLen = htonl(contLen); - memcpy(pCreateMsg->schema, pInfo->tags, sizeof(STagData)); + + contLen = sizeof(STagData); + if (contLen > pMsg->contLen - sizeof(SCMTableInfoMsg)) { + contLen = pMsg->contLen - sizeof(SCMTableInfoMsg); + } + memcpy(pCreateMsg->schema, pInfo->tags, contLen); SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg); pMsg->pCont = newMsg->pCont; diff --git a/src/mnode/src/mgmtVgroup.c b/src/mnode/src/mgmtVgroup.c index f8447a31fd06faf6d8dcb76286fc611c72f80b4c..ef5582f395b816df5b7b5ccf49f86253239bc59d 100644 --- a/src/mnode/src/mgmtVgroup.c +++ b/src/mnode/src/mgmtVgroup.c @@ -371,12 +371,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; - pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE; - pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "vgroup_status"); - pSchema[cols].bytes = htons(pShow->bytes[cols]); - cols++; - int32_t maxReplica = 0; 
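The mgmtAutoCreateChildTable hunk above stops copying a full sizeof(STagData) unconditionally and instead clamps the copy length to the bytes the incoming message actually carries, so a short request cannot make the server read past its payload. A minimal sketch of that bounds check follows; the names are hypothetical and the real message structs are not used.

#include <stdio.h>
#include <string.h>

/* Copy at most dstCap bytes, and never more than the payload that actually
 * arrived, so a short message cannot be over-read. */
static size_t demoClampedCopy(void *dst, size_t dstCap,
                              const void *payload, size_t payloadLen) {
  size_t len = dstCap;
  if (len > payloadLen) len = payloadLen;   /* clamp to the received bytes */
  memcpy(dst, payload, len);
  return len;
}

int main(void) {
  char tagBuf[64] = {0};
  const char received[16] = "short tag data";
  size_t copied = demoClampedCopy(tagBuf, sizeof(tagBuf), received, sizeof(received));
  printf("copied %zu of %zu possible bytes\n", copied, sizeof(tagBuf));
  return 0;
}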
SVgObj *pVgroup = NULL; STableObj *pTable = NULL; @@ -471,11 +465,6 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo *(int32_t *) pWrite = pVgroup->numOfTables; cols++; - pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - char* status = pVgroup->status? "updating" : "ready"; - STR_TO_VARSTR(pWrite, status); - cols++; - for (int32_t i = 0; i < maxReplica; ++i) { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId; @@ -489,8 +478,8 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - status = mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role); - STR_TO_VARSTR(pWrite, status); + char *role = mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role); + STR_TO_VARSTR(pWrite, role); cols++; } else { pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; @@ -666,7 +655,6 @@ static SMDDropVnodeMsg *mgmtBuildDropVnodeMsg(int32_t vgId) { } void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) { - mTrace("vgId:%d, send drop vnode msg, ahandle:%p", vgId, ahandle); SMDDropVnodeMsg *pDrop = mgmtBuildDropVnodeMsg(vgId); SRpcMsg rpcMsg = { .handle = ahandle, @@ -682,6 +670,7 @@ static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) { mTrace("vgId:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp); + mTrace("vgId:%d, send drop vnode msg to dnode:%d, ahandle:%p", pVgroup->vgId, pVgroup->vnodeGid[i].dnodeId, ahandle); mgmtSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle); } } diff --git a/src/plugins/monitor/src/monitorMain.c b/src/plugins/monitor/src/monitorMain.c index 76b322d01b9396cdc48f0af2d8e3ec3e5961b82b..72efd5b552cd37a72addce814246c14073b24685 100644 --- a/src/plugins/monitor/src/monitorMain.c +++ b/src/plugins/monitor/src/monitorMain.c @@ -68,7 +68,7 @@ typedef enum { typedef struct { void * conn; void * timer; - char ep[TSDB_FQDN_LEN]; + char ep[TSDB_EP_LEN]; int8_t cmdIndex; int8_t state; char sql[SQL_LENGTH]; @@ -109,6 +109,11 @@ static void monitorStartSystemRetry() { } static void monitorInitConn(void *para, void *unused) { + if (dnodeGetDnodeId() <= 0) { + monitorStartSystemRetry(); + return; + } + monitorPrint("starting to initialize monitor service .."); tsMonitorConn.state = MONITOR_STATE_INITIALIZING; diff --git a/src/query/inc/qsqltype.h b/src/query/inc/qsqltype.h deleted file mode 100644 index 4087be49eeac3042f04013b9e929fce30d323cea..0000000000000000000000000000000000000000 --- a/src/query/inc/qsqltype.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
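mgmtRetrieveVgroups above addresses every output cell as data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows, i.e. the result buffer is laid out column by column, with each column block sized for the full row capacity. The sketch below reproduces only that addressing arithmetic with illustrative two-column types; it is not the SHOW VGROUPS code itself.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Address of cell (row, col) in a column-major result buffer: each column
 * occupies bytes[col] * capacity contiguous bytes, and offset[col] is the
 * summed width of all previous columns. */
static char *demoCellAddr(char *data, int capacity, int row,
                          const int *offset, const int *bytes, int col) {
  return data + offset[col] * capacity + bytes[col] * row;
}

int main(void) {
  enum { CAPACITY = 4 };                        /* rows the block can hold */
  int bytes[2]  = { (int)sizeof(int32_t), (int)sizeof(int16_t) };
  int offset[2] = { 0, (int)sizeof(int32_t) };  /* column 1 starts after column 0 */
  char data[(sizeof(int32_t) + sizeof(int16_t)) * CAPACITY];
  memset(data, 0, sizeof(data));

  for (int row = 0; row < 3; ++row) {           /* fill three rows of both columns */
    int32_t c0 = 100 + row;
    int16_t c1 = (int16_t)row;
    memcpy(demoCellAddr(data, CAPACITY, row, offset, bytes, 0), &c0, sizeof(c0));
    memcpy(demoCellAddr(data, CAPACITY, row, offset, bytes, 1), &c1, sizeof(c1));
  }

  int32_t out = 0;
  memcpy(&out, demoCellAddr(data, CAPACITY, 2, offset, bytes, 0), sizeof(out));
  printf("row 2 of column 0 holds %d\n", out);  /* prints 102 */
  return 0;
}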
- */ - -#ifndef TDENGINE_QSQLCMD_H -#define TDENGINE_QSQLCMD_H - -#ifdef __cplusplus -extern "C" { -#endif - -enum _sql_type { - TSDB_SQL_SELECT = 1, - TSDB_SQL_FETCH, - TSDB_SQL_INSERT, - - TSDB_SQL_MGMT, // the SQL below is for mgmt node - TSDB_SQL_CREATE_DB, - TSDB_SQL_CREATE_TABLE, - TSDB_SQL_DROP_DB, - TSDB_SQL_DROP_TABLE, - TSDB_SQL_CREATE_ACCT, - TSDB_SQL_CREATE_USER, // 10 - TSDB_SQL_DROP_ACCT, - TSDB_SQL_DROP_USER, - TSDB_SQL_ALTER_USER, - TSDB_SQL_ALTER_ACCT, - TSDB_SQL_ALTER_TABLE, - TSDB_SQL_ALTER_DB, - TSDB_SQL_CREATE_MNODE, - TSDB_SQL_DROP_MNODE, - TSDB_SQL_CREATE_DNODE, - TSDB_SQL_DROP_DNODE, // 20 - TSDB_SQL_CFG_DNODE, - TSDB_SQL_CFG_MNODE, - TSDB_SQL_SHOW, - TSDB_SQL_RETRIEVE, - TSDB_SQL_KILL_QUERY, - TSDB_SQL_KILL_STREAM, - TSDB_SQL_KILL_CONNECTION, - - TSDB_SQL_READ, // SQL below is for read operation - TSDB_SQL_CONNECT, - TSDB_SQL_USE_DB, // 30 - TSDB_SQL_META, - TSDB_SQL_STABLEVGROUP, - TSDB_SQL_MULTI_META, - TSDB_SQL_HB, - - TSDB_SQL_LOCAL, // SQL below for client local - TSDB_SQL_DESCRIBE_TABLE, - TSDB_SQL_RETRIEVE_LOCALMERGE, - TSDB_SQL_TABLE_JOIN_RETRIEVE, - - /* - * build empty result instead of accessing dnode to fetch result - * reset the client cache - */ - TSDB_SQL_RETRIEVE_EMPTY_RESULT, - - TSDB_SQL_RESET_CACHE, // 40 - TSDB_SQL_SERV_STATUS, - TSDB_SQL_CURRENT_DB, - TSDB_SQL_SERV_VERSION, - TSDB_SQL_CLI_VERSION, - TSDB_SQL_CURRENT_USER, - TSDB_SQL_CFG_LOCAL, - - TSDB_SQL_MAX // 47 -}; - - -// create table operation type -enum TSQL_TYPE { - TSQL_CREATE_TABLE = 0x1, - TSQL_CREATE_STABLE = 0x2, - TSQL_CREATE_TABLE_FROM_STABLE = 0x3, - TSQL_CREATE_STREAM = 0x4, -}; - -#ifdef __cplusplus -} -#endif - -#endif // TDENGINE_QSQLCMD_H diff --git a/src/rpc/inc/rpcHead.h b/src/rpc/inc/rpcHead.h index 8b5410a596ae64a27ebecc9e3591fa90de044323..520edadc7dd072849720cad53c7f6f4ba605a06c 100644 --- a/src/rpc/inc/rpcHead.h +++ b/src/rpc/inc/rpcHead.h @@ -49,6 +49,7 @@ typedef struct { char encrypt:3; // encrypt algorithm, 0: no encryption uint16_t tranId; // transcation ID uint32_t linkUid; // for unique connection ID assigned by client + uint64_t ahandle; // ahandle assigned by client uint32_t sourceId; // source ID, an index for connection list uint32_t destId; // destination ID, an index for connection list uint32_t destIp; // destination IP address, for NAT scenario diff --git a/src/rpc/src/rpcCache.c b/src/rpc/src/rpcCache.c index edbb9b3e12be6dc24acfbb60fff1f91fe8e1c898..7a96571ab9d1a220e98d6d48a817fcc251097374 100644 --- a/src/rpc/src/rpcCache.c +++ b/src/rpc/src/rpcCache.c @@ -146,7 +146,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in rpcUnlockCache(pCache->lockedBy+hash); pCache->total++; - tTrace("%p %s:%hu:%d:%d:%p added into cache, connections:%d", data, fqdn, port, connType, hash, pNode, pCache->count[hash]); + // tTrace("%p %s:%hu:%d:%d:%p added into cache, connections:%d", data, fqdn, port, connType, hash, pNode, pCache->count[hash]); return; } @@ -200,9 +200,9 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy rpcUnlockCache(pCache->lockedBy+hash); if (pData) { - tTrace("%p %s:%hu:%d:%d:%p retrieved from cache, connections:%d", pData, fqdn, port, connType, hash, pNode, pCache->count[hash]); + //tTrace("%p %s:%hu:%d:%d:%p retrieved from cache, connections:%d", pData, fqdn, port, connType, hash, pNode, pCache->count[hash]); } else { - tTrace("%s:%hu:%d:%d failed to retrieve conn from cache, connections:%d", fqdn, port, connType, hash, pCache->count[hash]); + //tTrace("%s:%hu:%d:%d failed to 
retrieve conn from cache, connections:%d", fqdn, port, connType, hash, pCache->count[hash]); } return pData; @@ -240,8 +240,8 @@ static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash pNext = pNode->next; pCache->total--; pCache->count[hash]--; - tTrace("%p %s:%hu:%d:%d:%p removed from cache, connections:%d", pNode->data, pNode->fqdn, pNode->port, pNode->connType, hash, pNode, - pCache->count[hash]); + //tTrace("%p %s:%hu:%d:%d:%p removed from cache, connections:%d", pNode->data, pNode->fqdn, pNode->port, pNode->connType, hash, pNode, + // pCache->count[hash]); taosMemPoolFree(pCache->connHashMemPool, (char *)pNode); pNode = pNext; } diff --git a/src/rpc/src/rpcHaship.c b/src/rpc/src/rpcHaship.c deleted file mode 100644 index 0183c87f70dd059a69db1c49122432c2f1b4c22c..0000000000000000000000000000000000000000 --- a/src/rpc/src/rpcHaship.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "os.h" -#include "tmempool.h" -#include "rpcLog.h" - -typedef struct SIpHash { - uint32_t ip; - uint16_t port; - int hash; - struct SIpHash *prev; - struct SIpHash *next; - void *data; -} SIpHash; - -typedef struct { - SIpHash **ipHashList; - mpool_h ipHashMemPool; - int maxSessions; -} SHashObj; - -int rpcHashIp(void *handle, uint32_t ip, uint16_t port) { - SHashObj *pObj = (SHashObj *)handle; - int hash = 0; - - hash = (int)(ip >> 16); - hash += (unsigned short)(ip & 0xFFFF); - hash += port; - - hash = hash % pObj->maxSessions; - - return hash; -} - -void *rpcAddIpHash(void *handle, void *data, uint32_t ip, uint16_t port) { - int hash; - SIpHash *pNode; - SHashObj *pObj; - - pObj = (SHashObj *)handle; - if (pObj == NULL || pObj->maxSessions == 0) return NULL; - - hash = rpcHashIp(pObj, ip, port); - pNode = (SIpHash *)taosMemPoolMalloc(pObj->ipHashMemPool); - pNode->ip = ip; - pNode->port = port; - pNode->data = data; - pNode->prev = 0; - pNode->next = pObj->ipHashList[hash]; - pNode->hash = hash; - - if (pObj->ipHashList[hash] != 0) (pObj->ipHashList[hash])->prev = pNode; - pObj->ipHashList[hash] = pNode; - - return pObj; -} - -void rpcDeleteIpHash(void *handle, uint32_t ip, uint16_t port) { - int hash; - SIpHash *pNode; - SHashObj *pObj; - - pObj = (SHashObj *)handle; - if (pObj == NULL || pObj->maxSessions == 0) return; - - hash = rpcHashIp(pObj, ip, port); - - pNode = pObj->ipHashList[hash]; - while (pNode) { - if (pNode->ip == ip && pNode->port == port) break; - - pNode = pNode->next; - } - - if (pNode) { - if (pNode->prev) { - pNode->prev->next = pNode->next; - } else { - pObj->ipHashList[hash] = pNode->next; - } - - if (pNode->next) { - pNode->next->prev = pNode->prev; - } - - taosMemPoolFree(pObj->ipHashMemPool, (char *)pNode); - } -} - -void *rpcGetIpHash(void *handle, uint32_t ip, uint16_t port) { - int hash; - SIpHash *pNode; - SHashObj *pObj; - - pObj = (SHashObj *)handle; - if (pObj == NULL || pObj->maxSessions == 0) return NULL; - - hash = rpcHashIp(pObj, ip, port); - pNode = 
pObj->ipHashList[hash]; - - while (pNode) { - if (pNode->ip == ip && pNode->port == port) { - break; - } - pNode = pNode->next; - } - - if (pNode) { - return pNode->data; - } - return NULL; -} - -void *rpcOpenIpHash(int maxSessions) { - SIpHash **ipHashList; - mpool_h ipHashMemPool; - SHashObj *pObj; - - ipHashMemPool = taosMemPoolInit(maxSessions, sizeof(SIpHash)); - if (ipHashMemPool == 0) return NULL; - - ipHashList = calloc(sizeof(SIpHash *), (size_t)maxSessions); - if (ipHashList == 0) { - taosMemPoolCleanUp(ipHashMemPool); - return NULL; - } - - pObj = malloc(sizeof(SHashObj)); - if (pObj == NULL) { - taosMemPoolCleanUp(ipHashMemPool); - free(ipHashList); - return NULL; - } - - pObj->maxSessions = maxSessions; - pObj->ipHashMemPool = ipHashMemPool; - pObj->ipHashList = ipHashList; - - return pObj; -} - -void rpcCloseIpHash(void *handle) { - SHashObj *pObj; - - pObj = (SHashObj *)handle; - if (pObj == NULL || pObj->maxSessions == 0) return; - - if (pObj->ipHashMemPool) taosMemPoolCleanUp(pObj->ipHashMemPool); - - if (pObj->ipHashList) free(pObj->ipHashList); - - memset(pObj, 0, sizeof(SHashObj)); - free(pObj); -} diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 3e638eb3d3bf0300023494941859b6d33b2b4a3e..445e542387961cde730943f02a56c2d9fe81d610 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -87,6 +87,7 @@ typedef struct { } SRpcReqContext; typedef struct SRpcConn { + char info[50];// debug info: label + pConn + ahandle int sid; // session ID uint32_t ownId; // own link ID uint32_t peerId; // peer link ID @@ -275,7 +276,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } - tTrace("%s RPC is openned, numOfThreads:%d", pRpc->label, pRpc->numOfThreads); + tTrace("%s rpc is openned, threads:%d sessions:%d", pRpc->label, pRpc->numOfThreads, pInit->sessions); return pRpc; } @@ -299,7 +300,7 @@ void rpcClose(void *param) { tfree(pRpc->connList); pthread_mutex_destroy(&pRpc->mutex); - tTrace("%s RPC is closed", pRpc->label); + tTrace("%s rpc is closed", pRpc->label); tfree(pRpc); } @@ -361,9 +362,10 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) // connection type is application specific. 
// for TDengine, all the query, show commands shall have TCP connection char type = pMsg->msgType; - if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH || - type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || - type == TSDB_MSG_TYPE_CM_SHOW ) + if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE + || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP + || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META + || type == TSDB_MSG_TYPE_CM_SHOW ) pContext->connType = RPC_CONN_TCPC; rpcSendReqToServer(pRpc, pContext); @@ -374,8 +376,6 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) void rpcSendResponse(const SRpcMsg *pRsp) { int msgLen = 0; SRpcConn *pConn = (SRpcConn *)pRsp->handle; - SRpcInfo *pRpc = pConn->pRpc; - SRpcMsg rpcMsg = *pRsp; SRpcMsg *pMsg = &rpcMsg; @@ -393,7 +393,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { rpcLockConn(pConn); if ( pConn->inType == 0 || pConn->user[0] == 0 ) { - tTrace("%s %p, connection is already released, rsp wont be sent", pRpc->label, pConn); + tTrace("%s, connection is already released, rsp wont be sent", pConn->info); rpcUnlockConn(pConn); return; } @@ -409,7 +409,8 @@ void rpcSendResponse(const SRpcMsg *pRsp) { pHead->linkUid = pConn->linkUid; pHead->port = htons(pConn->localPort); pHead->code = htonl(pMsg->code); - + pHead->ahandle = (uint64_t) pConn->ahandle; + // set pConn parameters pConn->inType = 0; @@ -491,6 +492,7 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort, uint32_t peerIp = taosGetIpFromFqdn(peerFqdn); if (peerIp == -1) { tError("%s, failed to resolve FQDN:%s", pRpc->label, peerFqdn); + terrno = TSDB_CODE_APP_ERROR; return NULL; } @@ -506,11 +508,7 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort, if (taosOpenConn[connType]) { void *shandle = (connType & RPC_CONN_TCP)? 
pRpc->tcphandle:pRpc->udphandle; pConn->chandle = (*taosOpenConn[connType])(shandle, pConn, pConn->peerIp, pConn->peerPort); - if (pConn->chandle) { - tTrace("%s %p, rpc connection is set up, sid:%d id:%s %s:%hu connType:%d", pRpc->label, - pConn, pConn->sid, pRpc->user, peerFqdn, pConn->peerPort, pConn->connType); - } else { - tError("%s %p, failed to set up connection to %s:%hu", pRpc->label, pConn, peerFqdn, pConn->peerPort); + if (pConn->chandle == NULL) { terrno = TSDB_CODE_NETWORK_UNAVAIL; rpcCloseConn(pConn); pConn = NULL; @@ -557,7 +555,7 @@ static void rpcCloseConn(void *thandle) { taosFreeId(pRpc->idPool, pConn->sid); pConn->pContext = NULL; - tTrace("%s %p, rpc connection is closed", pRpc->label, pConn); + tTrace("%s, rpc connection is closed", pConn->info); rpcUnlockConn(pConn); } @@ -619,7 +617,6 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) { } if (terrno != 0) { - tWarn("%s %p, user not there or server not ready", pRpc->label, pConn); taosFreeId(pRpc->idPool, sid); // sid shall be released pConn = NULL; } @@ -634,8 +631,6 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) { } taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES); - tTrace("%s %p, rpc connection is allocated, sid:%d id:%s port:%u", - pRpc->label, pConn, sid, pConn->user, pConn->localPort); } return pConn; @@ -660,7 +655,6 @@ static SRpcConn *rpcGetConnObj(SRpcInfo *pRpc, int sid, SRecvInfo *pRecv) { if (pConn) { if (pConn->linkUid != pHead->linkUid) { - tTrace("%s %p, linkUid:0x%x not matched, received:0x%x", pRpc->label, pConn, pConn->linkUid, pHead->linkUid); terrno = TSDB_CODE_MISMATCHED_METER_ID; pConn = NULL; } @@ -677,21 +671,25 @@ static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) { pConn = rpcGetConnFromCache(pRpc->pCache, pIpSet->fqdn[pIpSet->inUse], pIpSet->port[pIpSet->inUse], pContext->connType); if ( pConn == NULL || pConn->user[0] == 0) { pConn = rpcOpenConn(pRpc, pIpSet->fqdn[pIpSet->inUse], pIpSet->port[pIpSet->inUse], pContext->connType); + } + + if (pConn) { + pConn->ahandle = pContext->ahandle; + sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle); } else { - tTrace("%s %p, connection is retrieved from cache", pRpc->label, pConn); + tError("%s %p, failed to set up connection(%s)", pRpc->label, pContext->ahandle, tstrerror(terrno)); } return pConn; } static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) { - SRpcInfo *pRpc= pConn->pRpc; if (pConn->peerId == 0) { pConn->peerId = pHead->sourceId; } else { if (pConn->peerId != pHead->sourceId) { - tTrace("%s %p, source Id is changed, old:0x%08x new:0x%08x", pRpc->label, pConn, + tTrace("%s, source Id is changed, old:0x%08x new:0x%08x", pConn->info, pConn->peerId, pHead->sourceId); return TSDB_CODE_INVALID_VALUE; } @@ -700,17 +698,16 @@ static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) { if (pConn->inTranId == pHead->tranId) { if (pConn->inType == pHead->msgType) { if (pHead->code == 0) { - tTrace("%s %p, %s is retransmitted", pRpc->label, pConn, taosMsg[pHead->msgType]); + tTrace("%s, %s is retransmitted", pConn->info, taosMsg[pHead->msgType]); rpcSendQuickRsp(pConn, TSDB_CODE_ACTION_IN_PROGRESS); } else { // do nothing, it is heart beat from client } } else if (pConn->inType == 0) { - tTrace("%s %p, %s is already processed, tranId:%d", pRpc->label, pConn, - taosMsg[pHead->msgType], pConn->inTranId); + tTrace("%s, %s is already processed, tranId:%d", pConn->info, taosMsg[pHead->msgType], pConn->inTranId); 
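The rpc changes in this region thread the application handle (ahandle) through the wire header as a 64-bit integer and pre-build a per-connection info string (label, connection pointer, ahandle) that later trace lines reuse. The sketch below shows that pattern with stub structs rather than the real SRpcHead/SRpcConn; the uintptr_t cast is an addition here to keep the pointer/integer round trip well defined.

#include <stdio.h>
#include <stdint.h>

/* Stub header and connection types standing in for the real rpc structs. */
typedef struct { uint64_t ahandle; } DemoHead;
typedef struct { void *ahandle; char info[50]; } DemoConn;

static void demoStamp(DemoHead *pHead, DemoConn *pConn, const char *label, void *appHandle) {
  pConn->ahandle = appHandle;
  /* carry the opaque app handle across the wire as a plain 64-bit value */
  pHead->ahandle = (uint64_t)(uintptr_t)pConn->ahandle;
  /* pre-format the "label conn ahandle" prefix reused by later trace lines */
  snprintf(pConn->info, sizeof(pConn->info), "%s %p %p", label, (void *)pConn, pConn->ahandle);
}

int main(void) {
  int requestContext = 42;                     /* stands in for the caller's request context */
  DemoHead head;
  DemoConn conn;
  demoStamp(&head, &conn, "DEMO", &requestContext);

  /* the receiving side turns the integer back into the original pointer */
  void *restored = (void *)(uintptr_t)head.ahandle;
  printf("%s, handle restored intact: %d\n", conn.info, restored == (void *)&requestContext);
  return 0;
}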
rpcSendMsgToPeer(pConn, pConn->pRspMsg, pConn->rspMsgLen); // resend the response } else { - tTrace("%s %p, mismatched message %s and tranId", pRpc->label, pConn, taosMsg[pHead->msgType]); + tTrace("%s, mismatched message %s and tranId", pConn->info, taosMsg[pHead->msgType]); } // do not reply any message @@ -718,7 +715,7 @@ static int rpcProcessReqHead(SRpcConn *pConn, SRpcHead *pHead) { } if (pConn->inType != 0) { - tTrace("%s %p, last session is not finished, inTranId:%d tranId:%d", pRpc->label, pConn, + tTrace("%s, last session is not finished, inTranId:%d tranId:%d", pConn->info, pConn->inTranId, pHead->tranId); return TSDB_CODE_LAST_SESSION_NOT_FINISHED; } @@ -750,7 +747,7 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) { if (pHead->code == TSDB_CODE_ACTION_IN_PROGRESS) { if (pConn->tretry <= tsRpcMaxRetry) { - tTrace("%s %p, peer is still processing the transaction", pRpc->label, pConn); + tTrace("%s, peer is still processing the transaction", pConn->info); pConn->tretry++; rpcSendReqHead(pConn); taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer); @@ -789,7 +786,15 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) { } pConn = rpcGetConnObj(pRpc, sid, pRecv); - if (pConn == NULL) return NULL; + if (pConn == NULL) { + tError("%s %p, failed to get connection obj(%s)", pRpc->label, pHead->ahandle, tstrerror(terrno)); + return NULL; + } else { + if (rpcIsReq(pHead->msgType)) { + pConn->ahandle = (void *)pHead->ahandle; + sprintf(pConn->info, "%s %p %p", pRpc->label, pConn, pConn->ahandle); + } + } rpcLockConn(pConn); sid = pConn->sid; @@ -826,7 +831,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) { static void rpcProcessBrokenLink(SRpcConn *pConn) { SRpcInfo *pRpc = pConn->pRpc; - tTrace("%s %p, link is broken", pRpc->label, pConn); + tTrace("%s, link is broken", pConn->info); // pConn->chandle = NULL; if (pConn->outType) { @@ -837,7 +842,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) { if (pConn->inType) { // if there are pending request, notify the app - tTrace("%s %p, connection is gone, notify the app", pRpc->label, pConn); + tTrace("%s, connection is gone, notify the app", pConn->info); /* SRpcMsg rpcMsg; rpcMsg.pCont = NULL; @@ -872,17 +877,17 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { pConn = rpcProcessMsgHead(pRpc, pRecv); if (pHead->msgType < TSDB_MSG_TYPE_CM_HEARTBEAT || (rpcDebugFlag & 16)) { - tTrace("%s %p, %s received from 0x%x:%hu, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", - pRpc->label, pConn, taosMsg[pHead->msgType], pRecv->ip, pRecv->port, terrno, + tTrace("%s %p %p, %s received from 0x%x:%hu, parse code:0x%x len:%d sig:0x%08x:0x%08x:%d code:0x%x", + pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType], pRecv->ip, pRecv->port, terrno, pRecv->msgLen, pHead->sourceId, pHead->destId, pHead->tranId, pHead->code); } int32_t code = terrno; if (code != TSDB_CODE_ALREADY_PROCESSED) { if (code != 0) { // parsing error - if ( rpcIsReq(pHead->msgType) ) { + if (rpcIsReq(pHead->msgType)) { rpcSendErrorMsgToPeer(pRecv, code); - tTrace("%s %p, %s is sent with error code:%x", pRpc->label, pConn, taosMsg[pHead->msgType+1], code); + tTrace("%s %p %p, %s is sent with error code:%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); } } else { // parsing OK rpcProcessIncomingMsg(pConn, pHead); @@ -898,9 +903,9 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { if (pContext->pRsp) { // for 
synchronous API - tsem_post(pContext->pSem); memcpy(pContext->pSet, &pContext->ipSet, sizeof(SRpcIpSet)); memcpy(pContext->pRsp, pMsg, sizeof(SRpcMsg)); + tsem_post(pContext->pSem); } else { // for asynchronous API SRpcIpSet *pIpSet = NULL; @@ -924,6 +929,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) { rpcMsg.pCont = pHead->content; rpcMsg.msgType = pHead->msgType; rpcMsg.code = pHead->code; + rpcMsg.ahandle = pConn->ahandle; if ( rpcIsReq(pHead->msgType) ) { rpcMsg.handle = pConn; @@ -948,14 +954,14 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) { pContext->redirect++; if (pContext->redirect > TSDB_MAX_REPLICA) { pHead->code = TSDB_CODE_NETWORK_UNAVAIL; - tWarn("%s %p, too many redirects, quit", pRpc->label, pConn); + tWarn("%s, too many redirects, quit", pConn->info); } } if (pHead->code == TSDB_CODE_REDIRECT) { pContext->numOfTry = 0; memcpy(&pContext->ipSet, pHead->content, sizeof(pContext->ipSet)); - tTrace("%s %p, redirect is received, numOfIps:%d", pRpc->label, pConn, pContext->ipSet.numOfIps); + tTrace("%s, redirect is received, numOfIps:%d", pConn->info, pContext->ipSet.numOfIps); for (int i=0; i<pContext->ipSet.numOfIps; ++i) pContext->ipSet.port[i] = htons(pContext->ipSet.port[i]); rpcSendReqToServer(pRpc, pContext); @@ -1061,6 +1067,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { return; } + pConn->ahandle = pContext->ahandle; rpcLockConn(pConn); // set the message header @@ -1074,6 +1081,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { pHead->destId = pConn->peerId; pHead->port = 0; pHead->linkUid = pConn->linkUid; + pHead->ahandle = (uint64_t)pConn->ahandle; if (!pConn->secured) memcpy(pHead->user, pConn->user, tListLen(pHead->user)); // set the connection parameters @@ -1091,29 +1099,28 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { int writtenLen = 0; - SRpcInfo *pRpc = pConn->pRpc; SRpcHead *pHead = (SRpcHead *)msg; msgLen = rpcAddAuthPart(pConn, msg, msgLen); if ( rpcIsReq(pHead->msgType)) { if (pHead->msgType < TSDB_MSG_TYPE_CM_HEARTBEAT || (rpcDebugFlag & 16)) - tTrace("%s %p, %s is sent to %s:%hu, len:%d sig:0x%08x:0x%08x:%d", - pRpc->label, pConn, taosMsg[pHead->msgType], pConn->peerFqdn, - pConn->peerPort, msgLen, pHead->sourceId, pHead->destId, pHead->tranId); + tTrace("%s, %s is sent to %s:%hu, len:%d sig:0x%08x:0x%08x:%d", + pConn->info, taosMsg[pHead->msgType], pConn->peerFqdn, pConn->peerPort, + msgLen, pHead->sourceId, pHead->destId, pHead->tranId); } else { if (pHead->code == 0) pConn->secured = 1; // for success response, set link as secured if (pHead->msgType < TSDB_MSG_TYPE_CM_HEARTBEAT || (rpcDebugFlag & 16)) - tTrace( "%s %p, %s is sent to 0x%x:%hu, code:0x%x len:%d sig:0x%08x:0x%08x:%d", - pRpc->label, pConn, taosMsg[pHead->msgType], pConn->peerIp, pConn->peerPort, + tTrace("%s, %s is sent to 0x%x:%hu, code:0x%x len:%d sig:0x%08x:0x%08x:%d", + pConn->info, taosMsg[pHead->msgType], pConn->peerIp, pConn->peerPort, htonl(pHead->code), msgLen, pHead->sourceId, pHead->destId, pHead->tranId); } + //tTrace("connection type is: %d", pConn->connType); writtenLen = (*taosSendData[pConn->connType])(pConn->peerIp, pConn->peerPort, pHead, msgLen, pConn->chandle); if (writtenLen != msgLen) { - tError("%s %p, failed to send, dataLen:%d writtenLen:%d, reason:%s", pRpc->label, pConn, - msgLen, writtenLen, strerror(errno)); + tError("%s, failed to send, msgLen:%d 
written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno)); } tDump(msg, msgLen); @@ -1128,7 +1135,7 @@ static void rpcProcessConnError(void *param, void *id) { return; } - tTrace("%s connection error happens", pRpc->label); + tTrace("%s %p, connection error happens", pRpc->label, pContext->ahandle); if (pContext->numOfTry >= pContext->ipSet.numOfIps) { rpcMsg.msgType = pContext->msgType+1; @@ -1154,23 +1161,21 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) { rpcLockConn(pConn); if (pConn->outType && pConn->user[0]) { - tTrace("%s %p, expected %s is not received", pRpc->label, pConn, taosMsg[(int)pConn->outType + 1]); + tTrace("%s, expected %s is not received", pConn->info, taosMsg[(int)pConn->outType + 1]); pConn->pTimer = NULL; pConn->retry++; if (pConn->retry < 4) { - tTrace("%s %p, re-send msg:%s to %s:%hu", pRpc->label, pConn, - taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); + tTrace("%s, re-send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen); taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer); } else { // close the connection - tTrace("%s %p, failed to send msg:%s to %s:%hu", pRpc->label, pConn, - taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); + tTrace("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort); reportDisc = 1; } } else { - tTrace("%s %p, retry timer not processed", pRpc->label, pConn); + tTrace("%s, retry timer not processed", pConn->info); } rpcUnlockConn(pConn); @@ -1187,10 +1192,10 @@ static void rpcProcessIdleTimer(void *param, void *tmrId) { SRpcInfo *pRpc = pConn->pRpc; if (pConn->user[0]) { - tTrace("%s %p, close the connection since no activity", pRpc->label, pConn); + tTrace("%s, close the connection since no activity", pConn->info); if (pConn->inType && pRpc->cfp) { // if there are pending request, notify the app - tTrace("%s %p, notify the app, connection is gone", pRpc->label, pConn); + tTrace("%s, notify the app, connection is gone", pConn->info); /* SRpcMsg rpcMsg; rpcMsg.pCont = NULL; @@ -1203,7 +1208,7 @@ static void rpcProcessIdleTimer(void *param, void *tmrId) { } rpcCloseConn(pConn); } else { - tTrace("%s %p, idle timer:%p not processed", pRpc->label, pConn, tmrId); + tTrace("%s, idle timer:%p not processed", pConn->info, tmrId); } } @@ -1214,11 +1219,11 @@ static void rpcProcessProgressTimer(void *param, void *tmrId) { rpcLockConn(pConn); if (pConn->inType && pConn->user[0]) { - tTrace("%s %p, progress timer expired, send progress", pRpc->label, pConn); + tTrace("%s, progress timer expired, send progress", pConn->info); rpcSendQuickRsp(pConn, TSDB_CODE_ACTION_IN_PROGRESS); taosTmrReset(rpcProcessProgressTimer, tsRpcTimer/2, pConn, pRpc->tmrCtrl, &pConn->pTimer); } else { - tTrace("%s %p, progress timer:%p not processed", pRpc->label, pConn, tmrId); + tTrace("%s, progress timer:%p not processed", pConn->info, tmrId); } rpcUnlockConn(pConn); @@ -1252,7 +1257,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) { memcpy(pCont + overhead, buf, compLen); pHead->comp = 1; - tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); + //tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); finalLen = compLen + overhead; } else { finalLen = contLen; @@ -1286,7 +1291,7 @@ static SRpcHead *rpcDecompressRpcMsg(SRpcHead *pHead) { pNewHead->msgLen = rpcMsgLenFromCont(origLen); 
rpcFreeMsg(pHead); // free the compressed message buffer pHead = pNewHead; - tTrace("decompress rpc msg, compLen:%d, after:%d", compLen, contLen); + //tTrace("decompress rpc msg, compLen:%d, after:%d", compLen, contLen); } else { tError("failed to allocate memory to decompress msg, contLen:%d", contLen); } @@ -1343,7 +1348,6 @@ static int rpcAddAuthPart(SRpcConn *pConn, char *msg, int msgLen) { static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) { SRpcHead *pHead = (SRpcHead *)msg; - SRpcInfo *pRpc = pConn->pRpc; int code = 0; if ((pConn->secured && pHead->spi == 0) || (pHead->spi == 0 && pConn->spi == 0)){ @@ -1371,20 +1375,20 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) { delta = (int32_t)htonl(pDigest->timeStamp); delta -= (int32_t)taosGetTimestampSec(); if (abs(delta) > 900) { - tWarn("%s %p, time diff:%d is too big, msg discarded", pRpc->label, pConn, delta); + tWarn("%s, time diff:%d is too big, msg discarded", pConn->info, delta); code = TSDB_CODE_INVALID_TIME_STAMP; } else { if (rpcAuthenticateMsg(pHead, msgLen-TSDB_AUTH_LEN, pDigest->auth, pConn->secret) < 0) { - tError("%s %p, authentication failed, msg discarded", pRpc->label, pConn); + tError("%s, authentication failed, msg discarded", pConn->info); code = TSDB_CODE_AUTH_FAILURE; } else { pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen) - sizeof(SRpcDigest); if ( !rpcIsReq(pHead->msgType) ) pConn->secured = 1; // link is secured for client - tTrace("%s %p, message is authenticated", pRpc->label, pConn); + //tTrace("%s, message is authenticated", pConn->info); } } } else { - tTrace("%s %p, auth spi:%d not matched with received:%d", pRpc->label, pConn, pConn->spi, pHead->spi); + tError("%s, auth spi:%d not matched with received:%d", pConn->info, pConn->spi, pHead->spi); code = TSDB_CODE_AUTH_FAILURE; } diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index c551f6b1db1acfe847ce51ef70b1f883ed66cb05..3a40f27e26f124a5c3caa40c27481f21a187eb14 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -19,7 +19,6 @@ #include "ttimer.h" #include "tutil.h" #include "rpcLog.h" -#include "rpcHaship.h" #include "rpcUdp.h" #include "rpcHead.h" @@ -28,8 +27,6 @@ #define RPC_UDP_BUF_TIME 5 // mseconds #define RPC_MAX_UDP_SIZE 65480 -int tsUdpDelay = 0; - typedef struct { void *signature; int index; @@ -38,8 +35,6 @@ typedef struct { uint16_t localPort; // local port char label[12]; // copy from udpConnSet; pthread_t thread; - pthread_mutex_t mutex; - void *tmrCtrl; // copy from UdpConnSet; void *hash; void *shandle; // handle passed by upper layer during server initialization void *pSet; @@ -55,26 +50,11 @@ typedef struct { void *shandle; // handle passed by upper layer during server initialization int threads; char label[12]; - void *tmrCtrl; void *(*fp)(SRecvInfo *pPacket); SUdpConn udpConn[]; } SUdpConnSet; -typedef struct { - void *signature; - uint32_t ip; // dest IP - uint16_t port; // dest Port - SUdpConn *pConn; - struct sockaddr_in destAdd; - void *msgHdr; - int totalLen; - void *timer; - int emptyNum; -} SUdpBuf; - static void *taosRecvUdpData(void *param); -static SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, uint16_t port); -static void taosProcessUdpBufTimer(void *param, void *tmrId); void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads, void *fp, void *shandle) { SUdpConn *pConn; @@ -94,16 +74,6 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads pSet->fp = fp; strcpy(pSet->label, label); 
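The authentication path above discards any message whose embedded timestamp differs from the local clock by more than 900 seconds, a basic replay and clock-skew guard. A standalone sketch of that check follows; the 900-second window is taken from the code above, everything else is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>

#define DEMO_MAX_SKEW_SEC 900   /* same window as the rpc authentication check */

/* Accept a message only if its timestamp lies within the allowed skew window. */
static int demoTimestampOk(int32_t msgTimeSec, int32_t nowSec) {
  int32_t delta = msgTimeSec - nowSec;
  return abs(delta) <= DEMO_MAX_SKEW_SEC;
}

int main(void) {
  int32_t now = (int32_t)time(NULL);
  printf("10s-old message accepted: %d\n", demoTimestampOk(now - 10, now));
  printf("1h-old message accepted:  %d\n", demoTimestampOk(now - 3600, now));
  return 0;
}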
- if ( tsUdpDelay ) { - char udplabel[12]; - sprintf(udplabel, "%s.b", label); - pSet->tmrCtrl = taosTmrInit(RPC_MAX_UDP_CONNS * threads, 5, 5000, udplabel); - if (pSet->tmrCtrl == NULL) { - tError("%s failed to initialize tmrCtrl") taosCleanUpUdpConnection(pSet); - return NULL; - } - } - uint16_t ownPort; for (int i = 0; i < threads; ++i) { pConn = pSet->udpConn + i; @@ -135,11 +105,6 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads pConn->index = i; pConn->pSet = pSet; pConn->signature = pConn; - if (tsUdpDelay) { - pConn->hash = rpcOpenIpHash(RPC_MAX_UDP_CONNS); - pthread_mutex_init(&pConn->mutex, NULL); - pConn->tmrCtrl = pSet->tmrCtrl; - } pthread_attr_t thAttr; pthread_attr_init(&thAttr); @@ -173,10 +138,6 @@ void taosCleanUpUdpConnection(void *handle) { free(pConn->buffer); pthread_cancel(pConn->thread); taosCloseSocket(pConn->fd); - if (pConn->hash) { - rpcCloseIpHash(pConn->hash); - pthread_mutex_destroy(&pConn->mutex); - } } for (int i = 0; i < pSet->threads; ++i) { @@ -185,7 +146,6 @@ void taosCleanUpUdpConnection(void *handle) { tTrace("chandle:%p is closed", pConn); } - taosTmrCleanUp(pSet->tmrCtrl); tfree(pSet); } @@ -205,64 +165,42 @@ void *taosOpenUdpConnection(void *shandle, void *thandle, uint32_t ip, uint16_t static void *taosRecvUdpData(void *param) { SUdpConn *pConn = param; struct sockaddr_in sourceAdd; - int dataLen; + ssize_t dataLen; unsigned int addLen; uint16_t port; - int minSize = sizeof(SRpcHead); SRecvInfo recvInfo; memset(&sourceAdd, 0, sizeof(sourceAdd)); addLen = sizeof(sourceAdd); tTrace("%s UDP thread is created, index:%d", pConn->label, pConn->index); + char *msg = pConn->buffer; while (1) { dataLen = recvfrom(pConn->fd, pConn->buffer, RPC_MAX_UDP_SIZE, 0, (struct sockaddr *)&sourceAdd, &addLen); port = ntohs(sourceAdd.sin_port); - tTrace("%s msg is recv from 0x%x:%hu len:%d", pConn->label, sourceAdd.sin_addr.s_addr, port, dataLen); if (dataLen < sizeof(SRpcHead)) { tError("%s recvfrom failed, reason:%s\n", pConn->label, strerror(errno)); continue; } - int processedLen = 0, leftLen = 0; - int msgLen = 0; - int count = 0; - char *msg = pConn->buffer; - while (processedLen < dataLen) { - leftLen = dataLen - processedLen; - SRpcHead *pHead = (SRpcHead *)msg; - msgLen = htonl((uint32_t)pHead->msgLen); - if (leftLen < minSize || msgLen > leftLen || msgLen < minSize) { - tError("%s msg is messed up, dataLen:%d processedLen:%d count:%d msgLen:%d", pConn->label, dataLen, - processedLen, count, msgLen); - break; - } - - char *tmsg = malloc((size_t)msgLen + tsRpcOverhead); - if (NULL == tmsg) { - tError("%s failed to allocate memory, size:%d", pConn->label, msgLen); - break; - } - - tmsg += tsRpcOverhead; // overhead for SRpcReqContext - memcpy(tmsg, msg, (size_t)msgLen); - recvInfo.msg = tmsg; - recvInfo.msgLen = msgLen; - recvInfo.ip = sourceAdd.sin_addr.s_addr; - recvInfo.port = port; - recvInfo.shandle = pConn->shandle; - recvInfo.thandle = NULL; - recvInfo.chandle = pConn; - recvInfo.connType = 0; - (*(pConn->processData))(&recvInfo); - - processedLen += msgLen; - msg += msgLen; - count++; + char *tmsg = malloc(dataLen + tsRpcOverhead); + if (NULL == tmsg) { + tError("%s failed to allocate memory, size:%d", pConn->label, dataLen); + continue; } - // tTrace("%s %d UDP packets are received together", pConn->label, count); + tmsg += tsRpcOverhead; // overhead for SRpcReqContext + memcpy(tmsg, msg, dataLen); + recvInfo.msg = tmsg; + recvInfo.msgLen = dataLen; + recvInfo.ip = sourceAdd.sin_addr.s_addr; + recvInfo.port = port; + 
recvInfo.shandle = pConn->shandle; + recvInfo.thandle = NULL; + recvInfo.chandle = pConn; + recvInfo.connType = 0; + (*(pConn->processData))(&recvInfo); } return NULL; @@ -270,141 +208,17 @@ static void *taosRecvUdpData(void *param) { int taosSendUdpData(uint32_t ip, uint16_t port, void *data, int dataLen, void *chandle) { SUdpConn *pConn = (SUdpConn *)chandle; - SUdpBuf *pBuf; if (pConn == NULL || pConn->signature != pConn) return -1; - if (pConn->hash == NULL) { - struct sockaddr_in destAdd; - memset(&destAdd, 0, sizeof(destAdd)); - destAdd.sin_family = AF_INET; - destAdd.sin_addr.s_addr = ip; - destAdd.sin_port = htons(port); - - //tTrace("%s msg is sent to 0x%x:%hu len:%d ret:%d localPort:%hu chandle:0x%x", pConn->label, destAdd.sin_addr.s_addr, - // port, dataLen, ret, pConn->localPort, chandle); - int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd)); - - return ret; - } - - pthread_mutex_lock(&pConn->mutex); - - pBuf = (SUdpBuf *)rpcGetIpHash(pConn->hash, ip, port); - if (pBuf == NULL) { - pBuf = taosCreateUdpBuf(pConn, ip, port); - rpcAddIpHash(pConn->hash, pBuf, ip, port); - } - - if ((pBuf->totalLen + dataLen > RPC_MAX_UDP_SIZE) || (taosMsgHdrSize(pBuf->msgHdr) >= RPC_MAX_UDP_PKTS)) { - taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); - - taosSendMsgHdr(pBuf->msgHdr, pConn->fd); - pBuf->totalLen = 0; - } - - taosSetMsgHdrData(pBuf->msgHdr, data, dataLen); - - pBuf->totalLen += dataLen; - - pthread_mutex_unlock(&pConn->mutex); - - return dataLen; -} - -void taosFreeMsgHdr(void *hdr) { - struct msghdr *msgHdr = (struct msghdr *)hdr; - free(msgHdr->msg_iov); -} - -int taosMsgHdrSize(void *hdr) { - struct msghdr *msgHdr = (struct msghdr *)hdr; - return (int)msgHdr->msg_iovlen; -} - -void taosSendMsgHdr(void *hdr, int fd) { - struct msghdr *msgHdr = (struct msghdr *)hdr; - sendmsg(fd, msgHdr, 0); - msgHdr->msg_iovlen = 0; -} - -void taosInitMsgHdr(void **hdr, void *dest, int maxPkts) { - struct msghdr *msgHdr = (struct msghdr *)malloc(sizeof(struct msghdr)); - memset(msgHdr, 0, sizeof(struct msghdr)); - *hdr = msgHdr; - struct sockaddr_in *destAdd = (struct sockaddr_in *)dest; - - msgHdr->msg_name = destAdd; - msgHdr->msg_namelen = sizeof(struct sockaddr_in); - int size = (int)sizeof(struct iovec) * maxPkts; - msgHdr->msg_iov = (struct iovec *)malloc((size_t)size); - memset(msgHdr->msg_iov, 0, (size_t)size); -} - -void taosSetMsgHdrData(void *hdr, char *data, int dataLen) { - struct msghdr *msgHdr = (struct msghdr *)hdr; - msgHdr->msg_iov[msgHdr->msg_iovlen].iov_base = data; - msgHdr->msg_iov[msgHdr->msg_iovlen].iov_len = (size_t)dataLen; - msgHdr->msg_iovlen++; -} - -void taosRemoveUdpBuf(SUdpBuf *pBuf) { - taosTmrStopA(&pBuf->timer); - rpcDeleteIpHash(pBuf->pConn->hash, pBuf->ip, pBuf->port); - - // tTrace("%s UDP buffer to:0x%lld:%d is removed", pBuf->pConn->label, - // pBuf->ip, pBuf->port); - - pBuf->signature = NULL; - taosFreeMsgHdr(pBuf->msgHdr); - free(pBuf); -} - -void taosProcessUdpBufTimer(void *param, void *tmrId) { - SUdpBuf *pBuf = (SUdpBuf *)param; - if (pBuf->signature != param) return; - if (pBuf->timer != tmrId) return; - - SUdpConn *pConn = pBuf->pConn; - - pthread_mutex_lock(&pConn->mutex); - - if (taosMsgHdrSize(pBuf->msgHdr) > 0) { - taosSendMsgHdr(pBuf->msgHdr, pConn->fd); - pBuf->totalLen = 0; - pBuf->emptyNum = 0; - } else { - pBuf->emptyNum++; - if (pBuf->emptyNum > 200) { - taosRemoveUdpBuf(pBuf); - pBuf = NULL; - } - } - - pthread_mutex_unlock(&pConn->mutex); - - 
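The rewritten taosRecvUdpData above treats each datagram as exactly one rpc message: recvfrom into a preallocated buffer, drop anything shorter than the fixed header, then hand the processing callback a heap copy with head room reserved in front (the tmsg += tsRpcOverhead step). The sketch below shows the same shape with plain POSIX sockets; the header size, head-room size and port are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define DEMO_HEAD_SIZE 16     /* made-up fixed header size                 */
#define DEMO_HEADROOM  64     /* made-up space reserved before the payload */
#define DEMO_MAX_UDP   65480

/* Receive one datagram and return a heap copy that keeps DEMO_HEADROOM free
 * bytes in front of the payload; the caller frees (pointer - DEMO_HEADROOM). */
static char *demoRecvOne(int fd, ssize_t *outLen) {
  static char buffer[DEMO_MAX_UDP];
  struct sockaddr_in src;
  socklen_t srcLen = sizeof(src);

  ssize_t dataLen = recvfrom(fd, buffer, sizeof(buffer), 0, (struct sockaddr *)&src, &srcLen);
  if (dataLen < DEMO_HEAD_SIZE) return NULL;   /* failed read or truncated datagram */

  char *copy = malloc(DEMO_HEADROOM + (size_t)dataLen);
  if (copy == NULL) return NULL;               /* allocation failed, drop the packet */
  memcpy(copy + DEMO_HEADROOM, buffer, (size_t)dataLen);
  *outLen = dataLen;
  return copy + DEMO_HEADROOM;                 /* payload view handed to the caller */
}

int main(void) {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) { perror("socket"); return 1; }

  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = htons(16030);                /* arbitrary demo port */
  if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { perror("bind"); close(fd); return 1; }

  ssize_t len = 0;
  char *msg = demoRecvOne(fd, &len);           /* blocks until one datagram arrives */
  if (msg != NULL) {
    printf("received %zd payload bytes\n", len);
    free(msg - DEMO_HEADROOM);
  }
  close(fd);
  return 0;
}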
if (pBuf) taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); -} - -static SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, uint16_t port) { - SUdpBuf *pBuf = (SUdpBuf *)malloc(sizeof(SUdpBuf)); - memset(pBuf, 0, sizeof(SUdpBuf)); - - pBuf->ip = ip; - pBuf->port = port; - pBuf->pConn = pConn; - - pBuf->destAdd.sin_family = AF_INET; - pBuf->destAdd.sin_addr.s_addr = ip; - pBuf->destAdd.sin_port = (uint16_t)htons(port); - taosInitMsgHdr(&(pBuf->msgHdr), &(pBuf->destAdd), RPC_MAX_UDP_PKTS); - pBuf->signature = pBuf; - taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); + struct sockaddr_in destAdd; + memset(&destAdd, 0, sizeof(destAdd)); + destAdd.sin_family = AF_INET; + destAdd.sin_addr.s_addr = ip; + destAdd.sin_port = htons(port); - // tTrace("%s UDP buffer to:0x%lld:%d is created", pBuf->pConn->label, - // pBuf->ip, pBuf->port); + int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd)); - return pBuf; + return ret; } - diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index fbf6e0f272ce7e5b3b01c56942f070c88015c302..8a0a9e1208b7fb45994be95200a3f2a2a0ceb4ff 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -29,23 +29,23 @@ extern "C" { extern int tsdbDebugFlag; -#define tsdbError(...) \ - if (tsdbDebugFlag & DEBUG_ERROR) { \ - taosPrintLog("ERROR TSDB ", tsdbDebugFlag, __VA_ARGS__); \ +#define tsdbError(...) \ + if (tsdbDebugFlag & DEBUG_ERROR) { \ + taosPrintLog("ERROR TDB ", tsdbDebugFlag, __VA_ARGS__); \ } -#define tsdbWarn(...) \ - if (tsdbDebugFlag & DEBUG_WARN) { \ - taosPrintLog("WARN TSDB ", tsdbDebugFlag, __VA_ARGS__); \ +#define tsdbWarn(...) \ + if (tsdbDebugFlag & DEBUG_WARN) { \ + taosPrintLog("WARN TDB ", tsdbDebugFlag, __VA_ARGS__); \ } -#define tsdbTrace(...) \ - if (tsdbDebugFlag & DEBUG_TRACE) { \ - taosPrintLog("TSDB ", tsdbDebugFlag, __VA_ARGS__); \ +#define tsdbTrace(...) \ + if (tsdbDebugFlag & DEBUG_TRACE) { \ + taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); \ } #define tsdbPrint(...) 
\ - { taosPrintLog("TSDB ", 255, __VA_ARGS__); } + { taosPrintLog("TDB ", 255, __VA_ARGS__); } // ------------------------------ TSDB META FILE INTERFACES ------------------------------ -#define TSDB_META_FILE_NAME "META" +#define TSDB_META_FILE_NAME "meta" #define TSDB_META_HASH_FRACTION 1.1 typedef int (*iterFunc)(void *, void *cont, int contLen); @@ -63,9 +63,9 @@ typedef struct { } SMetaFile; SMetaFile *tsdbInitMetaFile(char *rootDir, int32_t maxTables, iterFunc iFunc, afterFunc aFunc, void *appH); -int32_t tsdbInsertMetaRecord(SMetaFile *mfh, int64_t uid, void *cont, int32_t contLen); -int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, int64_t uid); -int32_t tsdbUpdateMetaRecord(SMetaFile *mfh, int64_t uid, void *cont, int32_t contLen); +int32_t tsdbInsertMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t contLen); +int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, uint64_t uid); +int32_t tsdbUpdateMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t contLen); void tsdbCloseMetaFile(SMetaFile *mfh); // ------------------------------ TSDB META INTERFACES ------------------------------ @@ -82,7 +82,7 @@ typedef struct { typedef struct STable { int8_t type; STableId tableId; - int64_t superUid; // Super table UID + uint64_t superUid; // Super table UID int32_t sversion; STSchema * schema; STSchema * tagSchema; @@ -153,7 +153,7 @@ STsdbMeta *tsdbGetMeta(TsdbRepoT *pRepo); STable *tsdbIsValidTableToInsert(STsdbMeta *pMeta, STableId tableId); // int32_t tsdbInsertRowToTableImpl(SSkipListNode *pNode, STable *pTable); -STable *tsdbGetTableByUid(STsdbMeta *pMeta, int64_t uid); +STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid); char *getTSTupleKey(const void * data); typedef struct { @@ -210,16 +210,21 @@ typedef enum { extern const char *tsdbFileSuffix[]; typedef struct { - int64_t size; // total size of the file - int64_t tombSize; // unused file size - int32_t totalBlocks; - int32_t totalSubBlocks; -} SFileInfo; + uint32_t offset; + uint32_t len; + uint64_t size; // total size of the file + uint64_t tombSize; // unused file size + uint32_t totalBlocks; + uint32_t totalSubBlocks; +} STsdbFileInfo; + +void *tsdbEncodeSFileInfo(void *buf, const STsdbFileInfo *pInfo); +void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo); typedef struct { int fd; char fname[128]; - SFileInfo info; + STsdbFileInfo info; } SFile; #define TSDB_IS_FILE_OPENED(f) ((f)->fd != -1) @@ -242,8 +247,7 @@ typedef struct { STsdbFileH *tsdbInitFileH(char *dataDir, STsdbCfg *pCfg); void tsdbCloseFileH(STsdbFileH *pFileH); -int tsdbCreateFile(char *dataDir, int fileId, const char *suffix, int maxTables, SFile *pFile, int writeHeader, - int toClose); +int tsdbCreateFile(char *dataDir, int fileId, const char *suffix, SFile *pFile); SFileGroup *tsdbCreateFGroup(STsdbFileH *pFileH, char *dataDir, int fid, int maxTables); int tsdbOpenFile(SFile *pFile, int oflag); int tsdbCloseFile(SFile *pFile); @@ -266,15 +270,18 @@ void tsdbSeekFileGroupIter(SFileGroupIter *pIter, int fid); SFileGroup *tsdbGetFileGroupNext(SFileGroupIter *pIter); typedef struct { - int32_t len; - int32_t offset; - int32_t padding; // For padding purpose - int32_t hasLast : 1; - int32_t numOfBlocks : 31; - int64_t uid; - TSKEY maxKey; + uint32_t len; + uint32_t offset; + uint32_t padding; // For padding purpose + uint32_t hasLast : 2; + uint32_t numOfBlocks : 30; + uint64_t uid; + TSKEY maxKey; } SCompIdx; /* sizeof(SCompIdx) = 28 */ +void *tsdbEncodeSCompIdx(void *buf, SCompIdx *pIdx); +void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx); + 
/** * if numOfSubBlocks == 0, then the SCompBlock is a sub-block * if numOfSubBlocks >= 1, then the SCompBlock is a super-block @@ -304,7 +311,7 @@ typedef struct { typedef struct { int32_t delimiter; // For recovery usage int32_t checksum; // TODO: decide if checksum logic in this file or make it one API - int64_t uid; + uint64_t uid; SCompBlock blocks[]; } SCompInfo; @@ -325,13 +332,20 @@ typedef struct { int16_t len; // Column length // TODO: int16_t is not enough int32_t type : 8; int32_t offset : 24; + int64_t sum; + int64_t max; + int64_t min; + int16_t maxIndex; + int16_t minIndex; + int16_t numOfNull; + char padding[2]; } SCompCol; // TODO: Take recover into account typedef struct { int32_t delimiter; // For recovery usage int32_t numOfCols; // For recovery usage - int64_t uid; // For recovery usage + uint64_t uid; // For recovery usage SCompCol cols[]; } SCompData; @@ -343,7 +357,7 @@ SFileGroup *tsdbSearchFGroup(STsdbFileH *pFileH, int fid); void tsdbGetKeyRangeOfFileId(int32_t daysPerFile, int8_t precision, int32_t fileId, TSKEY *minKey, TSKEY *maxKey); // TSDB repository definition -typedef struct _tsdb_repo { +typedef struct STsdbRepo { char *rootDir; // TSDB configuration STsdbCfg config; @@ -427,9 +441,9 @@ typedef struct { } SHelperFile; typedef struct { - int64_t uid; - int32_t tid; - int32_t sversion; + uint64_t uid; + int32_t tid; + int32_t sversion; } SHelperTable; typedef struct { @@ -451,7 +465,7 @@ typedef struct { SCompData *pCompData; SDataCols *pDataCols[2]; - void *blockBuffer; // Buffer to hold the whole data block + void *pBuffer; // Buffer to hold the whole data block void *compBuffer; // Buffer for temperary compress/decompress purpose } SRWHelper; @@ -498,6 +512,7 @@ void tsdbFitRetention(STsdbRepo *pRepo); int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks); void tsdbAdjustCacheBlocks(STsdbCache *pCache); int32_t tsdbGetMetaFileName(char *rootDir, char *fname); +int tsdbUpdateFileHeader(SFile *pFile, uint32_t version); #ifdef __cplusplus } diff --git a/src/tsdb/src/tsdbCache.c b/src/tsdb/src/tsdbCache.c index 08970eab3e3912a7c417e70e46e267346e97bd34..2761ed5e8eadc1f06e1c307685422c019350d35e 100644 --- a/src/tsdb/src/tsdbCache.c +++ b/src/tsdb/src/tsdbCache.c @@ -154,7 +154,7 @@ int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { for (int i = 0; i < blocksToAdd; i++) { if (tsdbAddCacheBlockToPool(pCache) < 0) { tsdbUnLockRepo((TsdbRepoT *)pRepo); - tsdbError("tsdbId %d: failed to add cache block to cache pool", pRepo->config.tsdbId); + tsdbError("tsdbId:%d, failed to add cache block to cache pool", pRepo->config.tsdbId); return -1; } } @@ -164,7 +164,7 @@ int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { } tsdbUnLockRepo((TsdbRepoT *)pRepo); - tsdbTrace("vgId: %d tsdb total cache blocks changed from %d to %d", pRepo->config.tsdbId, oldNumOfBlocks, totalBlocks); + tsdbTrace("vgId:%d, tsdb total cache blocks changed from %d to %d", pRepo->config.tsdbId, oldNumOfBlocks, totalBlocks); return 0; } diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index ad98bb7b207cc363b6bfa97e1ac46b87e3c4f43e..dcea2737fff235dbba4ea592500107c1b3daeac9 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -37,8 +37,6 @@ const char *tsdbFileSuffix[] = { static int compFGroupKey(const void *key, const void *fgroup); static int compFGroup(const void *arg1, const void *arg2); -static int tsdbWriteFileHead(SFile *pFile); -static int tsdbWriteHeadFileIdx(SFile *pFile, int maxTables); static int 
tsdbOpenFGroup(STsdbFileH *pFileH, char *dataDir, int fid); STsdbFileH *tsdbInitFileH(char *dataDir, STsdbCfg *pCfg) { @@ -84,11 +82,23 @@ void tsdbCloseFileH(STsdbFileH *pFileH) { } static int tsdbInitFile(char *dataDir, int fid, const char *suffix, SFile *pFile) { + uint32_t version; + char buf[512] = "\0"; + tsdbGetFileName(dataDir, fid, suffix, pFile->fname); if (access(pFile->fname, F_OK|R_OK|W_OK) < 0) return -1; pFile->fd = -1; - // TODO: recover the file info - // pFile->info = {0}; + if (tsdbOpenFile(pFile, O_RDONLY) < 0) return -1; + + if (tread(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) return -1; + if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) return -1; + + void *pBuf = buf; + pBuf = taosDecodeFixed32(pBuf, &version); + pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info)); + + tsdbCloseFile(pFile); + return 0; } @@ -121,8 +131,7 @@ SFileGroup *tsdbCreateFGroup(STsdbFileH *pFileH, char *dataDir, int fid, int max if (pGroup == NULL) { // if not exists, create one pFGroup->fileId = fid; for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) { - if (tsdbCreateFile(dataDir, fid, tsdbFileSuffix[type], maxTables, &(pFGroup->files[type]), - type == TSDB_FILE_TYPE_HEAD ? 1 : 0, 1) < 0) + if (tsdbCreateFile(dataDir, fid, tsdbFileSuffix[type], &(pFGroup->files[type])) < 0) goto _err; } @@ -286,41 +295,6 @@ static int compFGroup(const void *arg1, const void *arg2) { return ((SFileGroup *)arg1)->fileId - ((SFileGroup *)arg2)->fileId; } -static int tsdbWriteFileHead(SFile *pFile) { - char head[TSDB_FILE_HEAD_SIZE] = "\0"; - - pFile->info.size += TSDB_FILE_HEAD_SIZE; - - // TODO: write version and File statistic to the head - lseek(pFile->fd, 0, SEEK_SET); - if (write(pFile->fd, head, TSDB_FILE_HEAD_SIZE) < 0) return -1; - - return 0; -} - -static int tsdbWriteHeadFileIdx(SFile *pFile, int maxTables) { - int size = sizeof(SCompIdx) * maxTables + sizeof(TSCKSUM); - void *buf = calloc(1, size); - if (buf == NULL) return -1; - - if (lseek(pFile->fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) { - free(buf); - return -1; - } - - taosCalcChecksumAppend(0, (uint8_t *)buf, size); - - if (write(pFile->fd, buf, size) < 0) { - free(buf); - return -1; - } - - pFile->info.size += size; - - free(buf); - return 0; -} - int tsdbGetFileName(char *dataDir, int fileId, const char *suffix, char *fname) { if (dataDir == NULL || fname == NULL) return -1; @@ -354,7 +328,7 @@ SFileGroup * tsdbOpenFilesForCommit(STsdbFileH *pFileH, int fid) { return pGroup; } -int tsdbCreateFile(char *dataDir, int fileId, const char *suffix, int maxTables, SFile *pFile, int writeHeader, int toClose) { +int tsdbCreateFile(char *dataDir, int fileId, const char *suffix, SFile *pFile) { memset((void *)pFile, 0, sizeof(SFile)); pFile->fd = -1; @@ -370,19 +344,14 @@ int tsdbCreateFile(char *dataDir, int fileId, const char *suffix, int maxTables, return -1; } - if (writeHeader) { - if (tsdbWriteHeadFileIdx(pFile, maxTables) < 0) { - tsdbCloseFile(pFile); - return -1; - } - } + pFile->info.size = TSDB_FILE_HEAD_SIZE; - if (tsdbWriteFileHead(pFile) < 0) { + if (tsdbUpdateFileHeader(pFile, 0) < 0) { tsdbCloseFile(pFile); return -1; } - if (toClose) tsdbCloseFile(pFile); + tsdbCloseFile(pFile); return 0; } diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index fb25e9dcd976581731ac6e741c843908459edc45..36cb937d331fcb058596f3b3ebef98cb2f2cd717 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -18,7 +18,7 @@ int tsdbDebugFlag = 135; #define TSDB_MIN_ID 0 #define 
TSDB_MAX_ID INT_MAX -#define TSDB_CFG_FILE_NAME "CONFIG" +#define TSDB_CFG_FILE_NAME "config" #define TSDB_DATA_DIR_NAME "data" #define TSDB_DEFAULT_FILE_BLOCK_ROW_OPTION 0.7 #define TSDB_MAX_LAST_FILE_SIZE (1024 * 1024 * 10) // 10M @@ -29,7 +29,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); static int32_t tsdbSetRepoEnv(STsdbRepo *pRepo); static int32_t tsdbDestroyRepoEnv(STsdbRepo *pRepo); // static int tsdbOpenMetaFile(char *tsdbDir); -static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now); +static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now, int * affectedrows); static int32_t tsdbRestoreCfg(STsdbRepo *pRepo, STsdbCfg *pCfg); static int32_t tsdbGetDataDirName(STsdbRepo *pRepo, char *fname); static void * tsdbCommitData(void *arg); @@ -91,7 +91,7 @@ void tsdbFreeCfg(STsdbCfg *pCfg) { int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg, void *limiter /* TODO */) { if (mkdir(rootDir, 0755) != 0) { - tsdbError("id %d: failed to create rootDir! rootDir %s, reason %s", pCfg->tsdbId, rootDir, strerror(errno)); + tsdbError("vgId:%d, failed to create rootDir! rootDir:%s, reason:%s", pCfg->tsdbId, rootDir, strerror(errno)); if (errno == EACCES) { return TSDB_CODE_NO_DISK_PERMISSIONS; } else if (errno == ENOSPC) { @@ -150,7 +150,7 @@ int32_t tsdbDropRepo(TsdbRepoT *repo) { free(pRepo->rootDir); free(pRepo); - tsdbTrace("vgId: %d tsdb repository is dropped!", id); + tsdbTrace("vgId:%d, tsdb repository is dropped!", id); return 0; } @@ -169,6 +169,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) { if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err; for (int i = 1; i < pRepo->config.maxTables; i++) { STable * pTable = pMeta->tables[i]; + if (pTable == NULL) continue; SCompIdx *pIdx = &rhelper.pCompIdx[i]; if (pIdx->offset > 0 && pTable->lastKey < pIdx->maxKey) pTable->lastKey = pIdx->maxKey; @@ -242,7 +243,7 @@ TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH) { pRepo->state = TSDB_REPO_STATE_ACTIVE; - tsdbTrace("vgId: %d open tsdb repository successfully!", pRepo->config.tsdbId); + tsdbTrace("vgId:%d, open tsdb repository successfully!", pRepo->config.tsdbId); return (TsdbRepoT *)pRepo; } @@ -258,7 +259,7 @@ TsdbRepoT *tsdbOpenRepo(char *tsdbDir, STsdbAppH *pAppH) { * * @return 0 for success, -1 for failure and the error number is set */ -int32_t tsdbCloseRepo(TsdbRepoT *repo) { +int32_t tsdbCloseRepo(TsdbRepoT *repo, int toCommit) { STsdbRepo *pRepo = (STsdbRepo *)repo; if (pRepo == NULL) return 0; int id = pRepo->config.tsdbId; @@ -285,7 +286,7 @@ int32_t tsdbCloseRepo(TsdbRepoT *repo) { tsdbUnLockRepo(repo); if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START); - tsdbCommitData((void *)repo); + if (toCommit) tsdbCommitData((void *)repo); tsdbCloseFileH(pRepo->tsdbFileH); @@ -296,7 +297,7 @@ int32_t tsdbCloseRepo(TsdbRepoT *repo) { tfree(pRepo->rootDir); tfree(pRepo); - tsdbTrace("vgId: %d repository is closed!", id); + tsdbTrace("vgId:%d, repository is closed!", id); return 0; } @@ -358,7 +359,7 @@ int32_t tsdbTriggerCommit(TsdbRepoT *repo) { pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); pthread_create(&(pRepo->commitThread), &thattr, tsdbCommitData, (void *)repo); - tsdbTrace("vgId: %d start to commit!", pRepo->config.tsdbId); + tsdbTrace("vgId:%d, start to commit!", pRepo->config.tsdbId); return 0; } @@ -391,7 +392,7 @@ int tsdbAlterTable(TsdbRepoT *pRepo, STableCfg *pCfg) { return 0; } -TSKEY 
tsdbGetTableLastKey(TsdbRepoT *repo, int64_t uid) { +TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid) { STsdbRepo *pRepo = (STsdbRepo *)repo; STable *pTable = tsdbGetTableByUid(pRepo->tsdbMeta, uid); @@ -406,29 +407,30 @@ STableInfo *tsdbGetTableInfo(TsdbRepoT *pRepo, STableId tableId) { } // TODO: need to return the number of data inserted -int32_t tsdbInsertData(TsdbRepoT *repo, SSubmitMsg *pMsg) { +int32_t tsdbInsertData(TsdbRepoT *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg * pRsp) { SSubmitMsgIter msgIter; STsdbRepo *pRepo = (STsdbRepo *)repo; tsdbInitSubmitMsgIter(pMsg, &msgIter); SSubmitBlk *pBlock = NULL; int32_t code = TSDB_CODE_SUCCESS; + int32_t affectedrows = 0; TSKEY now = taosGetTimestamp(pRepo->config.precision); while ((pBlock = tsdbGetSubmitMsgNext(&msgIter)) != NULL) { - if ((code = tsdbInsertDataToTable(repo, pBlock, now)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbInsertDataToTable(repo, pBlock, now, &affectedrows)) != TSDB_CODE_SUCCESS) { return code; } } - + pRsp->affectedRows = htonl(affectedrows); return code; } /** * Initialize a table configuration */ -int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t tid) { +int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid) { if (config == NULL) return -1; if (type != TSDB_NORMAL_TABLE && type != TSDB_CHILD_TABLE) return -1; @@ -445,7 +447,7 @@ int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t ti /** * Set the super table UID of the created table */ -int tsdbTableSetSuperUid(STableCfg *config, int64_t uid) { +int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid) { if (config->type != TSDB_CHILD_TABLE) return -1; if (uid == TSDB_INVALID_SUPER_TABLE_ID) return -1; @@ -611,7 +613,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { pCfg->precision = TSDB_DEFAULT_PRECISION; } else { if (!IS_VALID_PRECISION(pCfg->precision)) { - tsdbError("id %d: invalid precision configuration! precision %d", pCfg->tsdbId, pCfg->precision); + tsdbError("vgId:%d, invalid precision configuration! precision:%d", pCfg->tsdbId, pCfg->precision); return -1; } } @@ -621,7 +623,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { pCfg->compression = TSDB_DEFAULT_COMPRESSION; } else { if (!IS_VALID_COMPRESSION(pCfg->compression)) { - tsdbError("id %d: invalid compression configuration! compression %d", pCfg->tsdbId, pCfg->precision); + tsdbError("vgId:%d: invalid compression configuration! compression:%d", pCfg->tsdbId, pCfg->precision); return -1; } } @@ -634,7 +636,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { pCfg->maxTables = TSDB_DEFAULT_TABLES; } else { if (pCfg->maxTables < TSDB_MIN_TABLES || pCfg->maxTables > TSDB_MAX_TABLES) { - tsdbError("id %d: invalid maxTables configuration! maxTables %d TSDB_MIN_TABLES %d TSDB_MAX_TABLES %d", + tsdbError("vgId:%d: invalid maxTables configuration! maxTables:%d TSDB_MIN_TABLES:%d TSDB_MAX_TABLES:%d", pCfg->tsdbId, pCfg->maxTables, TSDB_MIN_TABLES, TSDB_MAX_TABLES); return -1; } @@ -646,7 +648,7 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } else { if (pCfg->daysPerFile < TSDB_MIN_DAYS_PER_FILE || pCfg->daysPerFile > TSDB_MAX_DAYS_PER_FILE) { tsdbError( - "id %d: invalid daysPerFile configuration! daysPerFile %d TSDB_MIN_DAYS_PER_FILE %d TSDB_MAX_DAYS_PER_FILE " + "vgId:%d, invalid daysPerFile configuration! 
daysPerFile:%d TSDB_MIN_DAYS_PER_FILE:%d TSDB_MAX_DAYS_PER_FILE:" "%d", pCfg->tsdbId, pCfg->daysPerFile, TSDB_MIN_DAYS_PER_FILE, TSDB_MAX_DAYS_PER_FILE); return -1; @@ -659,8 +661,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } else { if (pCfg->minRowsPerFileBlock < TSDB_MIN_MIN_ROW_FBLOCK || pCfg->minRowsPerFileBlock > TSDB_MAX_MIN_ROW_FBLOCK) { tsdbError( - "id %d: invalid minRowsPerFileBlock configuration! minRowsPerFileBlock %d TSDB_MIN_MIN_ROW_FBLOCK %d " - "TSDB_MAX_MIN_ROW_FBLOCK %d", + "vgId:%d, invalid minRowsPerFileBlock configuration! minRowsPerFileBlock:%d TSDB_MIN_MIN_ROW_FBLOCK:%d " + "TSDB_MAX_MIN_ROW_FBLOCK:%d", pCfg->tsdbId, pCfg->minRowsPerFileBlock, TSDB_MIN_MIN_ROW_FBLOCK, TSDB_MAX_MIN_ROW_FBLOCK); return -1; } @@ -671,8 +673,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } else { if (pCfg->maxRowsPerFileBlock < TSDB_MIN_MAX_ROW_FBLOCK || pCfg->maxRowsPerFileBlock > TSDB_MAX_MAX_ROW_FBLOCK) { tsdbError( - "id %d: invalid maxRowsPerFileBlock configuration! maxRowsPerFileBlock %d TSDB_MIN_MAX_ROW_FBLOCK %d " - "TSDB_MAX_MAX_ROW_FBLOCK %d", + "vgId:%d, invalid maxRowsPerFileBlock configuration! maxRowsPerFileBlock:%d TSDB_MIN_MAX_ROW_FBLOCK:%d " + "TSDB_MAX_MAX_ROW_FBLOCK:%d", pCfg->tsdbId, pCfg->maxRowsPerFileBlock, TSDB_MIN_MIN_ROW_FBLOCK, TSDB_MAX_MIN_ROW_FBLOCK); return -1; } @@ -686,8 +688,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } else { if (pCfg->keep < TSDB_MIN_KEEP || pCfg->keep > TSDB_MAX_KEEP) { tsdbError( - "id %d: invalid keep configuration! keep %d TSDB_MIN_KEEP %d " - "TSDB_MAX_KEEP %d", + "vgId:%d, invalid keep configuration! keep:%d TSDB_MIN_KEEP:%d " + "TSDB_MAX_KEEP:%d", pCfg->tsdbId, pCfg->keep, TSDB_MIN_KEEP, TSDB_MAX_KEEP); return -1; } @@ -754,13 +756,13 @@ static int32_t tsdbSetRepoEnv(STsdbRepo *pRepo) { if (tsdbGetDataDirName(pRepo, dirName) < 0) return -1; if (mkdir(dirName, 0755) < 0) { - tsdbError("vgId: %d failed to create repository directory! reason %s", pRepo->config.tsdbId, strerror(errno)); + tsdbError("vgId:%d, failed to create repository directory! reason:%s", pRepo->config.tsdbId, strerror(errno)); return -1; } tsdbTrace( - "vgId: %d set up tsdb environment succeed! cacheBlockSize %d, totalBlocks %d, maxTables %d, daysPerFile %d, keep " - "%d, minRowsPerFileBlock %d, maxRowsPerFileBlock %d, precision %d, compression%d", + "vgId:%d, set up tsdb environment succeed! cacheBlockSize:%d, totalBlocks:%d, maxTables:%d, daysPerFile:%d, keep:" + "%d, minRowsPerFileBlock:%d, maxRowsPerFileBlock:%d, precision:%d, compression:%d", pRepo->config.tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->maxTables, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression); return 0; @@ -840,19 +842,19 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable pTable->mem->numOfPoints = tSkipListGetSize(pTable->mem->pData); - tsdbTrace("vgId: %d, tid: %d, uid: " PRId64 "a row is inserted to table! key" PRId64, - pRepo->config.tsdbId, pTable->tableId.tid, pTable->tableId.uid, dataRowKey(row)); + tsdbTrace("vgId:%d, tid:%d, uid:%" PRId64 ", table:%s a row is inserted to table! 
key:%" PRId64, pRepo->config.tsdbId, + pTable->tableId.tid, pTable->tableId.uid, varDataVal(pTable->name), dataRowKey(row)); return 0; } -static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now) { +static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) { STsdbRepo *pRepo = (STsdbRepo *)repo; STableId tableId = {.uid = pBlock->uid, .tid = pBlock->tid}; STable *pTable = tsdbIsValidTableToInsert(pRepo->tsdbMeta, tableId); if (pTable == NULL) { - tsdbError("id %d: failed to get table for insert, uid:%" PRIu64 ", tid:%d", pRepo->config.tsdbId, pBlock->uid, + tsdbError("vgId:%d, failed to get table for insert, uid:" PRIu64 ", tid:%d", pRepo->config.tsdbId, pBlock->uid, pBlock->tid); return TSDB_CODE_INVALID_TABLE_ID; } @@ -866,15 +868,16 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY tsdbInitSubmitBlkIter(pBlock, &blkIter); while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { - tsdbError( - "tsdbId: %d, table tid: %d, talbe uid: %ld timestamp is out of range. now: %ld maxKey: %ld, minKey: %ld", - pRepo->config.tsdbId, pTable->tableId.tid, pTable->tableId.uid, now, minKey, maxKey); + tsdbError("vgId:%d, table:%s, tid:%d, talbe uid:%ld timestamp is out of range. now:" PRId64 ", maxKey:" PRId64 + ", minKey:" PRId64, + pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, pTable->tableId.uid, now, minKey, maxKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } if (tdInsertRowToTable(pRepo, row, pTable) < 0) { return -1; } + (*affectedrows)++; } return TSDB_CODE_SUCCESS; @@ -1018,10 +1021,16 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters // Create and open files for commit tsdbGetDataDirName(pRepo, dataDir); - if ((pGroup = tsdbCreateFGroup(pFileH, dataDir, fid, pCfg->maxTables)) == NULL) goto _err; + if ((pGroup = tsdbCreateFGroup(pFileH, dataDir, fid, pCfg->maxTables)) == NULL) { + tsdbError("vgId:%d, failed to create file group %d", pRepo->config.tsdbId, fid); + goto _err; + } // Open files for write/read - if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) goto _err; + if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { + tsdbError("vgId:%d, failed to set helper file", pRepo->config.tsdbId); + goto _err; + } // Loop to commit data in each table for (int tid = 1; tid < pCfg->maxTables; tid++) { @@ -1058,13 +1067,22 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters ASSERT(pDataCols->numOfPoints == 0); // Move the last block to the new .l file if neccessary - if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) goto _err; + if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { + tsdbError("vgId:%d, failed to move last block", pRepo->config.tsdbId); + goto _err; + } // Write the SCompBlock part - if (tsdbWriteCompInfo(pHelper) < 0) goto _err; + if (tsdbWriteCompInfo(pHelper) < 0) { + tsdbError("vgId:%d, failed to write compInfo part", pRepo->config.tsdbId); + goto _err; + } } - if (tsdbWriteCompIdx(pHelper) < 0) goto _err; + if (tsdbWriteCompIdx(pHelper) < 0) { + tsdbError("vgId:%d, failed to write compIdx part", pRepo->config.tsdbId); + goto _err; + } tsdbCloseHelperFile(pHelper, 0); // TODO: make it atomic with some methods @@ -1109,7 +1127,7 @@ static int tsdbHasDataToCommit(SSkipListIterator **iters, int nIters, TSKEY minK static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) { int8_t oldCompRession = pRepo->config.compression; 
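/*
 * The tsdbInsertData / tsdbInsertDataToTable changes above thread an
 * affected-row counter through the per-block insert loop and return it to the
 * client byte-swapped with htonl. A hedged sketch of that pattern; the block
 * type and loops below are hypothetical stand-ins, not the real
 * SSubmitMsgIter / SSubmitBlkIter machinery.
 */
#include <arpa/inet.h>
#include <stdint.h>

typedef struct { int nRows; } BlockSketch;

static int insertBlockSketch(const BlockSketch *blk, int32_t *affected) {
  for (int i = 0; i < blk->nRows; i++) {
    /* ... validate the timestamp range and insert the row ... */
    (*affected)++;            /* count only rows that were actually written */
  }
  return 0;
}

static int insertMsgSketch(const BlockSketch *blocks, int nBlocks, uint32_t *rspAffected) {
  int32_t affected = 0;
  for (int i = 0; i < nBlocks; i++) {
    if (insertBlockSketch(&blocks[i], &affected) != 0) return -1;
  }
  *rspAffected = htonl((uint32_t)affected);   /* network byte order, as in the patch */
  return 0;
}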
pRepo->config.compression = compression; - tsdbTrace("vgId: %d tsdb compression is changed from %d to %d", oldCompRession, compression); + tsdbTrace("vgId:%d, tsdb compression is changed from %d to %d", oldCompRession, compression); } static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) { @@ -1126,13 +1144,21 @@ static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) { } pRepo->tsdbFileH->maxFGroups = maxFiles; } - tsdbTrace("vgId: %d keep is changed from %d to %d", pRepo->config.tsdbId, oldKeep, keep); + tsdbTrace("vgId:%d, keep is changed from %d to %d", pRepo->config.tsdbId, oldKeep, keep); } static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) { - // TODO int oldMaxTables = pRepo->config.maxTables; - tsdbTrace("vgId: %d tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables); + if (oldMaxTables < pRepo->config.maxTables) { + // TODO + } + + STsdbMeta *pMeta = pRepo->tsdbMeta; + + pMeta->maxTables = maxTables; + pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *)); + + tsdbTrace("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables); } uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *size) { diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index d5ba605e1a359261ef20e3f49e4ff22127798abc..55162314833f0e018401df593b9f7d3e55f78ad2 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -6,7 +6,7 @@ #include "tsdbMain.h" #define TSDB_SUPER_TABLE_SL_LEVEL 5 // TODO: may change here -#define TSDB_META_FILE_NAME "META" +// #define TSDB_META_FILE_NAME "META" const int32_t DEFAULT_TAG_INDEX_COLUMN = 0; // skip list built based on the first column of tags @@ -80,16 +80,16 @@ STable *tsdbDecodeTable(void *cont, int contLen) { T_READ_MEMBER(ptr, int8_t, pTable->type); int len = *(int *)ptr; ptr = (char *)ptr + sizeof(int); - pTable->name = calloc(1, len + VARSTR_HEADER_SIZE); + pTable->name = calloc(1, len + VARSTR_HEADER_SIZE + 1); if (pTable->name == NULL) return NULL; varDataSetLen(pTable->name, len); memcpy(pTable->name->data, ptr, len); ptr = (char *)ptr + len; - T_READ_MEMBER(ptr, int64_t, pTable->tableId.uid); + T_READ_MEMBER(ptr, uint64_t, pTable->tableId.uid); T_READ_MEMBER(ptr, int32_t, pTable->tableId.tid); - T_READ_MEMBER(ptr, int64_t, pTable->superUid); + T_READ_MEMBER(ptr, uint64_t, pTable->superUid); T_READ_MEMBER(ptr, int32_t, pTable->sversion); if (pTable->type == TSDB_SUPER_TABLE) { @@ -154,7 +154,6 @@ STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables) { STsdbMeta *pMeta = (STsdbMeta *)malloc(sizeof(STsdbMeta)); if (pMeta == NULL) return NULL; - pMeta->maxTables = maxTables; pMeta->nTables = 0; pMeta->superList = NULL; pMeta->tables = (STable **)calloc(maxTables, sizeof(STable *)); @@ -310,7 +309,7 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) { // todo refactor extract method size_t size = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN); - super->name = malloc(size + VARSTR_HEADER_SIZE); + super->name = calloc(1, size + VARSTR_HEADER_SIZE + 1); STR_WITH_SIZE_TO_VARSTR(super->name, pCfg->sname, size); // index the first tag column @@ -339,7 +338,7 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) { table->tableId = pCfg->tableId; size_t size = strnlen(pCfg->name, TSDB_TABLE_NAME_LEN); - table->name = malloc(size + VARSTR_HEADER_SIZE); + table->name = calloc(1, size + VARSTR_HEADER_SIZE + 1); STR_WITH_SIZE_TO_VARSTR(table->name, pCfg->name, size); table->lastKey = 0; @@ -356,12 +355,12 @@ 
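/*
 * tsdbAlterMaxTables above now reallocates pMeta->tables to the new size. The
 * sketch below shows one cautious way to grow such a pointer array; checking
 * the realloc result and zeroing the new slots are defensive assumptions added
 * here for illustration, not a claim about the final TDengine behaviour.
 */
#include <stdlib.h>
#include <string.h>

typedef struct TableSketch TableSketch;      /* opaque stand-in for STable */

static int growTablesSketch(TableSketch ***tables, int oldMax, int newMax) {
  if (newMax <= oldMax) return 0;                         /* nothing to grow */
  TableSketch **p = realloc(*tables, (size_t)newMax * sizeof(*p));
  if (p == NULL) return -1;                               /* old array intact */
  memset(p + oldMax, 0, (size_t)(newMax - oldMax) * sizeof(*p));
  *tables = p;
  return 0;
}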
int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) { // Register to meta if (newSuper) { tsdbAddTableToMeta(pMeta, super, true); - tsdbTrace("vgId: %d super table is created! uid " PRId64, pRepo->config.tsdbId, + tsdbTrace("vgId:%d, super table %s is created! uid:%" PRId64, pRepo->config.tsdbId, varDataVal(super->name), super->tableId.uid); } tsdbAddTableToMeta(pMeta, table, true); - tsdbTrace("vgId: %d table is created! tid %d, uid " PRId64, pRepo->config.tsdbId, table->tableId.tid, - table->tableId.uid); + tsdbTrace("vgId:%d, table %s is created! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, varDataVal(table->name), + table->tableId.tid, table->tableId.uid); // Write to meta file int bufLen = 0; @@ -404,12 +403,13 @@ int tsdbDropTable(TsdbRepoT *repo, STableId tableId) { STable *pTable = tsdbGetTableByUid(pMeta, tableId.uid); if (pTable == NULL) { - tsdbError("vgId %d: failed to drop table since table not exists! tid %d, uid " PRId64, pRepo->config.tsdbId, + tsdbError("vgId:%d, failed to drop table since table not exists! tid:%d, uid:" PRId64, pRepo->config.tsdbId, tableId.tid, tableId.uid); return -1; } - tsdbTrace("vgId: %d table is dropped! tid %d, uid " PRId64, pRepo->config.tsdbId, tableId.tid, tableId.uid); + tsdbTrace("vgId:%d, table %s is dropped! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name), + tableId.tid, tableId.uid); if (tsdbRemoveTableFromMeta(pMeta, pTable) < 0) return -1; return 0; @@ -455,7 +455,7 @@ static int32_t tsdbCheckTableCfg(STableCfg *pCfg) { return 0; } -STable *tsdbGetTableByUid(STsdbMeta *pMeta, int64_t uid) { +STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { void *ptr = taosHashGet(pMeta->map, (char *)(&uid), sizeof(uid)); if (ptr == NULL) return NULL; @@ -508,10 +508,7 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable) { ASSERT(tTable != NULL && tTable->type == TSDB_CHILD_TABLE); - pMeta->tables[tTable->tableId.tid] = NULL; - taosHashRemove(pMeta->map, (char *)(&(pTable->tableId.uid)), sizeof(pTable->tableId.uid)); - pMeta->nTables--; - tsdbFreeTable(tTable); + tsdbRemoveTableFromMeta(pMeta, tTable); } tSkipListDestroyIter(pIter); @@ -534,8 +531,8 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable) { pMeta->nTables--; } - tsdbFreeTable(pTable); taosHashRemove(pMeta->map, (char *)(&(pTable->tableId.uid)), sizeof(pTable->tableId.uid)); + tsdbFreeTable(pTable); return 0; } diff --git a/src/tsdb/src/tsdbMetaFile.c b/src/tsdb/src/tsdbMetaFile.c index f9d10ec57983d36731d4612460c9ac28e579f17b..19fcae94e367816d01aa3cb29dd2f05890c3769d 100644 --- a/src/tsdb/src/tsdbMetaFile.c +++ b/src/tsdb/src/tsdbMetaFile.c @@ -23,9 +23,9 @@ #define TSDB_META_FILE_HEADER_SIZE 512 typedef struct { - int32_t offset; - int32_t size; - int64_t uid; + int32_t offset; + int32_t size; + uint64_t uid; } SRecordInfo; // static int32_t tsdbGetMetaFileName(char *rootDir, char *fname); @@ -76,7 +76,7 @@ SMetaFile *tsdbInitMetaFile(char *rootDir, int32_t maxTables, iterFunc iFunc, af return mfh; } -int32_t tsdbInsertMetaRecord(SMetaFile *mfh, int64_t uid, void *cont, int32_t contLen) { +int32_t tsdbInsertMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t contLen) { if (taosHashGet(mfh->map, (char *)(&uid), sizeof(uid)) != NULL) { return -1; } @@ -112,7 +112,7 @@ int32_t tsdbInsertMetaRecord(SMetaFile *mfh, int64_t uid, void *cont, int32_t co return 0; } -int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, int64_t uid) { +int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, uint64_t uid) { char *ptr = taosHashGet(mfh->map, 
(char *)(&uid), sizeof(uid)); if (ptr == NULL) return -1; @@ -139,7 +139,7 @@ int32_t tsdbDeleteMetaRecord(SMetaFile *mfh, int64_t uid) { return 0; } -int32_t tsdbUpdateMetaRecord(SMetaFile *mfh, int64_t uid, void *cont, int32_t contLen) { +int32_t tsdbUpdateMetaRecord(SMetaFile *mfh, uint64_t uid, void *cont, int32_t contLen) { char *ptr = taosHashGet(mfh->map, (char *)(&uid), sizeof(uid)); if (ptr == NULL) return -1; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index e32a64629678a4d16a54885ccb9184a09a50a15a..c5ef00c2332b071e03133c3f1cd21aecccf93365 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ -18,6 +18,7 @@ #include "tchecksum.h" #include "tscompression.h" #include "talgo.h" +#include "tcoding.h" // Local function definitions // static int tsdbCheckHelperCfg(SHelperCfg *pCfg); @@ -131,10 +132,10 @@ static int tsdbInitHelper(SRWHelper *pHelper, STsdbRepo *pRepo, tsdb_rw_helper_t // Init block part if (tsdbInitHelperBlock(pHelper) < 0) goto _err; - pHelper->blockBuffer = + pHelper->pBuffer = tmalloc(sizeof(SCompData) + (sizeof(SCompCol) + sizeof(TSCKSUM) + COMP_OVERFLOW_BYTES) * pHelper->config.maxCols + pHelper->config.maxRowSize * pHelper->config.maxRowsPerFileBlock + sizeof(TSCKSUM)); - if (pHelper->blockBuffer == NULL) goto _err; + if (pHelper->pBuffer == NULL) goto _err; return 0; @@ -154,7 +155,7 @@ int tsdbInitWriteHelper(SRWHelper *pHelper, STsdbRepo *pRepo) { void tsdbDestroyHelper(SRWHelper *pHelper) { if (pHelper) { - tzfree(pHelper->blockBuffer); + tzfree(pHelper->pBuffer); tzfree(pHelper->compBuffer); tsdbDestroyHelperFile(pHelper); tsdbDestroyHelperTable(pHelper); @@ -211,13 +212,15 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { // Create and open .h if (tsdbOpenFile(&(pHelper->files.nHeadF), O_WRONLY | O_CREAT) < 0) return -1; - size_t tsize = TSDB_FILE_HEAD_SIZE + sizeof(SCompIdx) * pHelper->config.maxTables + sizeof(TSCKSUM); - if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, tsize) < tsize) goto _err; + // size_t tsize = TSDB_FILE_HEAD_SIZE + sizeof(SCompIdx) * pHelper->config.maxTables + sizeof(TSCKSUM); + if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) + goto _err; // Create and open .l file if should if (tsdbShouldCreateNewLast(pHelper)) { if (tsdbOpenFile(&(pHelper->files.nLastF), O_WRONLY | O_CREAT) < 0) goto _err; - if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) goto _err; + if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) + goto _err; } } else { if (tsdbOpenFile(&(pHelper->files.dataF), O_RDONLY) < 0) goto _err; @@ -238,6 +241,7 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { pHelper->files.headF.fd = -1; } if (pHelper->files.dataF.fd > 0) { + if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.dataF), 0); close(pHelper->files.dataF.fd); pHelper->files.dataF.fd = -1; } @@ -246,6 +250,7 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { pHelper->files.lastF.fd = -1; } if (pHelper->files.nHeadF.fd > 0) { + if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nHeadF), 0); close(pHelper->files.nHeadF.fd); pHelper->files.nHeadF.fd = -1; if (hasError) { @@ -257,6 +262,7 @@ int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) { } if (pHelper->files.nLastF.fd > 0) { + if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nLastF), 0); 
close(pHelper->files.nLastF.fd); pHelper->files.nLastF.fd = -1; if (hasError) { @@ -416,7 +422,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END); pIdx->uid = pHelper->tableInfo.uid; if (pIdx->offset < 0) return -1; - ASSERT(pIdx->offset >= tsizeof(pHelper->pCompIdx)); + ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE); if (twrite(pHelper->files.nHeadF.fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) return -1; } @@ -426,13 +432,27 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { int tsdbWriteCompIdx(SRWHelper *pHelper) { ASSERT(TSDB_HELPER_TYPE(pHelper) == TSDB_WRITE_HELPER); - if (lseek(pHelper->files.nHeadF.fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) return -1; + off_t offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END); + if (offset < 0) return -1; + + SFile *pFile = &(pHelper->files.nHeadF); + pFile->info.offset = offset; + + // TODO: change the implementation of pHelper->pBuffer + void *buf = pHelper->pBuffer; + for (uint32_t i = 0; i < pHelper->config.maxTables; i++) { + SCompIdx *pCompIdx = pHelper->pCompIdx + i; + if (pCompIdx->offset > 0) { + buf = taosEncodeVariant32(buf, i); + buf = tsdbEncodeSCompIdx(buf, pCompIdx); + } + } - ASSERT(tsizeof(pHelper->pCompIdx) == sizeof(SCompIdx) * pHelper->config.maxTables + sizeof(TSCKSUM)); - taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompIdx, tsizeof(pHelper->pCompIdx)); + int tsize = (char *)buf - (char *)pHelper->pBuffer + sizeof(TSCKSUM); + taosCalcChecksumAppend(0, (uint8_t *)pHelper->pBuffer, tsize); - if (twrite(pHelper->files.nHeadF.fd, (void *)pHelper->pCompIdx, tsizeof(pHelper->pCompIdx)) < tsizeof(pHelper->pCompIdx)) - return -1; + if (twrite(pHelper->files.nHeadF.fd, (void *)pHelper->pBuffer, tsize) < tsize) return -1; + pFile->info.len = tsize; return 0; } @@ -441,14 +461,36 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) { if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) { // If not load from file, just load it in object - int fd = pHelper->files.headF.fd; + SFile *pFile = &(pHelper->files.headF); + int fd = pFile->fd; + + memset(pHelper->pCompIdx, 0, tsizeof(pHelper->pCompIdx)); + if (pFile->info.offset > 0) { + ASSERT(pFile->info.offset > TSDB_FILE_HEAD_SIZE); + + if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) return -1; + if (tread(fd, (void *)(pHelper->pBuffer), pFile->info.len) < pFile->info.len) + return -1; + if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pBuffer), pFile->info.len)) { + // TODO: File is broken, try to deal with it + return -1; + } - if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) return -1; - if (tread(fd, (void *)(pHelper->pCompIdx), tsizeof((void *)pHelper->pCompIdx)) < tsizeof(pHelper->pCompIdx)) return -1; - if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pCompIdx), tsizeof((void *)pHelper->pCompIdx))) { - // TODO: File is broken, try to deal with it - return -1; + // Decode it + void *ptr = pHelper->pBuffer; + while (((char *)ptr - (char *)pHelper->pBuffer) < (pFile->info.len - sizeof(TSCKSUM))) { + uint32_t tid = 0; + if ((ptr = taosDecodeVariant32(ptr, &tid)) == NULL) return -1; + ASSERT(tid > 0 && tid < pHelper->config.maxTables); + + if ((ptr = tsdbDecodeSCompIdx(ptr, pHelper->pCompIdx + tid)) == NULL) return -1; + + ASSERT((char *)ptr - (char *)pHelper->pBuffer <= pFile->info.len - sizeof(TSCKSUM)); + } + + ASSERT(((char *)ptr - (char *)pHelper->pBuffer) == (pFile->info.len - sizeof(TSCKSUM))); } + } helperSetState(pHelper, TSDB_HELPER_IDX_LOAD); @@ -590,9 +632,9 @@ static int tsdbCheckAndDecodeColumnData(SDataCol 
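/*
 * The rewritten tsdbWriteCompIdx / tsdbLoadCompIdx above store only the
 * non-empty (tid, SCompIdx) pairs, using the variable-length integers from
 * tcoding.h (taosEncodeVariant32 / taosDecodeVariant32) plus a trailing
 * checksum. The functions below are a generic LEB128-style model of that
 * varint encoding, written for illustration; they are not the TDengine
 * implementation.
 */
#include <stdint.h>

static uint8_t *encodeVar32(uint8_t *buf, uint32_t v) {
  while (v >= 0x80) {          /* 7 payload bits per byte, MSB = "more follows" */
    *buf++ = (uint8_t)(v | 0x80);
    v >>= 7;
  }
  *buf++ = (uint8_t)v;
  return buf;
}

static const uint8_t *decodeVar32(const uint8_t *buf, uint32_t *v) {
  uint32_t out = 0;
  for (int shift = 0; shift < 35; shift += 7) {
    uint8_t byte = *buf++;
    out |= (uint32_t)(byte & 0x7F) << shift;
    if ((byte & 0x80) == 0) { *v = out; return buf; }
  }
  return NULL;                 /* malformed: a uint32_t needs at most 5 bytes */
}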
*pDataCol, char *content, int32 static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols) { ASSERT(pCompBlock->numOfSubBlocks <= 1); - ASSERT(tsizeof(pHelper->blockBuffer) >= pCompBlock->len); + ASSERT(tsizeof(pHelper->pBuffer) >= pCompBlock->len); - SCompData *pCompData = (SCompData *)pHelper->blockBuffer; + SCompData *pCompData = (SCompData *)pHelper->pBuffer; int fd = (pCompBlock->last) ? pHelper->files.lastF.fd : pHelper->files.dataF.fd; if (lseek(fd, pCompBlock->offset, SEEK_SET) < 0) goto _err; @@ -685,7 +727,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa ASSERT(rowsToWrite > 0 && rowsToWrite <= pDataCols->numOfPoints && rowsToWrite <= pHelper->config.maxRowsPerFileBlock); - SCompData *pCompData = (SCompData *)(pHelper->blockBuffer); + SCompData *pCompData = (SCompData *)(pHelper->pBuffer); int64_t offset = 0; offset = lseek(pFile->fd, 0, SEEK_END); @@ -701,8 +743,15 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa continue; } + memset(pCompCol, 0, sizeof(*pCompCol)); + pCompCol->colId = pDataCol->colId; pCompCol->type = pDataCol->type; + if (tDataTypeDesc[pDataCol->type].getStatisFunc) { + (*tDataTypeDesc[pDataCol->type].getStatisFunc)( + (TSKEY *)(pDataCols->cols[0].pData), pDataCol->pData, rowsToWrite, &(pCompCol->min), &(pCompCol->max), + &(pCompCol->sum), &(pCompCol->minIndex), &(pCompCol->maxIndex), &(pCompCol->numOfNull)); + } nColsNotAllNull++; } @@ -733,7 +782,7 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa } pCompCol->len = (*(tDataTypeDesc[pDataCol->type].compFunc))( - (char *)pDataCol->pData, tlen, rowsToWrite, tptr, tsizeof(pHelper->blockBuffer) - lsize, + (char *)pDataCol->pData, tlen, rowsToWrite, tptr, tsizeof(pHelper->pBuffer) - lsize, pHelper->config.compress, pHelper->compBuffer, tsizeof(pHelper->compBuffer)); } else { pCompCol->len = tlen; @@ -1161,4 +1210,73 @@ static int tsdbGetRowsInRange(SDataCols *pDataCols, TSKEY minKey, TSKEY maxKey) if ((TSKEY *)ptr2 - (TSKEY *)ptr1 < 0) return 0; return ((TSKEY *)ptr2 - (TSKEY *)ptr1) + 1; +} + +void *tsdbEncodeSCompIdx(void *buf, SCompIdx *pIdx) { + buf = taosEncodeVariant32(buf, pIdx->len); + buf = taosEncodeVariant32(buf, pIdx->offset); + buf = taosEncodeFixed8(buf, pIdx->hasLast); + buf = taosEncodeVariant32(buf, pIdx->numOfBlocks); + buf = taosEncodeFixed64(buf, pIdx->uid); + buf = taosEncodeFixed64(buf, pIdx->maxKey); + + return buf; +} + +void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) { + uint8_t hasLast = 0; + uint32_t numOfBlocks = 0; + uint64_t value = 0; + + if ((buf = taosDecodeVariant32(buf, &(pIdx->len))) == NULL) return NULL; + if ((buf = taosDecodeVariant32(buf, &(pIdx->offset))) == NULL) return NULL; + if ((buf = taosDecodeFixed8(buf, &(hasLast))) == NULL) return NULL; + pIdx->hasLast = hasLast; + if ((buf = taosDecodeVariant32(buf, &(numOfBlocks))) == NULL) return NULL; + pIdx->numOfBlocks = numOfBlocks; + if ((buf = taosDecodeFixed64(buf, &value)) == NULL) return NULL; + pIdx->uid = (int64_t)value; + if ((buf = taosDecodeFixed64(buf, &value)) == NULL) return NULL; + pIdx->maxKey = (TSKEY)value; + + return buf; +} + +int tsdbUpdateFileHeader(SFile *pFile, uint32_t version) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + + void *pBuf = (void *)buf; + pBuf = taosEncodeFixed32(pBuf, version); + pBuf = tsdbEncodeSFileInfo(pBuf, &(pFile->info)); + + taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); + + if (lseek(pFile->fd, 0, SEEK_SET) < 0) 
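/*
 * tsdbWriteBlockToFile above now fills per-column statistics (min, max, sum,
 * null count and their positions) into SCompCol via getStatisFunc before the
 * block is compressed. A self-contained sketch for a single int32 column; the
 * INT32_MIN null sentinel and the field layout are assumptions made for this
 * example only.
 */
#include <limits.h>
#include <stdint.h>

typedef struct {
  int64_t sum;
  int32_t min, max;
  int32_t numOfNull;
  int16_t minIndex, maxIndex;
} ColStatsSketch;

static void int32ColStats(const int32_t *col, int nRows, ColStatsSketch *s) {
  s->sum = 0;
  s->numOfNull = 0;
  s->min = INT32_MAX;
  s->max = INT32_MIN;
  s->minIndex = s->maxIndex = -1;
  for (int i = 0; i < nRows; i++) {
    if (col[i] == INT32_MIN) {       /* assumed null sentinel */
      s->numOfNull++;
      continue;
    }
    s->sum += col[i];
    if (col[i] < s->min) { s->min = col[i]; s->minIndex = (int16_t)i; }
    if (col[i] > s->max) { s->max = col[i]; s->maxIndex = (int16_t)i; }
  }
}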
return -1; + if (twrite(pFile->fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) return -1; + + return 0; +} + + + +void *tsdbEncodeSFileInfo(void *buf, const STsdbFileInfo *pInfo) { + buf = taosEncodeFixed32(buf, pInfo->offset); + buf = taosEncodeFixed32(buf, pInfo->len); + buf = taosEncodeFixed64(buf, pInfo->size); + buf = taosEncodeFixed64(buf, pInfo->tombSize); + buf = taosEncodeFixed32(buf, pInfo->totalBlocks); + buf = taosEncodeFixed32(buf, pInfo->totalSubBlocks); + + return buf; +} + +void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo) { + buf = taosDecodeFixed32(buf, &(pInfo->offset)); + buf = taosDecodeFixed32(buf, &(pInfo->len)); + buf = taosDecodeFixed64(buf, &(pInfo->size)); + buf = taosDecodeFixed64(buf, &(pInfo->tombSize)); + buf = taosDecodeFixed32(buf, &(pInfo->totalBlocks)); + buf = taosDecodeFixed32(buf, &(pInfo->totalSubBlocks)); + + return buf; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index a5cba70219a542963cc92b7e276866a8f1461c53..d6a070589dc170fefd463409505932a921022b6e 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1516,8 +1516,9 @@ static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) return TSDB_CODE_SUCCESS; } -int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, int64_t uid, const char *pTagCond, size_t len, int16_t tagNameRelType, - const char* tbnameCond, STableGroupInfo *pGroupInfo, SColIndex *pColIndex, int32_t numOfCols) { +int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTagCond, size_t len, + int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, + SColIndex* pColIndex, int32_t numOfCols) { STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); if (pTable == NULL) { uError("%p failed to get stable, uid:%" PRIu64, tsdb, uid); @@ -1589,7 +1590,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, int64_t uid, const char *pTagC return ret; } -int32_t tsdbGetOneTableGroup(TsdbRepoT* tsdb, int64_t uid, STableGroupInfo* pGroupInfo) { +int32_t tsdbGetOneTableGroup(TsdbRepoT* tsdb, uint64_t uid, STableGroupInfo* pGroupInfo) { STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); if (pTable == NULL) { return TSDB_CODE_INVALID_TABLE_ID; diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index c7ed6fcae11eb3c86c4728c4fd980a10638c16f7..e26caa4ae7bd28841bdcb5fd27bee1dd4cc53744 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -16,7 +16,7 @@ typedef struct { TsdbRepoT *pRepo; bool isAscend; int tid; - int64_t uid; + uint64_t uid; int sversion; TSKEY startTime; TSKEY interval; diff --git a/src/util/inc/tcoding.h b/src/util/inc/tcoding.h index 9f64f127e19c658d6770a052bf4791e4015355d5..b4f7f596c56ff6bde8242881d1f8e06a7264986a 100644 --- a/src/util/inc/tcoding.h +++ b/src/util/inc/tcoding.h @@ -29,6 +29,11 @@ extern "C" { static const int32_t TNUMBER = 1; #define IS_LITTLE_ENDIAN() (*(uint8_t *)(&TNUMBER) != 0) +static FORCE_INLINE void *taosEncodeFixed8(void *buf, uint8_t value) { + ((uint8_t *)buf)[0] = value; + return POINTER_SHIFT(buf, sizeof(value)); +} + static FORCE_INLINE void *taosEncodeFixed16(void *buf, uint16_t value) { if (IS_LITTLE_ENDIAN()) { memcpy(buf, &value, sizeof(value)); @@ -70,6 +75,11 @@ static FORCE_INLINE void *taosEncodeFixed64(void *buf, uint64_t value) { return POINTER_SHIFT(buf, sizeof(value)); } +static FORCE_INLINE void *taosDecodeFixed8(void *buf, uint8_t *value) { + *value = ((uint8_t *)buf)[0]; + return 
POINTER_SHIFT(buf, sizeof(*value)); +} + static FORCE_INLINE void *taosDecodeFixed16(void *buf, uint16_t *value) { if (IS_LITTLE_ENDIAN()) { memcpy(value, buf, sizeof(*value)); diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index 759ecbb7edd264c981aa999c26eb0b9dbbd658e2..686e5ab3132807bb55a770359842cbf635e6c5cf 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -174,7 +174,7 @@ void tSkipListNewNodeInfo(SSkipList *pSkipList, int32_t *level, int32_t *headSiz SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode); /** - * get only *one* node of which key is equalled to pKey, even there are more than one nodes are of the same key + * get *all* nodes which key are equivalent to pKey * * @param pSkipList * @param pKey @@ -234,14 +234,13 @@ SSkipListNode *tSkipListIterGet(SSkipListIterator *iter); void *tSkipListDestroyIter(SSkipListIterator *iter); /* - * remove only one node of the pKey value. - * If more than one node has the same value, any one will be removed + * remove nodes of the pKey value. + * If more than one node has the same value, all will be removed * * @Return - * true: one node has been removed - * false: no node has been removed + * the count of removed nodes */ -bool tSkipListRemove(SSkipList *pSkipList, SSkipListKey key); +uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key); /* * remove the specified node in parameters diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 25ea49b60d3786f956e0af679752e765e6462da1..b72db6a8d81a9379ad2fb3b4721ecf96c992580e 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -74,7 +74,47 @@ static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **forward, SSk static SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode); static SSkipListNode* tSkipListPushFront(SSkipList* pSkipList, SSkipListNode *pNode); static SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t order); -static SSkipListNode* tSkipListDoGet(SSkipList *pSkipList, SSkipListKey key); + + +// when order is TSDB_ORDER_ASC, return the last node with key less than val +// when order is TSDB_ORDER_DESC, return the first node with key large than val +static SSkipListNode* getPriorNode(SSkipList* pSkipList, const char* val, int32_t order) { + __compar_fn_t comparFn = pSkipList->comparFn; + SSkipListNode *pNode = NULL; + + if (order == TSDB_ORDER_ASC) { + pNode = pSkipList->pHead; + for (int32_t i = pSkipList->level - 1; i >= 0; --i) { + SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, i); + while (p != pSkipList->pTail) { + char *key = SL_GET_NODE_KEY(pSkipList, p); + if (comparFn(key, val) < 0) { + pNode = p; + p = SL_GET_FORWARD_POINTER(p, i); + } else { + break; + } + } + } + } else { + pNode = pSkipList->pTail; + for (int32_t i = pSkipList->level - 1; i >= 0; --i) { + SSkipListNode *p = SL_GET_BACKWARD_POINTER(pNode, i); + while (p != pSkipList->pHead) { + char *key = SL_GET_NODE_KEY(pSkipList, p); + if (comparFn(key, val) > 0) { + pNode = p; + p = SL_GET_BACKWARD_POINTER(p, i); + } else { + break; + } + } + } + } + + return pNode; +} + static bool initForwardBackwardPtr(SSkipList* pSkipList) { uint32_t maxLevel = pSkipList->maxLevel; @@ -110,7 +150,11 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint8_t keyLen, ui maxLevel = MAX_SKIP_LIST_LEVEL; } - pSkipList->keyInfo = (SSkipListKeyInfo){.type = keyType, .len = keyLen, .dupKey = dupKey, .freeNode = freeNode}; + pSkipList->keyInfo.type = keyType; + 
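/*
 * getPriorNode above positions the cursor just before the first node whose
 * key is >= the search value; tSkipListGet and tSkipListRemove then walk
 * forward on level 0 collecting or unlinking every node with an equal key,
 * which is what makes duplicate keys work. The sketch below models only that
 * level-0 walk with a plain sorted singly linked list and a head sentinel; it
 * is not the multi-level skip list itself.
 */
#include <stddef.h>

typedef struct NodeSketch {
  struct NodeSketch *next;
  int key;
} NodeSketch;

/* Last node with key < target (the "prior" node); head is a keyless sentinel. */
static NodeSketch *priorNodeSketch(NodeSketch *head, int key) {
  NodeSketch *p = head;
  while (p->next != NULL && p->next->key < key) p = p->next;
  return p;
}

/* Visit every duplicate by stepping forward while the key still matches. */
static int countEqualSketch(NodeSketch *head, int key) {
  int n = 0;
  for (NodeSketch *p = priorNodeSketch(head, key)->next;
       p != NULL && p->key == key; p = p->next) {
    n++;
  }
  return n;
}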
pSkipList->keyInfo.len = keyLen; + pSkipList->keyInfo.dupKey = dupKey; + pSkipList->keyInfo.freeNode = freeNode; + pSkipList->keyFn = fn; pSkipList->comparFn = getKeyComparFunc(keyType); pSkipList->maxLevel = maxLevel; @@ -240,13 +284,37 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, SSkipListNode *pNode) { return pNode; } + + SArray* tSkipListGet(SSkipList *pSkipList, SSkipListKey key) { SArray* sa = taosArrayInit(1, POINTER_BYTES); - SSkipListNode* pNode = tSkipListDoGet(pSkipList, key); - taosArrayPush(sa, &pNode); + + if (pSkipList->lock) { + pthread_rwlock_wrlock(pSkipList->lock); + } + + SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC); + while (1) { + SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0); + if (p == pSkipList->pTail) { + break; + } + if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { + break; + } + taosArrayPush(sa, &p); + pNode = p; + } + + if (pSkipList->lock) { + pthread_rwlock_unlock(pSkipList->lock); + } + return sa; } + + size_t tSkipListGetSize(const SSkipList* pSkipList) { if (pSkipList == NULL) { return 0; @@ -375,14 +443,52 @@ size_t tSkipListGetSize(const SSkipList* pSkipList) { // return true; //} -bool tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) { - SSkipListNode* pNode = tSkipListDoGet(pSkipList, key); - if (pNode != NULL) { - tSkipListRemoveNode(pSkipList, pNode); - return true; +uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) { + uint32_t count = 0; + + if (pSkipList->lock) { + pthread_rwlock_wrlock(pSkipList->lock); } - - return false; + + SSkipListNode* pNode = getPriorNode(pSkipList, key, TSDB_ORDER_ASC); + while (1) { + SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, 0); + if (p == pSkipList->pTail) { + break; + } + if (pSkipList->comparFn(key, SL_GET_NODE_KEY(pSkipList, p)) != 0) { + break; + } + + for (int32_t j = p->level - 1; j >= 0; --j) { + SSkipListNode* prev = SL_GET_BACKWARD_POINTER(p, j); + SSkipListNode* next = SL_GET_FORWARD_POINTER(p, j); + SL_GET_FORWARD_POINTER(prev, j) = next; + SL_GET_BACKWARD_POINTER(next, j) = prev; + } + + if (pSkipList->keyInfo.freeNode) { + tfree(p); + } + + ++count; + } + + // compress the minimum level of skip list + while (pSkipList->level > 0) { + if (SL_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) != NULL) { + break; + } + pSkipList->level--; + } + + pSkipList->size -= count; + + if (pSkipList->lock) { + pthread_rwlock_unlock(pSkipList->lock); + } + + return count; } void tSkipListRemoveNode(SSkipList *pSkipList, SSkipListNode *pNode) { @@ -425,54 +531,25 @@ SSkipListIterator* tSkipListCreateIter(SSkipList *pSkipList) { } SSkipListIterator *tSkipListCreateIterFromVal(SSkipList* pSkipList, const char* val, int32_t type, int32_t order) { - if (pSkipList == NULL) { - return NULL; - } - assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC); - + assert(pSkipList != NULL); + + SSkipListIterator* iter = doCreateSkipListIterator(pSkipList, order); if (val == NULL) { - return doCreateSkipListIterator(pSkipList, order); - } else { - - SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; - - int32_t ret = -1; - __compar_fn_t filterComparFn = getKeyComparFunc(pSkipList->keyInfo.type); - SSkipListNode* pNode = pSkipList->pHead; - - for (int32_t i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, i); - while (p != pSkipList->pTail) { - char *key = SL_GET_NODE_KEY(pSkipList, p); - - if ((ret = filterComparFn(key, val)) < 0) { - pNode = p; - p = SL_GET_FORWARD_POINTER(p, i); - } else { - 
break; - } - } - - forward[i] = pNode; - } - - SSkipListIterator* iter = doCreateSkipListIterator(pSkipList, order); - - // set the initial position - if (order == TSDB_ORDER_ASC) { - iter->cur = forward[0]; // greater equals than the value - } else { - iter->cur = SL_GET_FORWARD_POINTER(forward[0], 0); - - if (ret == 0) { - assert(iter->cur != pSkipList->pTail); - iter->cur = SL_GET_FORWARD_POINTER(iter->cur, 0); - } - } - return iter; } + + if (pSkipList->lock) { + pthread_rwlock_rdlock(pSkipList->lock); + } + + iter->cur = getPriorNode(pSkipList, val, order); + + if (pSkipList->lock) { + pthread_rwlock_unlock(pSkipList->lock); + } + + return iter; } bool tSkipListIterNext(SSkipListIterator *iter) { @@ -487,17 +564,9 @@ bool tSkipListIterNext(SSkipListIterator *iter) { } if (iter->order == TSDB_ORDER_ASC) { // ascending order iterate - if (iter->cur == NULL) { - iter->cur = SL_GET_FORWARD_POINTER(pSkipList->pHead, 0); - } else { - iter->cur = SL_GET_FORWARD_POINTER(iter->cur, 0); - } + iter->cur = SL_GET_FORWARD_POINTER(iter->cur, 0); } else { // descending order iterate - if (iter->cur == NULL) { - iter->cur = SL_GET_BACKWARD_POINTER(pSkipList->pTail, 0); - } else { - iter->cur = SL_GET_BACKWARD_POINTER(iter->cur, 0); - } + iter->cur = SL_GET_BACKWARD_POINTER(iter->cur, 0); } if (pSkipList->lock) { @@ -638,57 +707,16 @@ SSkipListNode* tSkipListPushBack(SSkipList *pSkipList, SSkipListNode *pNode) { return pNode; } -SSkipListNode* tSkipListDoGet(SSkipList *pSkipList, SSkipListKey skey) { - SSkipListNode *pNode = pSkipList->pHead; - SSkipListNode *pRes = NULL; - - if (pSkipList->lock) { - pthread_rwlock_rdlock(pSkipList->lock); - } - -#if SKIP_LIST_RECORD_PERFORMANCE - pSkipList->state.queryCount++; -#endif - - __compar_fn_t cmparFn = getComparFunc(pSkipList->keyInfo.type, 0); - - int32_t ret = -1; - for (int32_t i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_GET_FORWARD_POINTER(pNode, i); - while (p != pSkipList->pTail) { - char *key = SL_GET_NODE_KEY(pSkipList, p); - - if ((ret = cmparFn(key, skey)) < 0) { - pNode = p; - p = SL_GET_FORWARD_POINTER(p, i); - } else { - break; - } - } - - // find the qualified key - if (ret == 0) { - pRes = SL_GET_FORWARD_POINTER(pNode, i); - break; - // skip list does not allowed duplicated key, abort further retrieve data -// if (!pSkipList->keyInfo.dupKey) { -// break; -// } - } - } - - if (pSkipList->lock) { - pthread_rwlock_unlock(pSkipList->lock); - } - - return pRes; -} - SSkipListIterator* doCreateSkipListIterator(SSkipList *pSkipList, int32_t order) { SSkipListIterator* iter = calloc(1, sizeof(SSkipListIterator)); iter->pSkipList = pSkipList; iter->order = order; + if(order == TSDB_ORDER_ASC) { + iter->cur = pSkipList->pHead; + } else { + iter->cur = pSkipList->pTail; + } return iter; } \ No newline at end of file diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index 3713e71a0169acf6ec08f79cb2c65f4e0e9b7df9..70445a3d651cbd6acdf00a6f3999e5ea6b0a2c1d 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -281,34 +281,55 @@ void skiplistPerformanceTest() { // todo not support duplicated key yet void duplicatedKeyTest() { -#if 0 - SSkipListKey key; - key.nType = TSDB_DATA_TYPE_INT; - - SSkipListNode **pNodes = NULL; + SSkipList *pSkipList = tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_INT, sizeof(int), true, false, true, getkey); - SSkipList *pSkipList = tSkipListCreate(MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_INT, sizeof(int)); - - for (int32_t i = 0; i < 10000; ++i) { + 
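/*
 * With head/tail sentinels, the iterator changes earlier in this hunk let
 * doCreateSkipListIterator park the cursor on a sentinel and reduce
 * tSkipListIterNext to one unconditional step plus an end check. A minimal
 * model of that idea over a singly linked list, where NULL plays the role of
 * the tail sentinel; illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct IterNodeSketch {
  struct IterNodeSketch *next;
  int value;
} IterNodeSketch;

typedef struct {
  IterNodeSketch *cur;
} IterSketch;

static void iterInitSketch(IterSketch *it, IterNodeSketch *headSentinel) {
  it->cur = headSentinel;          /* start on the sentinel, not the first node */
}

static bool iterNextSketch(IterSketch *it) {
  it->cur = it->cur->next;         /* always a single forward step */
  return it->cur != NULL;          /* NULL stands in for the tail sentinel */
}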
for (int32_t i = 0; i < 200; ++i) { for (int32_t j = 0; j < 5; ++j) { - key.i64Key = i; - tSkipListPut(pSkipList, "", &key, 1); + int32_t level, size; + tSkipListNewNodeInfo(pSkipList, &level, &size); + SSkipListNode* d = (SSkipListNode*)calloc(1, size + sizeof(int32_t)); + d->level = level; + int32_t* key = (int32_t*)SL_GET_NODE_KEY(pSkipList, d); + key[0] = i; + tSkipListPut(pSkipList, d); } } - tSkipListPrint(pSkipList, 1); - for (int32_t i = 0; i < 100; ++i) { - key.i64Key = rand() % 1000; - int32_t size = tSkipListGets(pSkipList, &key, &pNodes); - - assert(size == 5); + SSkipListKey key; + SArray* nodes = tSkipListGet(pSkipList, (char*)(&i)); + assert( taosArrayGetSize(nodes) == 5 ); + taosArrayDestroy(nodes); + } - tfree(pNodes); + int32_t key = 101; + uint32_t num = tSkipListRemove(pSkipList, (char*)(&key)); + assert(num == 5); + + SArray* nodes = tSkipListGet(pSkipList, (char*)(&key)); + assert( taosArrayGetSize(nodes) == 0 ); + taosArrayDestroy(nodes); + + key = 102; + SSkipListIterator* iter = tSkipListCreateIterFromVal(pSkipList, (char*)(&key), TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC); + for(int i = 0; i < 6; i++) { + assert(tSkipListIterNext(iter) == true); + SSkipListNode* node = tSkipListIterGet(iter); + int32_t* val = (int32_t*)SL_GET_NODE_KEY(pSkipList, node); + assert((i < 5) == ((*val) == key)); } + tSkipListDestroyIter(iter); + + iter = tSkipListCreateIterFromVal(pSkipList, (char*)(&key), TSDB_DATA_TYPE_INT, TSDB_ORDER_DESC); + for(int i = 0; i < 6; i++) { + assert(tSkipListIterNext(iter) == true); + SSkipListNode* node = tSkipListIterGet(iter); + int32_t* val = (int32_t*)SL_GET_NODE_KEY(pSkipList, node); + assert((i < 5) == ((*val) == key)); + } + tSkipListDestroyIter(iter); tSkipListDestroy(pSkipList); -#endif } } // namespace diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index d41957d8f47da10555a74c115b30482e0e9b4058..dea9369dd8d08c6cc26dc6049b65fa6ec11e7682 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -37,8 +37,8 @@ typedef struct { int32_t refCount; // reference count int status; int8_t role; - int64_t version; - int64_t savedVersion; + int64_t version; // current version + int64_t fversion; // version on saved data file void *wqueue; void *rqueue; void *wal; @@ -46,11 +46,11 @@ typedef struct { void *sync; void *events; void *cq; // continuous query - int32_t cfgVersion; - STsdbCfg tsdbCfg; - SSyncCfg syncCfg; - SWalCfg walCfg; - char * rootDir; + int32_t cfgVersion; + STsdbCfg tsdbCfg; + SSyncCfg syncCfg; + SWalCfg walCfg; + char *rootDir; } SVnodeObj; int vnodeWriteToQueue(void *param, void *pHead, int type); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 838b9d290f3823a501f344c1a97fef0c42f9fdff..6dabc98ae829dddce569df8a19ee022cdeac30ec 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -37,10 +37,10 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode); static int32_t vnodeSaveVersion(SVnodeObj *pVnode); static bool vnodeReadVersion(SVnodeObj *pVnode); static int vnodeProcessTsdbStatus(void *arg, int status); -static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size); +static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion); static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index); static void vnodeNotifyRole(void *ahandle, int8_t role); -static void vnodeNotifyFileSynced(void *ahandle); +static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion); static 
pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT; @@ -119,9 +119,14 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { } int32_t vnodeDrop(int32_t vgId) { + if (tsDnodeVnodesHash == NULL) { + vTrace("vgId:%d, failed to drop, vgId not exist", vgId); + return TSDB_CODE_INVALID_VGROUP_ID; + } + SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); if (ppVnode == NULL || *ppVnode == NULL) { - vTrace("vgId:%d, failed to drop, vgId not exist", vgId); + vTrace("vgId:%d, failed to drop, vgId not find", vgId); return TSDB_CODE_INVALID_VGROUP_ID; } @@ -191,6 +196,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { } vnodeReadVersion(pVnode); + pVnode->fversion = pVnode->version; pVnode->wqueue = dnodeAllocateWqueue(pVnode); pVnode->rqueue = dnodeAllocateRqueue(pVnode); @@ -359,7 +365,6 @@ void vnodeBuildStatusMsg(void *param) { if (*pVnode == NULL) continue; vnodeBuildVloadMsg(*pVnode, pStatus); - pStatus++; } taosHashDestroyIter(pIter); @@ -376,7 +381,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { cqClose(pVnode->cq); pVnode->cq = NULL; - tsdbCloseRepo(pVnode->tsdb); + tsdbCloseRepo(pVnode->tsdb, 1); pVnode->tsdb = NULL; walClose(pVnode->wal); @@ -390,7 +395,7 @@ static int vnodeProcessTsdbStatus(void *arg, int status) { SVnodeObj *pVnode = arg; if (status == TSDB_STATUS_COMMIT_START) { - pVnode->savedVersion = pVnode->version; + pVnode->fversion = pVnode->version; return walRenew(pVnode->wal); } @@ -400,8 +405,9 @@ static int vnodeProcessTsdbStatus(void *arg, int status) { return 0; } -static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size) { +static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) { SVnodeObj *pVnode = ahandle; + *fversion = pVnode->fversion; return tsdbGetFileInfo(pVnode->tsdb, name, index, size); } @@ -412,6 +418,7 @@ static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index) { static void vnodeNotifyRole(void *ahandle, int8_t role) { SVnodeObj *pVnode = ahandle; + vPrint("vgId:%d, sync role changed from %d to %d", pVnode->vgId, pVnode->role, role); pVnode->role = role; if (pVnode->role == TAOS_SYNC_ROLE_MASTER) @@ -420,14 +427,18 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) { cqStop(pVnode->cq); } -static void vnodeNotifyFileSynced(void *ahandle) { +static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) { SVnodeObj *pVnode = ahandle; - vTrace("vgId:%d, data file is synced", pVnode->vgId); + vTrace("vgId:%d, data file is synced, fversion:%" PRId64 "", pVnode->vgId, fversion); + + pVnode->fversion = fversion; + pVnode->version = fversion; + vnodeSaveVersion(pVnode); char rootDir[128] = "\0"; sprintf(rootDir, "%s/tsdb", pVnode->rootDir); // clsoe tsdb, then open tsdb - tsdbCloseRepo(pVnode->tsdb); + tsdbCloseRepo(pVnode->tsdb, 0); STsdbAppH appH = {0}; appH.appH = (void *)pVnode; appH.notifyStatus = vnodeProcessTsdbStatus; @@ -701,14 +712,14 @@ static int32_t vnodeSaveVersion(SVnodeObj *pVnode) { char * content = calloc(1, maxLen + 1); len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->savedVersion); + len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->fversion); len += snprintf(content + len, maxLen - len, "}\n"); fwrite(content, 1, len, fp); fclose(fp); free(content); - vPrint("vgId:%d, save vnode version:%" PRId64 " succeed", pVnode->vgId, pVnode->savedVersion); + 
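/*
 * The vnodeMain.c changes in this hunk keep two counters: "version" advances
 * with every applied write, while "fversion" records the version already
 * covered by committed or synced data files; commit start snapshots version
 * into fversion, and a completed file sync overwrites both. A minimal
 * bookkeeping model with illustrative names, not the vnode API.
 */
#include <stdint.h>

typedef struct {
  int64_t version;     /* latest applied write */
  int64_t fversion;    /* version durable in data files */
} VnodeVerSketch;

static void onWriteApplied(VnodeVerSketch *v) { v->version++; }

static void onCommitStart(VnodeVerSketch *v) {
  v->fversion = v->version;        /* everything up to here goes into files */
}

static void onFileSynced(VnodeVerSketch *v, int64_t syncedFver) {
  v->fversion = syncedFver;        /* replica now reflects the synced files */
  v->version  = syncedFver;
}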
vPrint("vgId:%d, save vnode version:%" PRId64 " succeed", pVnode->vgId, pVnode->fversion); return 0; } diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index ec0a3b2f0b55e544aefd26ffa6588a1015023c68..635c4669782114fa34c420bd9e7d752fc5f24828 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -91,17 +91,16 @@ static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pR // save insert result into item vTrace("vgId:%d, submit msg is processed", pVnode->vgId); - code = tsdbInsertData(pVnode->tsdb, pCont); - + pRet->len = sizeof(SShellSubmitRspMsg); pRet->rsp = rpcMallocCont(pRet->len); SShellSubmitRspMsg *pRsp = pRet->rsp; - + code = tsdbInsertData(pVnode->tsdb, pCont, pRsp); + pRsp->numOfFailedBlocks = 0; //TODO + //pRet->len += pRsp->numOfFailedBlocks * sizeof(SShellSubmitRspBlock); //TODO pRsp->code = 0; pRsp->numOfRows = htonl(1); - pRsp->affectedRows = htonl(1); - pRsp->numOfFailedBlocks = 0; - + return code; } diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md new file mode 100644 index 0000000000000000000000000000000000000000..7c42d47d1b9c95a863539e3dbe1f4b94abf6c753 --- /dev/null +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -0,0 +1,235 @@ +### Prepare development environment + +1. sudo apt install + build-essential cmake net-tools python-pip python-setuptools python3-pip + python3-setuptools valgrind psmisc curl + +2. git clone ; cd TDengine + +3. mkdir debug; cd debug; cmake ..; make ; sudo make install + +4. pip install src/connector/python/linux/python2 ; pip3 install + src/connector/python/linux/python3 + +### How to run Python test suite + +1. cd \/tests/pytest + +2. ./smoketest.sh \# for smoke test + +3. ./smoketest.sh -g \# for memory leak detection test with valgrind + +4. ./fulltest.sh \# for full test + +> Note1: TDengine daemon's configuration and data files are stored in +> \/sim directory. As a historical design, it's same place with +> TSIM script. So after the TSIM script ran with sudo privilege, the directory +> has been used by TSIM then the python script cannot write it by a normal +> user. You need to remove the directory completely first before running the +> Python test case. We should consider using two different locations to store +> for TSIM and Python script. + +> Note2: if you need to debug crash problem with a core dump, you need +> manually edit smoketest.sh or fulltest.sh to add "ulimit -c unlimited" +> before the script line. Then you can look for the core file in +> \/tests/pytest after the program crash. + + +### How to add a new test case + +**1. TSIM test cases:** + +TSIM was the testing framework has been used internally. Now it still be used to run the test cases we develop in the past as a legacy system. We are turning to use Python to develop new test case and are abandoning TSIM gradually. + +**2. Python test cases:** + +**2.1 Please refer to \/tests/pytest/insert/basic.py to add a new +test case.** The new test case must implement 3 functions, where self.init() +and self.stop() simply copy the contents of insert/basic.py and the test +logic is implemented in self.run(). You can refer to the code in the util +directory for more information. + +**2.2 Edit smoketest.sh to add the path and filename of the new test case** + +Note: The Python test framework may continue to be improved in the future, +hopefully, to provide more functionality and ease of writing test cases. 
The +method of writing the test case above does not exclude that it will also be +affected. + +**2.3 What test.py does in detail:** + +test.py is the entry program for test case execution and monitoring. + +test.py has the following functions. + +\-f --file, Specifies the test case file name to be executed +-p --path, Specifies deployment path + +\-m --master, Specifies the master server IP for cluster deployment +-c--cluster, test cluster function +-s--stop, terminates all running nodes + +\-g--valgrind, load valgrind for memory leak detection test + +\-h--help, display help + +**2.4 What util/log.py does in detail:** + +log.py is quite simple, the main thing is that you can print the output in +different colors as needed. The success() should be called for successful +test case execution and the success() will print green text. The exit() will +print red text and exit the program, exit() should be called for test +failure. + +**util/log.py** + +... + +    def info(self, info): + +        printf("%s %s" % (datetime.datetime.now(), info)) + +  + +    def sleep(self, sec): + +        printf("%s sleep %d seconds" % (datetime.datetime.now(), sec)) + +        time.sleep(sec) + +  + +    def debug(self, err): + +        printf("\\033[1;36m%s %s\\033[0m" % (datetime.datetime.now(), err)) + +  + +    def success(self, info): + +        printf("\\033[1;32m%s %s\\033[0m" % (datetime.datetime.now(), info)) + +  + +    def notice(self, err): + +        printf("\\033[1;33m%s %s\\033[0m" % (datetime.datetime.now(), err)) + +  + +    def exit(self, err): + +        printf("\\033[1;31m%s %s\\033[0m" % (datetime.datetime.now(), err)) + +        sys.exit(1) + +  + +    def printNoPrefix(self, info): + +        printf("\\033[1;36m%s\\033[0m" % (info) + +... + +**2.5 What util/sql.py does in detail:** + +SQL.py is mainly used to execute SQL statements to manipulate the database, +and the code is extracted and commented as follows: + +**util/sql.py** + +\# prepare() is mainly used to set up the environment for testing table and +data, and to set up the database db for testing. do not call prepare() if you +need to test the database operation command. + +def prepare(self): + +tdLog.info("prepare database:db") + +self.cursor.execute('reset query cache') + +self.cursor.execute('drop database if exists db') + +self.cursor.execute('create database db') + +self.cursor.execute('use db') + +... + +\# query() is mainly used to execute select statements for normal syntax input + +def query(self, sql): + +... + +\# error() is mainly used to execute the select statement with the wrong syntax +input, the error will be caught as a reasonable behavior, if not caught it will +prove that the test failed + +def error() + +... + +\# checkRows() is used to check the number of returned lines after calling +query(select ...) after calling the query(select ...) to check the number of +rows of returned results. + +def checkRows(self, expectRows): + +... + +\# checkData() is used to check the returned result data after calling +query(select ...) after the query(select ...) is called, failure to meet +expectation is + +def checkData(self, row, col, data): + +... + +\# getData() returns the result data after calling query(select ...) to return +the resulting data after calling query(select ...) + +def getData(self, row, col): + +... + +\# execute() used to execute sql and return the number of affected rows + +def execute(self, sql): + +... 
+ +\# executeTimes() Multiple executions of the same sql statement + +def executeTimes(self, sql, times): + +... + +\# CheckAffectedRows() Check if the number of affected rows is as expected + +def checkAffectedRows(self, expectAffectedRows): + +... + +> Note: Both Python2 and Python3 are currently supported by the Python test +> case. Since Python2 is no longer officially supported by January 1, 2020, it +> is recommended that subsequent test case development be guaranteed to run +> correctly on Python3. For Python2, please consider being compatible if +> appropriate without additional +> burden.   + +### CI submission adoption principle. + +- Every commit / PR compilation must pass. Currently, the warning is treated + as an error, so the warning must also be resolved. + +- Test cases that already exist must pass. + +- Because CI is very important to support build and automatically test + procedure, it is necessary to manually test the test case before adding it + and do as many iterations as possible to ensure that the test case provides + stable and reliable test results when added. + +> Note: In the future, according to the requirements and test development +> progress will add stress testing, performance testing, code style, +> and other features based on functional testing. diff --git a/tests/pytest/alter/__init__.py b/tests/pytest/alter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/pytest/alter/alter_replica.py b/tests/pytest/alter/alter_replica.py new file mode 100644 index 0000000000000000000000000000000000000000..6cf0f65825920a10ab1a8de175325e535f5828e6 --- /dev/null +++ b/tests/pytest/alter/alter_replica.py @@ -0,0 +1,129 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self): + tdLog.debug("start to execute %s" % __file__) + tdLog.info("prepare cluster") + tdDnodes.stopAll() + tdDnodes.deploy(1) + tdDnodes.start(1) + + self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + tdSql.init(self.conn.cursor()) + tdSql.execute('reset query cache') + tdSql.execute('create dnode 192.168.0.2') + tdDnodes.deploy(2) + tdDnodes.start(2) + + self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + tdSql.init(self.conn.cursor()) + tdSql.execute('reset query cache') + tdSql.execute('create dnode 192.168.0.3') + tdDnodes.deploy(3) + tdDnodes.start(3) + + def run(self): + tdSql.execute('create database db replica 3 days 7') + tdSql.execute('use db') + for tid in range(1, 11): + tdSql.execute('create table tb%d(ts timestamp, i int)' % tid) + tdLog.sleep(10) + + tdLog.info("================= step1") + startTime = 1520000010000 + for rid in range(1, 11): + for tid in range(1, 11): + tdSql.execute( + 'insert into tb%d values(%ld, %d)' % + (tid, startTime, rid)) + startTime += 1 + tdSql.query('select * from tb1') + tdSql.checkRows(10) + tdLog.sleep(5) + + tdLog.info("================= step2") + tdSql.execute('alter database db replica 2') + tdLog.sleep(10) + + tdLog.info("================= step3") + for rid in range(1, 11): + for tid in range(1, 11): + tdSql.execute( + 'insert into tb%d values(%ld, %d)' % + (tid, startTime, rid)) + startTime += 1 + tdSql.query('select * from tb1') + tdSql.checkRows(20) + tdLog.sleep(5) + + tdLog.info("================= step4") + tdSql.execute('alter database db replica 1') + tdLog.sleep(10) + + tdLog.info("================= step5") + for rid in range(1, 11): + for tid in range(1, 11): + tdSql.execute( + 'insert into tb%d values(%ld, %d)' % + (tid, startTime, rid)) + startTime += 1 + tdSql.query('select * from tb1') + tdSql.checkRows(30) + tdLog.sleep(5) + + tdLog.info("================= step6") + tdSql.execute('alter database db replica 2') + tdLog.sleep(10) + + tdLog.info("================= step7") + for rid in range(1, 11): + for tid in range(1, 11): + tdSql.execute( + 'insert into tb%d values(%ld, %d)' % + (tid, startTime, rid)) + startTime += 1 + tdSql.query('select * from tb1') + tdSql.checkRows(40) + tdLog.sleep(5) + + tdLog.info("================= step8") + tdSql.execute('alter database db replica 3') + tdLog.sleep(10) + + tdLog.info("================= step9") + for rid in range(1, 11): + for tid in range(1, 11): + tdSql.execute( + 'insert into tb%d values(%ld, %d)' % + (tid, startTime, rid)) + startTime += 1 + tdSql.query('select * from tb1') + tdSql.checkRows(50) + tdLog.sleep(5) + + def stop(self): + tdSql.close() + self.conn.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addCluster(__file__, TDTestCase()) diff --git a/tests/pytest/alter/alter_stable.py b/tests/pytest/alter/alter_stable.py new file mode 100644 index 0000000000000000000000000000000000000000..5772edcf7ff52a2ef4113bbea21ed877af4c85e4 --- /dev/null +++ b/tests/pytest/alter/alter_stable.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from 
util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.types = [ + "int", + "bigint", + "float", + "double", + "smallint", + "tinyint", + "binary(10)", + "nchar(10)", + "timestamp"] + self.rowNum = 300 + self.ts = 1537146000000 + self.step = 1000 + self.sqlHead = "select count(*), count(c1) " + self.sqlTail = " from stb" + + def addColumnAndCount(self): + for colIdx in range(len(self.types)): + tdSql.execute( + "alter table stb add column c%d %s" % + (colIdx + 2, self.types[colIdx])) + self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2) + tdSql.query(self.sqlHead + self.sqlTail) + + # count non-NULL values in each column + tdSql.checkData(0, 0, self.rowNum * (colIdx + 1)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 1)) + for i in range(2, colIdx + 2): + print("check1: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2)) + + # insert more rows + for k in range(self.rowNum): + self.ts += self.step + sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2) + for j in range(colIdx + 1): + sql += ", %d" % (colIdx + 2) + sql += ")" + tdSql.execute(sql) + + # count non-NULL values in each column + tdSql.query(self.sqlHead + self.sqlTail) + tdSql.checkData(0, 0, self.rowNum * (colIdx + 2)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 2)) + for i in range(2, colIdx + 2): + print("check2: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + + def dropColumnAndCount(self): + tdSql.query(self.sqlHead + self.sqlTail) + res = [] + for i in range(len(self.types)): + res.append(tdSql.getData(0, i + 2)) + + print(res) + + for colIdx in range(len(self.types), 0, -1): + tdSql.execute("alter table stb drop column c%d" % (colIdx + 2)) + # self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2) + tdSql.query(self.sqlHead + self.sqlTail) + + # count non-NULL values in each column + tdSql.checkData(0, 0, self.rowNum * (colIdx + 1)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 1)) + for i in range(2, colIdx + 2): + print("check1: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2)) + + # insert more rows + for k in range(self.rowNum): + self.ts += self.step + sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2) + for j in range(colIdx + 1): + sql += ", %d" % (colIdx + 2) + sql += ")" + tdSql.execute(sql) + + # count non-NULL values in each column + tdSql.query(self.sqlHead + self.sqlTail) + tdSql.checkData(0, 0, self.rowNum * (colIdx + 2)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 2)) + for i in range(2, colIdx + 2): + print("check2: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + + def run(self): + # Setup params + db = "db" + + # Create db + tdSql.execute("drop database if exists %s" % (db)) + tdSql.execute("reset query cache") + tdSql.execute("create database %s maxrows 200 maxtables 4" % (db)) + tdSql.execute("use %s" % (db)) + + # Create a table with one colunm of int type and insert 300 rows + tdLog.info("Create stb and tb") + tdSql.execute("create table stb (ts timestamp, c1 int) tags (tg1 int)") + tdSql.execute("create table tb using stb tags (0)") + tdLog.info("Insert %d rows into tb" % (self.rowNum)) + for k in range(1, self.rowNum + 1): + self.ts += self.step + tdSql.execute("insert into tb values (%d, 1)" % (self.ts)) + + # Alter tb and add a column of smallint type, then 
query tb to see if + # all added columns are NULL + self.addColumnAndCount() + tdDnodes.stop(1) + time.sleep(5) + tdDnodes.start(1) + time.sleep(5) + tdSql.query(self.sqlHead + self.sqlTail) + for i in range(2, len(self.types) + 2): + tdSql.checkData(0, i, self.rowNum * (len(self.types) + 2 - i)) + + self.dropColumnAndCount() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +#tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0c591da695b5f8f573fe470017e1f4ba31b068 --- /dev/null +++ b/tests/pytest/alter/alter_table.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.types = [ + "int", + "bigint", + "float", + "double", + "smallint", + "tinyint", + "binary(10)", + "nchar(10)", + "timestamp"] + self.rowNum = 300 + self.ts = 1537146000000 + self.step = 1000 + self.sqlHead = "select count(*), count(c1) " + self.sqlTail = " from tb" + + def addColumnAndCount(self): + for colIdx in range(len(self.types)): + tdSql.execute( + "alter table tb add column c%d %s" % + (colIdx + 2, self.types[colIdx])) + self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2) + tdSql.query(self.sqlHead + self.sqlTail) + + # count non-NULL values in each column + tdSql.checkData(0, 0, self.rowNum * (colIdx + 1)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 1)) + for i in range(2, colIdx + 2): + print("check1: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2)) + + # insert more rows + for k in range(self.rowNum): + self.ts += self.step + sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2) + for j in range(colIdx + 1): + sql += ", %d" % (colIdx + 2) + sql += ")" + tdSql.execute(sql) + + # count non-NULL values in each column + tdSql.query(self.sqlHead + self.sqlTail) + tdSql.checkData(0, 0, self.rowNum * (colIdx + 2)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 2)) + for i in range(2, colIdx + 2): + print("check2: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + + def dropColumnAndCount(self): + + tdSql.query(self.sqlHead + self.sqlTail) + # snapshot the per-column counts before any column is dropped + res = [] + for i in range(len(self.types)): + res.append(tdSql.getData(0, i + 2)) + + print(res) + + for colIdx in range(len(self.types), 0, -1): + tdSql.execute("alter table tb drop column c%d" % (colIdx + 2)) + # self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2) + tdSql.query(self.sqlHead + self.sqlTail) + + # count non-NULL values in each column + tdSql.checkData(0, 0, self.rowNum * (colIdx + 1)) + tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
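+ # only the columns that have not been dropped yet (c2 through c(colIdx+1)) are checked below +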
for i in range(2, colIdx + 2): + print("check2: i=%d colIdx=%d" % (i, colIdx)) + tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + + def run(self): + # Setup params + db = "db" + + # Create db + tdSql.execute("drop database if exists %s" % (db)) + tdSql.execute("reset query cache") + tdSql.execute("create database %s maxrows 200 maxtables 4" % (db)) + tdSql.execute("use %s" % (db)) + + # Create a table with one colunm of int type and insert 300 rows + tdLog.info("Create table tb") + tdSql.execute("create table tb (ts timestamp, c1 int)") + tdLog.info("Insert %d rows into tb" % (self.rowNum)) + for k in range(1, self.rowNum + 1): + self.ts += self.step + tdSql.execute("insert into tb values (%d, 1)" % (self.ts)) + + # Alter tb and add a column of smallint type, then query tb to see if + # all added column are NULL + self.addColumnAndCount() + tdDnodes.stop(1) + time.sleep(5) + tdDnodes.start(1) + time.sleep(5) + tdSql.query(self.sqlHead + self.sqlTail) + for i in range(2, len(self.types) + 2): + tdSql.checkData(0, i, self.rowNum * (len(self.types) + 2 - i)) + + self.dropColumnAndCount() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +#tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/alter/file_corrupt.py b/tests/pytest/alter/file_corrupt.py new file mode 100644 index 0000000000000000000000000000000000000000..51ea8822704829f49b1bcfde882b9055199c838e --- /dev/null +++ b/tests/pytest/alter/file_corrupt.py @@ -0,0 +1,77 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + tdSql.execute( + 'create table st (ts timestamp, v1 int, v2 int, v3 int, v4 int, v5 int) tags (t int)') + + totalTables = 100 + batchSize = 500 + totalBatch = 60 + + tdLog.info( + "create %d tables, insert %d rows per table" % + (totalTables, batchSize * totalBatch)) + + for t in range(0, totalTables): + tdSql.execute('create table t%d using st tags(%d)' % (t, t)) + # 2019-06-10 00:00:00 + beginTs = 1560096000000 + interval = 10000 + for r in range(0, totalBatch): + sql = 'insert into t%d values ' % (t) + for b in range(0, batchSize): + ts = beginTs + (r * batchSize + b) * interval + sql += '(%d, 1, 2, 3, 4, 5)' % (ts) + tdSql.execute(sql) + + tdLog.info("insert data finished") + tdSql.execute('alter table st add column v6 int') + tdLog.sleep(5) + tdLog.info("alter table finished") + + tdSql.query("select count(*) from t50") + tdSql.checkData(0, 0, (int)(batchSize * totalBatch)) + + tdLog.info("insert") + tdSql.execute( + "insert into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)") + + tdLog.info("import") + tdSql.execute( + "import into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)") + + tdLog.info("query") + tdSql.query("select count(*) from t50") + tdSql.checkData(0, 0, batchSize * 
totalBatch + 1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/dbmgmt/__init__.py b/tests/pytest/dbmgmt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/pytest/dbmgmt/createTableAndDropDnodes.py b/tests/pytest/dbmgmt/createTableAndDropDnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..6e29c023abac3d46f873ec85ed79809f28c26d03 --- /dev/null +++ b/tests/pytest/dbmgmt/createTableAndDropDnodes.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- + +import sys +import taos +import threading +import traceback +import random +import datetime +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + + def init(self): + tdLog.debug("start to execute %s" % __file__) + tdLog.info("prepare cluster") + tdDnodes.stopAll() + tdDnodes.deploy(1) + tdDnodes.start(1) + + self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + tdSql.init(self.conn.cursor()) + tdSql.execute('reset query cache') + tdSql.execute('create dnode 192.168.0.2') + tdDnodes.deploy(2) + tdDnodes.start(2) + tdSql.execute('create dnode 192.168.0.3') + tdDnodes.deploy(3) + tdDnodes.start(3) + time.sleep(3) + + self.db = "db" + self.stb = "stb" + self.tbPrefix = "tb" + self.tbNum = 100000 + self.count = 0 + # self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + self.threadNum = 1 + # threadLock = threading.Lock() + # global counter for number of tables created by all threads + self.global_counter = 0 + + tdSql.init(self.conn.cursor()) + + def _createTable(self, threadId): + print("Thread%d : createTable" % (threadId)) + conn = taos.connect(config=tdDnodes.getSimCfgPath()) + cursor = conn.cursor() + i = 0 + try: + sql = "use %s" % (self.db) + cursor.execute(sql) + while i < self.tbNum: + if (i % self.threadNum == threadId): + cursor.execute( + "create table tb%d using %s tags(%d)" % + (i + 1, self.stb, i + 1)) + with threading.Lock(): + self.global_counter += 1 + i += 1 + except Exception as e: + tdLog.info( + "Failure when creating table tb%d, exception: %s" % + (i + 1, str(e))) + finally: + cursor.close() + conn.close() + + def _interfereDnodes(self, threadId, dnodeId): + conn = taos.connect(config=tdDnodes.getSimCfgPath()) + cursor = conn.cursor() + # interfere dnode while creating table + print("Thread%d to interfere dnode%d" % (threadId, dnodeId)) + while self.global_counter < self.tbNum * 0.05: + time.sleep(0.2) + cursor.execute("drop dnode 192.168.0.%d" % (dnodeId)) + while self.global_counter < self.tbNum * 0.15: + time.sleep(0.2) + cursor.execute("create dnode 192.168.0.%d" % (dnodeId)) + while self.global_counter < self.tbNum * 0.35: + time.sleep(0.2) + cursor.execute("drop dnode 192.168.0.%d" % (dnodeId)) + while self.global_counter < self.tbNum * 0.45: + time.sleep(0.2) + cursor.execute("create dnode 192.168.0.%d" % (dnodeId)) + while self.global_counter < self.tbNum * 0.65: + time.sleep(0.2) + cursor.execute("drop dnode 192.168.0.%d" % (dnodeId)) + while self.global_counter < self.tbNum * 0.85: + time.sleep(0.2) + cursor.execute("create dnode 192.168.0.%d" % (dnodeId)) + + def run(self): + tdLog.info("================= creating database with replica 2") + threadId = 0 + threads = [] + try: + tdSql.execute("drop database if exists %s" % (self.db)) + tdSql.execute( + "create database %s 
replica 2 cache 2048 ablocks 2.0 tblocks 10 tables 2000" % + (self.db)) + tdLog.sleep(3) + tdSql.execute("use %s" % (self.db)) + tdSql.execute( + "create table %s (ts timestamp, c1 bigint, stime timestamp) tags(tg1 bigint)" % + (self.stb)) + tdLog.info("Start to create tables") + while threadId < self.threadNum: + tdLog.info("Thread-%d starts to create tables" % (threadId)) + cThread = threading.Thread( + target=self._createTable, + name="thread-%d" % + (threadId), + args=( + threadId, + )) + cThread.start() + threads.append(cThread) + threadId += 1 + + except Exception as e: + tdLog.info("Failed to create tb%d, exception: %s" % (i, str(e))) + # tdDnodes.stopAll() + finally: + time.sleep(1) + + threading.Thread( + target=self._interfereDnodes, + name="thread-interfereDnode%d" % + (3), + args=( + 1, + 3, + )).start() + for t in range(len(threads)): + tdLog.info("Join threads") + # threads[t].start() + threads[t].join() + + tdSql.query("show stables") + tdSql.checkData(0, 4, self.tbNum) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addCluster(__file__, TDTestCase()) diff --git a/tests/pytest/dbmgmt/createTableAndKillDnodes.py b/tests/pytest/dbmgmt/createTableAndKillDnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..7772ecdc68e2b85206a5f9e64c10021afef38279 --- /dev/null +++ b/tests/pytest/dbmgmt/createTableAndKillDnodes.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- + +import sys +import taos +import threading +import traceback +import random +import datetime +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + + def init(self): + tdLog.debug("start to execute %s" % __file__) + tdLog.info("prepare cluster") + tdDnodes.stopAll() + tdDnodes.deploy(1) + tdDnodes.start(1) + + self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + tdSql.init(self.conn.cursor()) + tdSql.execute('reset query cache') + tdSql.execute('create dnode 192.168.0.2') + tdDnodes.deploy(2) + tdDnodes.start(2) + tdSql.execute('create dnode 192.168.0.3') + tdDnodes.deploy(3) + tdDnodes.start(3) + time.sleep(3) + + self.db = "db" + self.stb = "stb" + self.tbPrefix = "tb" + self.tbNum = 100000 + self.count = 0 + # self.conn = taos.connect(config=tdDnodes.getSimCfgPath()) + self.threadNum = 1 + # threadLock = threading.Lock() + # global counter for number of tables created by all threads + self.global_counter = 0 + + tdSql.init(self.conn.cursor()) + + def _createTable(self, threadId): + print("Thread%d : createTable" % (threadId)) + conn = taos.connect(config=tdDnodes.getSimCfgPath()) + cursor = conn.cursor() + i = 0 + try: + sql = "use %s" % (self.db) + cursor.execute(sql) + while i < self.tbNum: + if (i % self.threadNum == threadId): + cursor.execute( + "create table tb%d using %s tags(%d)" % + (i + 1, self.stb, i + 1)) + with threading.Lock(): + self.global_counter += 1 + time.sleep(0.01) + i += 1 + except Exception as e: + tdLog.info( + "Failure when creating table tb%d, exception: %s" % + (i + 1, str(e))) + finally: + cursor.close() + conn.close() + + def _interfereDnodes(self, threadId, dnodeId): + # interfere dnode while creating table + print("Thread%d to interfere dnode%d" % (threadId, dnodeId)) + percent = 0.05 + loop = int(1 / (2 * percent)) + for t in range(1, loop): + while self.global_counter < self.tbNum * (t * percent): + time.sleep(0.2) + tdDnodes.forcestop(dnodeId) + while self.global_counter < self.tbNum * ((t + 1) * percent): + time.sleep(0.2) + 
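# once roughly another 5% of the tables have been created, bring the stopped dnode back online +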
tdDnodes.start(dnodeId) + + # while self.global_counter < self.tbNum * 0.05: + # time.sleep(0.2) + # tdDnodes.forcestop(dnodeId) + # while self.global_counter < self.tbNum * 0.10: + # time.sleep(0.2) + # tdDnodes.start(dnodeId) + # while self.global_counter < self.tbNum * 0.15: + # time.sleep(0.2) + # tdDnodes.forcestop(dnodeId) + # while self.global_counter < self.tbNum * 0.20: + # time.sleep(0.2) + # tdDnodes.start(dnodeId) + # while self.global_counter < self.tbNum * 0.25: + # time.sleep(0.2) + # tdDnodes.forcestop(dnodeId) + # while self.global_counter < self.tbNum * 0.30: + # time.sleep(0.2) + # tdDnodes.start(dnodeId) + # while self.global_counter < self.tbNum * 0.35: + # time.sleep(0.2) + # tdDnodes.forcestop(dnodeId) + # while self.global_counter < self.tbNum * 0.40: + # time.sleep(0.2) + # tdDnodes.start(dnodeId) + # while self.global_counter < self.tbNum * 0.45: + # time.sleep(0.2) + # tdDnodes.forcestop(dnodeId) + # while self.global_counter < self.tbNum * 0.50: + # time.sleep(0.2) + # tdDnodes.start(dnodeId) + + def run(self): + tdLog.info("================= creating database with replica 2") + threadId = 0 + threads = [] + try: + tdSql.execute("drop database if exists %s" % (self.db)) + tdSql.execute( + "create database %s replica 2 cache 1024 ablocks 2.0 tblocks 4 tables 1000" % + (self.db)) + tdLog.sleep(3) + tdSql.execute("use %s" % (self.db)) + tdSql.execute( + "create table %s (ts timestamp, c1 bigint, stime timestamp) tags(tg1 bigint)" % + (self.stb)) + tdLog.info("Start to create tables") + while threadId < self.threadNum: + tdLog.info("Thread-%d starts to create tables" % (threadId)) + cThread = threading.Thread( + target=self._createTable, + name="thread-%d" % + (threadId), + args=( + threadId, + )) + cThread.start() + threads.append(cThread) + threadId += 1 + + except Exception as e: + tdLog.info("Failed to create tb%d, exception: %s" % (i, str(e))) + # tdDnodes.stopAll() + finally: + time.sleep(1) + + threading.Thread( + target=self._interfereDnodes, + name="thread-interfereDnode%d" % + (3), + args=( + 1, + 3, + )).start() + for t in range(len(threads)): + tdLog.info("Join threads") + # threads[t].start() + threads[t].join() + + tdSql.query("show stables") + tdSql.checkData(0, 4, self.tbNum) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addCluster(__file__, TDTestCase()) diff --git a/tests/pytest/dbmgmt/database-name-boundary.py b/tests/pytest/dbmgmt/database-name-boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..ff6dce22ae5751ecf1f353818833a7cdeb40b611 --- /dev/null +++ b/tests/pytest/dbmgmt/database-name-boundary.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import datetime +import string +import random +import subprocess + +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + + chars = string.ascii_uppercase + string.ascii_lowercase + + getDbNameLen = "grep -w '#define TSDB_DB_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'" + dbNameMaxLen = int(subprocess.check_output(getDbNameLen, shell=True)) + tdLog.info("DB name max length is %d" % dbNameMaxLen) + + tdLog.info("=============== step1") + db_name = ''.join(random.choices(chars, k=(dbNameMaxLen + 1))) + tdLog.info('db_name length %d' % len(db_name)) + tdLog.info('create database %s' % db_name) + tdSql.error('create database %s' % db_name) + + tdLog.info("=============== step2") + db_name = ''.join(random.choices(chars, k=dbNameMaxLen)) + tdLog.info('db_name length %d' % len(db_name)) + tdLog.info('create database %s' % db_name) + tdSql.execute('create database %s' % db_name) + + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0, 0, db_name.lower()) + + tdLog.info("=============== step3") + db_name = ''.join(random.choices(chars, k=(dbNameMaxLen - 1))) + tdLog.info('db_name length %d' % len(db_name)) + tdLog.info('create database %s' % db_name) + tdSql.execute('create database %s' % db_name) + + tdSql.query('show databases') + tdSql.checkRows(2) + tdSql.checkData(0, 0, db_name.lower()) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/dbmgmt/dropDB_memory_test.py b/tests/pytest/dbmgmt/dropDB_memory_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b029945be244fac873e73dcdb6cd1c0a7abfcec3 --- /dev/null +++ b/tests/pytest/dbmgmt/dropDB_memory_test.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + tbNum = 10000 + insertRows = 1 + db = "db" + loop = 2 + tdSql.execute("drop database if exists %s" % (db)) + tdSql.execute("reset query cache") + tdLog.sleep(1) + for k in range(1, loop + 1): + tdLog.info("===========Loop%d starts============" % (k)) + tdSql.execute( + "create database %s cache 163840 ablocks 40 maxtables 5000 wal 0" % + (db)) + tdSql.execute("use %s" % (db)) + tdSql.execute( + "create table stb (ts timestamp, c1 int) tags(t1 bigint, t2 double)") + for j in range(1, tbNum): + tdSql.execute( + "create table tb%d using stb tags(%d, %d)" % + (j, j, j)) + + for j in range(1, tbNum): + for i in range(0, insertRows): + tdSql.execute( + "insert into tb%d values (now + %dm, %d)" % + (j, i, i)) + tdSql.query("select * from tb%d" % (j)) + tdSql.checkRows(insertRows) + tdLog.info("insert %d rows into tb%d" % (insertRows, j)) + # tdSql.sleep(3) + tdSql.execute("drop database %s" % (db)) + tdLog.sleep(2) + tdLog.info("===========Loop%d completed!=============" % (k)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +#tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e37dc6748a4f613ba9d85f3c8780c8a471bcf79c..efd9f7ce52666450ab88227feccb3722c7e45be7 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -10,10 +10,20 @@ python3 ./test.py $1 -f insert/tinyint.py python3 ./test.py $1 -f insert/date.py python3 ./test.py $1 -f insert/binary.py python3 ./test.py $1 -f insert/nchar.py +python3 ./test.py $1 -f insert/nchar-boundary.py +python3 ./test.py $1 -f insert/nchar-unicode.py +python3 ./test.py $1 -f insert/multi.py python3 ./test.py $1 -f table/column_name.py python3 ./test.py $1 -f table/column_num.py python3 ./test.py $1 -f table/db_table.py +python3 ./test.py $1 -f table/tablename-boundary.py + +# tag +python3 ./test.py $1 -f tag_lite/filter.py +python3 ./test.py $1 -f tag_lite/create-tags-boundary.py + +python3 ./test.py $1 -f dbmgmt/database-name-boundary.py python3 ./test.py $1 -f import_merge/importBlock1HO.py python3 ./test.py $1 -f import_merge/importBlock1HPO.py @@ -87,4 +97,5 @@ python3 ./test.py $1 -f user/user_create.py python3 ./test.py $1 -f user/pass_len.py # table -#python3 ./test.py $1 -f table/del_stable.py \ No newline at end of file +#python3 ./test.py $1 -f table/del_stable.py + diff --git a/tests/pytest/insert/binary-boundary.py b/tests/pytest/insert/binary-boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..583217a73261379bcb0101c360a51a7e29cf785d --- /dev/null +++ b/tests/pytest/insert/binary-boundary.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdLog.info('=============== 
step1') + tdLog.info('create table tb (ts timestamp, speed binary(4089))') + tdSql.error('create table tb (ts timestamp, speed binary(4089))') + tdLog.info('create table tb (ts timestamp, speed binary(4088))') + tdSql.error('create table tb (ts timestamp, speed binary(4088))') + tdLog.info('create table tb (ts timestamp, speed binary(4084))') + tdSql.execute('create table tb (ts timestamp, speed binary(4084))') + tdLog.info("insert into tb values (now, ) -x step1") + tdSql.error("insert into tb values (now, )") + + with open("../../README.md", "r") as inputFile: + data = inputFile.read(4084).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + + tdLog.info("insert %d length data: %s" % (len(data), data)) + + tdLog.info("insert into tb values (now+2a, data)") + tdSql.execute("insert into tb values (now+2a, '%s')" % data) + tdLog.info('select speed from tb order by ts desc') + tdSql.query('select speed from tb order by ts desc') + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + tdLog.info('==> $data01') + tdLog.info("tdSql.checkData(0, 1, '%s')" % data) + tdSql.checkData(0, 1, data) + + tdLog.info( + 'create table tb2 (ts timestamp, speed binary(2040), temp binary(2044))') + tdSql.execute( + 'create table tb2 (ts timestamp, speed binary(2040), temp binary(2044))') + speed = inputFile.read(2044).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + temp = inputFile.read(2040).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + tdLog.info("insert into tb values (now+3a, speed, temp)") + tdSql.error( + "insert into tb values (now+3a, '%s', '%s')" % + (speed, temp)) + + speed = inputFile.read(2040).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + temp = inputFile.read(2044).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + tdLog.info("insert into tb values (now+4a, speed, temp)") + tdSql.error( + "insert into tb values (now+4a, '%s', '%s')" % + (speed, temp)) + + tdLog.info('tdSql.checkRow(2)') + tdSql.checkRows(2) + tdLog.info('==> $data11') + tdLog.info("tdSql.checkData(1, 1, '%s')" % speed) + tdSql.checkData(1, 1, speed) + + tdLog.info('==> $data12') + tdLog.info("tdSql.checkData(1, 2, '%s')" % temp) + tdSql.checkData(1, 1, temp) + + inputFile.close() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/bool.py b/tests/pytest/insert/bool.py index 062563f4ab84c9e09ea3ed08ced60fac53b1add8..c175afd8b556f0a9e82c23c4a6d62eacf45a0f70 100644 --- a/tests/pytest/insert/bool.py +++ b/tests/pytest/insert/bool.py @@ -58,6 +58,14 @@ class TDTestCase: tdSql.query('select * from tb order by ts desc') tdLog.info('tdSql.checkRow(6)') tdSql.checkRows(6) + tdLog.info('=============== step7') + tdLog.info("insert into tb values (now+6m, true)") + tdSql.execute("insert into tb values (now+5m, true)") + tdLog.info('select * from tb order by ts desc') + tdSql.query('select * from tb 
order by ts desc') + tdLog.info('tdSql.checkRow(7)') + tdSql.checkRows(7) +# convert end # convert end def stop(self): diff --git a/tests/pytest/insert/float.py b/tests/pytest/insert/float.py index 30d7e223f18abea7d6c26ad87c63833572a2b248..414833877ea63fd405b71ced4124438b04c2d699 100644 --- a/tests/pytest/insert/float.py +++ b/tests/pytest/insert/float.py @@ -43,7 +43,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = 'select * from tb order by ts desc' tdLog.info(cmd) @@ -82,7 +82,7 @@ class TDTestCase: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = "insert into tb values (now+4a, 0)" tdLog.info(cmd) @@ -103,7 +103,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = "insert into tb values (now+5a, 2)" tdLog.info(cmd) @@ -124,7 +124,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = "insert into tb values (now+6a, 2)" tdLog.info(cmd) diff --git a/tests/pytest/insert/int.py b/tests/pytest/insert/int.py index d007cc56ea21d3b8c8fec5527e166e60cfe56cc0..350426a5bde54dcd535577265c6f405cca364f04 100644 --- a/tests/pytest/insert/int.py +++ b/tests/pytest/insert/int.py @@ -47,7 +47,7 @@ class TDTestCase: "This test failed: INT data overflow error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("INT data overflow error catched") + tdLog.info("INT data overflow error catched") cmd = 'insert into tb values (now+1m, NULL)' tdLog.info(cmd) @@ -76,7 +76,7 @@ class TDTestCase: "This test failed: INT data overflow error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("INT data overflow error catched") + tdLog.info("INT data overflow error catched") cmd = 'insert into tb values (now+3m, NULL)' tdLog.info(cmd) @@ -96,7 +96,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = 'insert into tb values (now+4m, 0)' tdLog.info(cmd) @@ -116,7 +116,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = 'insert into tb values (now+5m, 2)' tdLog.info(cmd) @@ -135,7 +135,7 @@ class TDTestCase: "This test failed: insert wrong data error _not_ catched") except Exception as e: tdLog.info(repr(e)) - tdLog.notice("insert wrong data error catched") + tdLog.info("insert wrong data error catched") cmd = 'insert into tb values (now+6m, 2)' tdLog.info(cmd) diff --git a/tests/pytest/insert/multi.py b/tests/pytest/insert/multi.py index 7725ccd210b2da63a7692b2bf17077c984dca1ab..c14d7dc2e0ac5d1317990cd2519671fdfb426b50 100644 --- a/tests/pytest/insert/multi.py +++ b/tests/pytest/insert/multi.py @@ -37,9 +37,15 @@ class TDTestCase: "CREATE TABLE if not exists dev_001 using st 
tags('dev_01')") print("==============step2") - tdLog.info("multiple inserts") + tdLog.info("multiple inserts by insert") tdSql.execute( - "INSERT INTO dev_001 VALUES ('2020-05-13 10:00:00.000', 1),('2020-05-13 10:00:00.001', 1)") + "insert INTO dev_001 VALUES ('2020-05-13 10:00:00.000', 1),('2020-05-13 10:00:00.001', 1)") + tdSql.checkAffectedRows(2) + + print("==============step3") + tdLog.info("multiple inserts by import") + tdSql.execute( + "import INTO dev_001 VALUES ('2020-05-13 10:00:00.000', 1),('2020-05-13 10:00:00.001', 1)") tdSql.checkAffectedRows(2) def stop(self): diff --git a/tests/pytest/insert/nchar-boundary.py b/tests/pytest/insert/nchar-boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..255cc5b79a57152c9efe1664ff1afbeb7c03abe4 --- /dev/null +++ b/tests/pytest/insert/nchar-boundary.py @@ -0,0 +1,65 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdSql.error('create table tb (ts timestamp, col nchar(1022))') + tdSql.execute('create table tb (ts timestamp, col nchar(1021))') + tdSql.execute("insert into tb values (now, 'taosdata')") + tdSql.query("select * from tb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 'taosdata') + + with open("../../README.md", "r") as inputFile: + data = inputFile.read(1021).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + + tdLog.info("insert %d length data: %s" % (len(data), data)) + + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(2) + tdSql.checkData(1, 1, data) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/nchar-unicode.py b/tests/pytest/insert/nchar-unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..12eef379d39c535c4357e7ce0d27ccdbdb1a0602 --- /dev/null +++ b/tests/pytest/insert/nchar-unicode.py @@ -0,0 +1,660 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdSql.error('create table tb (ts timestamp, col nchar(1022))') + tdSql.execute('create table tb (ts timestamp, col nchar(1021))') + tdSql.execute("insert into tb values (now, 'taosdata')") + tdSql.query("select * from tb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 'taosdata') + + with open("../../README.md", "r") as inputFile: + data = inputFile.read(1021).replace( + "\n", + " ").replace( + "\\", + " ").replace( + "\'", + " ").replace( + "\"", + " ").replace( + "[", + " ").replace( + "]", + " ").replace( + "!", + " ") + + tdLog.info("insert %d length data: %s" % (len(data), data)) + + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(2) + tdSql.checkData(1, 1, data) + + # https://www.ltg.ed.ac.uk/~richard/unicode-sample.html + # Basic Latin + data = r'! # $ % & ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~' + tdLog.info("insert Basic Latin %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(3) + tdSql.checkData(2, 1, data) + + # Latin-1 Supplement + data = ' ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ ­ ® ¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö × Ø Ù Ú Û Ü Ý Þ ß à á â ã ä å æ ç è é ê ë ì í î ï ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ' + tdLog.info( + "insert Latin-1 Supplement %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(4) + tdSql.checkData(3, 1, data) + + # Latin Extended-A + data = 'Ā ā Ă ă Ą ą Ć ć Ĉ ĉ Ċ ċ Č č Ď ď Đ đ Ē ē Ĕ ĕ Ė ė Ę ę Ě ě Ĝ ĝ Ğ ğ Ġ ġ Ģ ģ Ĥ ĥ Ħ ħ Ĩ ĩ Ī ī Ĭ ĭ Į į İ ı IJ ij Ĵ ĵ Ķ ķ ĸ Ĺ ĺ Ļ ļ Ľ ľ Ŀ ŀ Ł ł Ń ń Ņ ņ Ň ň ʼn Ŋ ŋ Ō ō Ŏ ŏ Ő ő Œ œ Ŕ ŕ Ŗ ŗ Ř ř Ś ś Ŝ ŝ Ş ş Š š Ţ ţ Ť ť Ŧ ŧ Ũ ũ Ū ū Ŭ ŭ Ů ů Ű ű Ų ų Ŵ ŵ Ŷ ŷ Ÿ Ź ź Ż ż Ž ž ſ' + tdLog.info( + "insert Latin Extended-A %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(5) + tdSql.checkData(4, 1, data) + + # Latin Extended-B + data = 'ƀ Ɓ Ƃ ƃ Ƅ ƅ Ɔ Ƈ ƈ Ɖ Ɗ Ƌ ƌ ƍ Ǝ Ə Ɛ Ƒ ƒ Ɠ Ɣ ƕ Ɩ Ɨ Ƙ ƙ ƚ ƛ Ɯ Ɲ ƞ Ɵ Ơ ơ Ƣ ƣ Ƥ ƥ Ʀ Ƨ ƨ Ʃ ƪ ƫ Ƭ ƭ Ʈ Ư ư Ʊ Ʋ Ƴ ƴ Ƶ ƶ Ʒ Ƹ ƹ ƺ ƻ Ƽ ƽ ƾ ƿ ǀ ǁ ǂ ǃ DŽ Dž dž LJ Lj lj NJ Nj nj Ǎ ǎ Ǐ ǐ Ǒ ǒ Ǔ ǔ Ǖ ǖ Ǘ ǘ Ǚ ǚ Ǜ ǜ ǝ Ǟ ǟ Ǡ ǡ Ǣ ǣ Ǥ ǥ Ǧ ǧ Ǩ ǩ Ǫ ǫ Ǭ ǭ Ǯ ǯ ǰ DZ Dz dz Ǵ ǵ Ǻ ǻ Ǽ ǽ Ǿ ǿ Ȁ ȁ Ȃ ȃ ...' 
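+ # every Unicode sample in this test follows the same pattern: insert the string as an nchar value, read it back and compare it verbatim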
+ tdLog.info( + "insert Latin Extended-B %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(6) + tdSql.checkData(5, 1, data) + + # IPA Extensions + data = 'ɐ ɑ ɒ ɓ ɔ ɕ ɖ ɗ ɘ ə ɚ ɛ ɜ ɝ ɞ ɟ ɠ ɡ ɢ ɣ ɤ ɥ ɦ ɧ ɨ ɩ ɪ ɫ ɬ ɭ ɮ ɯ ɰ ɱ ɲ ɳ ɴ ɵ ɶ ɷ ɸ ɹ ɺ ɻ ɼ ɽ ɾ ɿ ʀ ʁ ʂ ʃ ʄ ʅ ʆ ʇ ʈ ʉ ʊ ʋ ʌ ʍ ʎ ʏ ʐ ʑ ʒ ʓ ʔ ʕ ʖ ʗ ʘ ʙ ʚ ʛ ʜ ʝ ʞ ʟ ʠ ʡ ʢ ʣ ʤ ʥ ʦ ʧ ʨ' + tdLog.info( + "insert IPA Extensions %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(7) + tdSql.checkData(6, 1, data) + + # Spacing Modifier Letters + data = 'ʰ ʱ ʲ ʳ ʴ ʵ ʶ ʷ ʸ ʹ ʺ ʻ ʼ ʽ ʾ ʿ ˀ ˁ ˂ ˃ ˄ ˅ ˆ ˇ ˈ ˉ ˊ ˋ ˌ ˍ ˎ ˏ ː ˑ ˒ ˓ ˔ ˕ ˖ ˗ ˘ ˙ ˚ ˛ ˜ ˝ ˞ ˠ ˡ ˢ ˣ ˤ ˥ ˦ ˧ ˨ ˩' + tdLog.info( + "insert Spacing Modifier Letters %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(8) + tdSql.checkData(7, 1, data) + + # Combining Diacritical Marks + data = '̀ ́ ̂ ̃ ̄ ̅ ̆ ̇ ̈ ̉ ̊ ̋ ̌ ̍ ̎ ̏ ̐ ̑ ̒ ̓ ̔ ̕ ̖ ̗ ̘ ̙ ̚ ̛ ̜ ̝ ̞ ̟ ̠ ̡ ̢ ̣ ̤ ̥ ̦ ̧ ̨ ̩ ̪ ̫ ̬ ̭ ̮ ̯ ̰ ̱ ̲ ̳ ̴ ̵ ̶ ̷ ̸ ̹ ̺ ̻ ̼ ̽ ̾ ̿ ̀ ́ ͂ ̓ ̈́ ͅ ͠ ͡' + tdLog.info( + "insert Combining Diacritical Marks %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(9) + tdSql.checkData(8, 1, data) + + # Greek + data = 'ʹ ͵ ͺ ; ΄ ΅ Ά · Έ Ή Ί Ό Ύ Ώ ΐ Α Β Γ Δ Ε Ζ Η Θ Ι Κ Λ Μ Ν Ξ Ο Π Ρ Σ Τ Υ Φ Χ Ψ Ω Ϊ Ϋ ά έ ή ί ΰ α β γ δ ε ζ η θ ι κ λ μ ν ξ ο π ρ ς σ τ υ φ χ ψ ω ϊ ϋ ό ύ ώ ϐ ϑ ϒ ϓ ϔ ϕ ϖ Ϛ Ϝ Ϟ Ϡ Ϣ ϣ Ϥ ϥ Ϧ ϧ Ϩ ϩ Ϫ ϫ Ϭ ϭ Ϯ ϯ ϰ ϱ ϲ ϳ' + tdLog.info("insert Greek %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(10) + tdSql.checkData(9, 1, data) + + # Cyrillic + data = 'Ё Ђ Ѓ Є Ѕ І Ї Ј Љ Њ Ћ Ќ Ў Џ А Б В Г Д Е Ж З И Й К Л М Н О П Р С Т У Ф Х Ц Ч Ш Щ Ъ Ы Ь Э Ю Я а б в г д е ж з и й к л м н о п р с т у ф х ц ч ш щ ъ ы ь э ю я ё ђ ѓ є ѕ і ї ј љ њ ћ ќ ў џ Ѡ ѡ Ѣ ѣ Ѥ ѥ Ѧ ѧ Ѩ ѩ Ѫ ѫ Ѭ ѭ Ѯ ѯ Ѱ ѱ Ѳ ѳ Ѵ ѵ Ѷ ѷ Ѹ ѹ Ѻ ѻ Ѽ ѽ Ѿ ѿ Ҁ ҁ ҂ ҃ ...' + tdLog.info("insert Cyrillic %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(11) + tdSql.checkData(10, 1, data) + + # Armenian + data = 'Ա Բ Գ Դ Ե Զ Է Ը Թ Ժ Ի Լ Խ Ծ Կ Հ Ձ Ղ Ճ Մ Յ Ն Շ Ո Չ Պ Ջ Ռ Ս Վ Տ Ր Ց Ւ Փ Ք Օ Ֆ ՙ ՚ ՛ ՜ ՝ ՞ ՟ ա բ գ դ ե զ է ը թ ժ ի լ խ ծ կ հ ձ ղ ճ մ յ ն շ ո չ պ ջ ռ ս վ տ ր ց ւ փ ք օ ֆ և ։' + tdLog.info("insert Armenian %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(12) + tdSql.checkData(11, 1, data) + + # Hebrew + data = ' ֒ ֓ ֔ ֕ ֖ ֗ ֘ ֙ ֚ ֛ ֜ ֝ ֞ ֟ ֠ ֡ ֣ ֤ ֥ ֦ ֧ ֨ ֩ ֪ ֫ ֬ ֭ ֮ ֯ ְ ֱ ֲ ֳ ִ ֵ ֶ ַ ָ ֹ ֻ ּ ֽ ־ ֿ ׀ ׁ ׂ ׃ ׄ א ב ג ד ה ו ז ח ט י ך כ ל ם מ ן נ ס ע ף פ ץ צ ק ר ש ת װ ױ ײ ׳ ״' + tdLog.info("insert Hebrew %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(13) + tdSql.checkData(12, 1, data) + + # Arabic + data = '، ؛ ؟ ء آ أ ؤ إ ئ ا ب ة ت ث ج ح خ د ذ ر ز س ش ص ض ط ظ ع غ ـ ف ق ك ل م ن ه و ى ي ً ٌ ٍ َ ُ ِ ّ ْ ٠ ١ ٢ ٣ ٤ ٥ ٦ ٧ ٨ ٩ ٪ ٫ ٬ ٭ ٰ ٱ ٲ ٳ ٴ ٵ ٶ ٷ ٸ ٹ ٺ ٻ ټ ٽ پ ٿ ڀ ځ ڂ ڃ ڄ څ چ ڇ ڈ ډ ڊ ڋ ڌ ڍ ڎ ڏ ڐ ڑ ڒ ړ ڔ ڕ ږ ڗ ژ ڙ ښ ڛ ڜ ڝ ڞ ڟ ڠ ڡ ڢ ڣ ڤ ڥ ڦ ڧ ڨ ک ڪ ګ ڬ ڭ ڮ گ ڰ ڱ ...' 
+ tdLog.info( + "FAILED: insert Arabic %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(14) + tdSql.checkData(13, 1, data) + + # Devanagari + data = 'ँ ं ः अ आ इ ई उ ऊ ऋ ऌ ऍ ऎ ए ऐ ऑ ऒ ओ औ क ख ग घ ङ च छ ज झ ञ ट ठ ड ढ ण त थ द ध न ऩ प फ ब भ म य र ऱ ल ळ ऴ व श ष स ह ़ ऽ ा ि ी ु ू ृ ॄ ॅ ॆ े ै ॉ ॊ ो ौ ् ॐ ॑ ॒ ॓ ॔ क़ ख़ ग़ ज़ ड़ ढ़ फ़ य़ ॠ ॡ ॢ ॣ । ॥ ० १ २ ३ ४ ५ ६ ७ ८ ९ ॰' + tdLog.info("insert Devanagari %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(15) + tdSql.checkData(14, 1, data) + + # Bengali + data = 'ঁ ং ঃ অ আ ই ঈ উ ঊ ঋ ঌ এ ঐ ও ঔ ক খ গ ঘ ঙ চ ছ জ ঝ ঞ ট ঠ ড ঢ ণ ত থ দ ধ ন প ফ ব ভ ম য র ল শ ষ স হ ় া ি ী ু ূ ৃ ৄ ে ৈ ো ৌ ্ ৗ ড় ঢ় য় ৠ ৡ ৢ ৣ ০ ১ ২ ৩ ৪ ৫ ৬ ৭ ৮ ৯ ৰ ৱ ৲ ৳ ৴ ৵ ৶ ৷ ৸ ৹ ৺' + tdLog.info("insert Bengali %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(16) + tdSql.checkData(15, 1, data) + + # Gurmukhi + data = 'ਂ ਅ ਆ ਇ ਈ ਉ ਊ ਏ ਐ ਓ ਔ ਕ ਖ ਗ ਘ ਙ ਚ ਛ ਜ ਝ ਞ ਟ ਠ ਡ ਢ ਣ ਤ ਥ ਦ ਧ ਨ ਪ ਫ ਬ ਭ ਮ ਯ ਰ ਲ ਲ਼ ਵ ਸ਼ ਸ ਹ ਼ ਾ ਿ ੀ ੁ ੂ ੇ ੈ ੋ ੌ ੍ ਖ਼ ਗ਼ ਜ਼ ੜ ਫ਼ ੦ ੧ ੨ ੩ ੪ ੫ ੬ ੭ ੮ ੯ ੰ ੱ ੲ ੳ ੴ' + tdLog.info("insert Gurmukhi %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(17) + tdSql.checkData(16, 1, data) + + # Gujarati + data = 'ઁ ં ઃ અ આ ઇ ઈ ઉ ઊ ઋ ઍ એ ઐ ઑ ઓ ઔ ક ખ ગ ઘ ઙ ચ છ જ ઝ ઞ ટ ઠ ડ ઢ ણ ત થ દ ધ ન પ ફ બ ભ મ ય ર લ ળ વ શ ષ સ હ ઼ ઽ ા િ ી ુ ૂ ૃ ૄ ૅ ે ૈ ૉ ો ૌ ્ ૐ ૠ ૦ ૧ ૨ ૩ ૪ ૫ ૬ ૭ ૮ ૯' + tdLog.info("insert Gujarati %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(18) + tdSql.checkData(17, 1, data) + + # Oriya + data = 'ଁ ଂ ଃ ଅ ଆ ଇ ଈ ଉ ଊ ଋ ଌ ଏ ଐ ଓ ଔ କ ଖ ଗ ଘ ଙ ଚ ଛ ଜ ଝ ଞ ଟ ଠ ଡ ଢ ଣ ତ ଥ ଦ ଧ ନ ପ ଫ ବ ଭ ମ ଯ ର ଲ ଳ ଶ ଷ ସ ହ ଼ ଽ ା ି ୀ ୁ ୂ ୃ େ ୈ ୋ ୌ ୍ ୖ ୗ ଡ଼ ଢ଼ ୟ ୠ ୡ ୦ ୧ ୨ ୩ ୪ ୫ ୬ ୭ ୮ ୯ ୰' + tdLog.info("insert Oriya %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(19) + tdSql.checkData(18, 1, data) + + # Tamil + data = 'ஂ ஃ அ ஆ இ ஈ உ ஊ எ ஏ ஐ ஒ ஓ ஔ க ங ச ஜ ஞ ட ண த ந ன ப ம ய ர ற ல ள ழ வ ஷ ஸ ஹ ா ி ீ ு ூ ெ ே ை ொ ோ ௌ ் ௗ ௧ ௨ ௩ ௪ ௫ ௬ ௭ ௮ ௯ ௰ ௱ ௲' + tdLog.info("insert Tamil %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(20) + tdSql.checkData(19, 1, data) + + # Telugu + data = 'ఁ ం ః అ ఆ ఇ ఈ ఉ ఊ ఋ ఌ ఎ ఏ ఐ ఒ ఓ ఔ క ఖ గ ఘ ఙ చ ఛ జ ఝ ఞ ట ఠ డ ఢ ణ త థ ద ధ న ప ఫ బ భ మ య ర ఱ ల ళ వ శ ష స హ ా ి ీ ు ూ ృ ౄ ె ే ై ొ ో ౌ ్ ౕ ౖ ౠ ౡ ౦ ౧ ౨ ౩ ౪ ౫ ౬ ౭ ౮ ౯' + tdLog.info("insert Telugu %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(21) + tdSql.checkData(20, 1, data) + + # Kannada + data = 'ಂ ಃ ಅ ಆ ಇ ಈ ಉ ಊ ಋ ಌ ಎ ಏ ಐ ಒ ಓ ಔ ಕ ಖ ಗ ಘ ಙ ಚ ಛ ಜ ಝ ಞ ಟ ಠ ಡ ಢ ಣ ತ ಥ ದ ಧ ನ ಪ ಫ ಬ ಭ ಮ ಯ ರ ಱ ಲ ಳ ವ ಶ ಷ ಸ ಹ ಾ ಿ ೀ ು ೂ ೃ ೄ ೆ ೇ ೈ ೊ ೋ ೌ ್ ೕ ೖ ೞ ೠ ೡ ೦ ೧ ೨ ೩ ೪ ೫ ೬ ೭ ೮ ೯' + tdLog.info("insert Kannada %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(22) + tdSql.checkData(21, 1, data) + + # Malayalam + data = 'ം ഃ അ ആ ഇ ഈ ഉ ഊ ഋ ഌ എ ഏ ഐ ഒ ഓ ഔ ക ഖ ഗ ഘ ങ ച ഛ ജ ഝ ഞ 
ട ഠ ഡ ഢ ണ ത ഥ ദ ധ ന പ ഫ ബ ഭ മ യ ര റ ല ള ഴ വ ശ ഷ സ ഹ ാ ി ീ ു ൂ ൃ െ േ ൈ ൊ ോ ൌ ് ൗ ൠ ൡ ൦ ൧ ൨ ൩ ൪ ൫ ൬ ൭ ൮ ൯' + tdLog.info("insert Malayalam %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(23) + tdSql.checkData(22, 1, data) + + # Thai + data = 'ก ข ฃ ค ฅ ฆ ง จ ฉ ช ซ ฌ ญ ฎ ฏ ฐ ฑ ฒ ณ ด ต ถ ท ธ น บ ป ผ ฝ พ ฟ ภ ม ย ร ฤ ล ฦ ว ศ ษ ส ห ฬ อ ฮ ฯ ะ ั า ำ ิ ี ึ ื ุ ู ฺ ฿ เ แ โ ใ ไ ๅ ๆ ็ ่ ้ ๊ ๋ ์ ํ ๎ ๏ ๐ ๑ ๒ ๓ ๔ ๕ ๖ ๗ ๘ ๙ ๚ ๛' + tdLog.info("insert Thai %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(24) + tdSql.checkData(23, 1, data) + + # Thai + data = 'ก ข ฃ ค ฅ ฆ ง จ ฉ ช ซ ฌ ญ ฎ ฏ ฐ ฑ ฒ ณ ด ต ถ ท ธ น บ ป ผ ฝ พ ฟ ภ ม ย ร ฤ ล ฦ ว ศ ษ ส ห ฬ อ ฮ ฯ ะ ั า ำ ิ ี ึ ื ุ ู ฺ ฿ เ แ โ ใ ไ ๅ ๆ ็ ่ ้ ๊ ๋ ์ ํ ๎ ๏ ๐ ๑ ๒ ๓ ๔ ๕ ๖ ๗ ๘ ๙ ๚ ๛' + tdLog.info("insert Thai %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(25) + tdSql.checkData(24, 1, data) + + # Lao + data = 'ກ ຂ ຄ ງ ຈ ຊ ຍ ດ ຕ ຖ ທ ນ ບ ປ ຜ ຝ ພ ຟ ມ ຢ ຣ ລ ວ ສ ຫ ອ ຮ ຯ ະ ັ າ ຳ ິ ີ ຶ ື ຸ ູ ົ ຼ ຽ ເ ແ ໂ ໃ ໄ ໆ ່ ້ ໊ ໋ ໌ ໍ ໐ ໑ ໒ ໓ ໔ ໕ ໖ ໗ ໘ ໙ ໜ ໝ' + tdLog.info("insert Lao %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(26) + tdSql.checkData(25, 1, data) + + # Tibetan + data = 'ༀ ༁ ༂ ༃ ༄ ༅ ༆ ༇ ༈ ༉ ༊ ་ ༌ ། ༎ ༏ ༐ ༑ ༒ ༓ ༔ ༕ ༖ ༗ ༘ ༙ ༚ ༛ ༜ ༝ ༞ ༟ ༠ ༡ ༢ ༣ ༤ ༥ ༦ ༧ ༨ ༩ ༪ ༫ ༬ ༭ ༮ ༯ ༰ ༱ ༲ ༳ ༴ ༵ ༶ ༷ ༸ ༹ ༺ ༻ ༼ ༽ ༾ ༿ ཀ ཁ ག གྷ ང ཅ ཆ ཇ ཉ ཊ ཋ ཌ ཌྷ ཎ ཏ ཐ ད དྷ ན པ ཕ བ བྷ མ ཙ ཚ ཛ ཛྷ ཝ ཞ ཟ འ ཡ ར ལ ཤ ཥ ས ཧ ཨ ཀྵ ཱ ི ཱི ུ ཱུ ྲྀ ཷ ླྀ ཹ ེ ཻ ོ ཽ ཾ ཿ ྀ ཱྀ ྂ ྃ ྄ ྅ ྆ ྇ ...' + tdLog.info("insert Tibetan %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(27) + tdSql.checkData(26, 1, data) + + # Georgian + data = 'Ⴀ Ⴁ Ⴂ Ⴃ Ⴄ Ⴅ Ⴆ Ⴇ Ⴈ Ⴉ Ⴊ Ⴋ Ⴌ Ⴍ Ⴎ Ⴏ Ⴐ Ⴑ Ⴒ Ⴓ Ⴔ Ⴕ Ⴖ Ⴗ Ⴘ Ⴙ Ⴚ Ⴛ Ⴜ Ⴝ Ⴞ Ⴟ Ⴠ Ⴡ Ⴢ Ⴣ Ⴤ Ⴥ ა ბ გ დ ე ვ ზ თ ი კ ლ მ ნ ო პ ჟ რ ს ტ უ ფ ქ ღ ყ შ ჩ ც ძ წ ჭ ხ ჯ ჰ ჱ ჲ ჳ ჴ ჵ ჶ ჻' + tdLog.info("insert Georgian %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(28) + tdSql.checkData(27, 1, data) + + # Hangul Jamo + data = 'ᄀ ᄁ ᄂ ᄃ ᄄ ᄅ ᄆ ᄇ ᄈ ᄉ ᄊ ᄋ ᄌ ᄍ ᄎ ᄏ ᄐ ᄑ ᄒ ᄓ ᄔ ᄕ ᄖ ᄗ ᄘ ᄙ ᄚ ᄛ ᄜ ᄝ ᄞ ᄟ ᄠ ᄡ ᄢ ᄣ ᄤ ᄥ ᄦ ᄧ ᄨ ᄩ ᄪ ᄫ ᄬ ᄭ ᄮ ᄯ ᄰ ᄱ ᄲ ᄳ ᄴ ᄵ ᄶ ᄷ ᄸ ᄹ ᄺ ᄻ ᄼ ᄽ ᄾ ᄿ ᅀ ᅁ ᅂ ᅃ ᅄ ᅅ ᅆ ᅇ ᅈ ᅉ ᅊ ᅋ ᅌ ᅍ ᅎ ᅏ ᅐ ᅑ ᅒ ᅓ ᅔ ᅕ ᅖ ᅗ ᅘ ᅙ ᅟ ᅠ ᅡ ᅢ ᅣ ᅤ ᅥ ᅦ ᅧ ᅨ ᅩ ᅪ ᅫ ᅬ ᅭ ᅮ ᅯ ᅰ ᅱ ᅲ ᅳ ᅴ ᅵ ᅶ ᅷ ᅸ ᅹ ᅺ ᅻ ᅼ ᅽ ᅾ ᅿ ᆀ ᆁ ᆂ ᆃ ᆄ ...' + tdLog.info("insert Hangul Jamo %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(29) + tdSql.checkData(28, 1, data) + + # Latin Extended Additional + data = 'Ḁ ḁ Ḃ ḃ Ḅ ḅ Ḇ ḇ Ḉ ḉ Ḋ ḋ Ḍ ḍ Ḏ ḏ Ḑ ḑ Ḓ ḓ Ḕ ḕ Ḗ ḗ Ḙ ḙ Ḛ ḛ Ḝ ḝ Ḟ ḟ Ḡ ḡ Ḣ ḣ Ḥ ḥ Ḧ ḧ Ḩ ḩ Ḫ ḫ Ḭ ḭ Ḯ ḯ Ḱ ḱ Ḳ ḳ Ḵ ḵ Ḷ ḷ Ḹ ḹ Ḻ ḻ Ḽ ḽ Ḿ ḿ Ṁ ṁ Ṃ ṃ Ṅ ṅ Ṇ ṇ Ṉ ṉ Ṋ ṋ Ṍ ṍ Ṏ ṏ Ṑ ṑ Ṓ ṓ Ṕ ṕ Ṗ ṗ Ṙ ṙ Ṛ ṛ Ṝ ṝ Ṟ ṟ Ṡ ṡ Ṣ ṣ Ṥ ṥ Ṧ ṧ Ṩ ṩ Ṫ ṫ Ṭ ṭ Ṯ ṯ Ṱ ṱ Ṳ ṳ Ṵ ṵ Ṷ ṷ Ṹ ṹ Ṻ ṻ Ṽ ṽ Ṿ ṿ ...' 
+ tdLog.info( + "insert Latin Extended Additional %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(30) + tdSql.checkData(29, 1, data) + + # Geek Extended + data = 'ἀ ἁ ἂ ἃ ἄ ἅ ἆ ἇ Ἀ Ἁ Ἂ Ἃ Ἄ Ἅ Ἆ Ἇ ἐ ἑ ἒ ἓ ἔ ἕ Ἐ Ἑ Ἒ Ἓ Ἔ Ἕ ἠ ἡ ἢ ἣ ἤ ἥ ἦ ἧ Ἠ Ἡ Ἢ Ἣ Ἤ Ἥ Ἦ Ἧ ἰ ἱ ἲ ἳ ἴ ἵ ἶ ἷ Ἰ Ἱ Ἲ Ἳ Ἴ Ἵ Ἶ Ἷ ὀ ὁ ὂ ὃ ὄ ὅ Ὀ Ὁ Ὂ Ὃ Ὄ Ὅ ὐ ὑ ὒ ὓ ὔ ὕ ὖ ὗ Ὑ Ὓ Ὕ Ὗ ὠ ὡ ὢ ὣ ὤ ὥ ὦ ὧ Ὠ Ὡ Ὢ Ὣ Ὤ Ὥ Ὦ Ὧ ὰ ά ὲ έ ὴ ή ὶ ί ὸ ό ὺ ύ ὼ ώ ᾀ ᾁ ᾂ ᾃ ᾄ ᾅ ᾆ ᾇ ᾈ ᾉ ᾊ ᾋ ᾌ ᾍ ...' + tdLog.info( + "insert Geek Extended %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(31) + tdSql.checkData(30, 1, data) + + # General Punctuation + data = '                      ‐ ‑ ‒ – — ― ‖ ‗ ‘ ’ ‚ ‛ “ ” „ ‟ † ‡ • ‣ ․ ‥ … ‧ 
 
 ' + tdLog.info( + "insert General Punctuation %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(32) + tdSql.checkData(31, 1, data) + + # Superscripts and Subscripts + data = '⁰ ⁴ ⁵ ⁶ ⁷ ⁸ ⁹ ⁺ ⁻ ⁼ ⁽ ⁾ ⁿ ₀ ₁ ₂ ₃ ₄ ₅ ₆ ₇ ₈ ₉ ₊ ₋ ₌ ₍ ₎' + tdLog.info( + "insert Superscripts and Subscripts %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(33) + tdSql.checkData(32, 1, data) + + # Currency Symbols + data = '₠ ₡ ₢ ₣ ₤ ₥ ₦ ₧ ₨ ₩ ₪ ₫' + tdLog.info( + "insert Currency Symbols %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(34) + tdSql.checkData(33, 1, data) + + # Combining Marks for Symbols + data = '⃐ ⃑ ⃒ ⃓ ⃔ ⃕ ⃖ ⃗ ⃘ ⃙ ⃚ ⃛ ⃜ ⃝ ⃞ ⃟ ⃠ ⃡' + tdLog.info( + "insert Combining Marks for Symbols %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(35) + tdSql.checkData(34, 1, data) + + # Letterlike Symbols + data = '℀ ℁ ℂ ℃ ℄ ℅ ℆ ℇ ℈ ℉ ℊ ℋ ℌ ℍ ℎ ℏ ℐ ℑ ℒ ℓ ℔ ℕ № ℗ ℘ ℙ ℚ ℛ ℜ ℝ ℞ ℟ ℠ ℡ ™ ℣ ℤ ℥ Ω ℧ ℨ ℩ K Å ℬ ℭ ℮ ℯ ℰ ℱ Ⅎ ℳ ℴ ℵ ℶ ℷ ℸ' + tdLog.info( + "insert Letterlike Symbols %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(36) + tdSql.checkData(35, 1, data) + + # Number Forms + data = '⅓ ⅔ ⅕ ⅖ ⅗ ⅘ ⅙ ⅚ ⅛ ⅜ ⅝ ⅞ ⅟ Ⅰ Ⅱ Ⅲ Ⅳ Ⅴ Ⅵ Ⅶ Ⅷ Ⅸ Ⅹ Ⅺ Ⅻ Ⅼ Ⅽ Ⅾ Ⅿ ⅰ ⅱ ⅲ ⅳ ⅴ ⅵ ⅶ ⅷ ⅸ ⅹ ⅺ ⅻ ⅼ ⅽ ⅾ ⅿ ↀ ↁ ↂ' + tdLog.info( + "insert Number Forms %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(37) + tdSql.checkData(36, 1, data) + + # Arrows + data = '← ↑ → ↓ ↔ ↕ ↖ ↗ ↘ ↙ ↚ ↛ ↜ ↝ ↞ ↟ ↠ ↡ ↢ ↣ ↤ ↥ ↦ ↧ ↨ ↩ ↪ ↫ ↬ ↭ ↮ ↯ ↰ ↱ ↲ ↳ ↴ ↵ ↶ ↷ ↸ ↹ ↺ ↻ ↼ ↽ ↾ ↿ ⇀ ⇁ ⇂ ⇃ ⇄ ⇅ ⇆ ⇇ ⇈ ⇉ ⇊ ⇋ ⇌ ⇍ ⇎ ⇏ ⇐ ⇑ ⇒ ⇓ ⇔ ⇕ ⇖ ⇗ ⇘ ⇙ ⇚ ⇛ ⇜ ⇝ ⇞ ⇟ ⇠ ⇡ ⇢ ⇣ ⇤ ⇥ ⇦ ⇧ ⇨ ⇩ ⇪' + tdLog.info("insert Arrows %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(38) + tdSql.checkData(37, 1, data) + + # Mathematical Operators + data = '∀ ∁ ∂ ∃ ∄ ∅ ∆ ∇ ∈ ∉ ∊ ∋ ∌ ∍ ∎ ∏ ∐ ∑ − ∓ ∔ ∕ ∖ ∗ ∘ ∙ √ ∛ ∜ ∝ ∞ ∟ ∠ ∡ ∢ ∣ ∤ ∥ ∦ ∧ ∨ ∩ ∪ ∫ ∬ ∭ ∮ ∯ ∰ ∱ ∲ ∳ ∴ ∵ ∶ ∷ ∸ ∹ ∺ ∻ ∼ ∽ ∾ ∿ ≀ ≁ ≂ ≃ ≄ ≅ ≆ ≇ ≈ ≉ ≊ ≋ ≌ ≍ ≎ ≏ ≐ ≑ ≒ ≓ ≔ ≕ ≖ ≗ ≘ ≙ ≚ ≛ ≜ ≝ ≞ ≟ ≠ ≡ ≢ ≣ ≤ ≥ ≦ ≧ ≨ ≩ ≪ ≫ ≬ ≭ ≮ ≯ ≰ ≱ ≲ ≳ ≴ ≵ ≶ ≷ ≸ ≹ ≺ ≻ ≼ ≽ ≾ ≿ ...' 
+ tdLog.info( + "insert Mathematical Operators %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(39) + tdSql.checkData(38, 1, data) + + # Miscellaneous Technical + data = '⌀ ⌂ ⌃ ⌄ ⌅ ⌆ ⌇ ⌈ ⌉ ⌊ ⌋ ⌌ ⌍ ⌎ ⌏ ⌐ ⌑ ⌒ ⌓ ⌔ ⌕ ⌖ ⌗ ⌘ ⌙ ⌚ ⌛ ⌜ ⌝ ⌞ ⌟ ⌠ ⌡ ⌢ ⌣ ⌤ ⌥ ⌦ ⌧ ⌨ 〈 〉 ⌫ ⌬ ⌭ ⌮ ⌯ ⌰ ⌱ ⌲ ⌳ ⌴ ⌵ ⌶ ⌷ ⌸ ⌹ ⌺ ⌻ ⌼ ⌽ ⌾ ⌿ ⍀ ⍁ ⍂ ⍃ ⍄ ⍅ ⍆ ⍇ ⍈ ⍉ ⍊ ⍋ ⍌ ⍍ ⍎ ⍏ ⍐ ⍑ ⍒ ⍓ ⍔ ⍕ ⍖ ⍗ ⍘ ⍙ ⍚ ⍛ ⍜ ⍝ ⍞ ⍟ ⍠ ⍡ ⍢ ⍣ ⍤ ⍥ ⍦ ⍧ ⍨ ⍩ ⍪ ⍫ ⍬ ⍭ ⍮ ⍯ ⍰ ⍱ ⍲ ⍳ ⍴ ⍵ ⍶ ⍷ ⍸ ⍹ ⍺' + tdLog.info( + "insert Miscellaneous Technical %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(40) + tdSql.checkData(39, 1, data) + + # Control Pictures + data = '␀ ␁ ␂ ␃ ␄ ␅ ␆ ␇ ␈ ␉ ␊ ␋ ␌ ␍ ␎ ␏ ␐ ␑ ␒ ␓ ␔ ␕ ␖ ␗ ␘ ␙ ␚ ␛ ␜ ␝ ␞ ␟ ␠ ␡ ␢ ␣ ␤' + tdLog.info( + "insert Control Pictures %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(41) + tdSql.checkData(40, 1, data) + + # Optical Character Recognition + data = '⑀ ⑁ ⑂ ⑃ ⑄ ⑅ ⑆ ⑇ ⑈ ⑉ ⑊' + tdLog.info( + "insert Optical Character Recognition %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(42) + tdSql.checkData(41, 1, data) + + # Enclosed Alphanumerics + data = '① ② ③ ④ ⑤ ⑥ ⑦ ⑧ ⑨ ⑩ ⑪ ⑫ ⑬ ⑭ ⑮ ⑯ ⑰ ⑱ ⑲ ⑳ ⑴ ⑵ ⑶ ⑷ ⑸ ⑹ ⑺ ⑻ ⑼ ⑽ ⑾ ⑿ ⒀ ⒁ ⒂ ⒃ ⒄ ⒅ ⒆ ⒇ ⒈ ⒉ ⒊ ⒋ ⒌ ⒍ ⒎ ⒏ ⒐ ⒑ ⒒ ⒓ ⒔ ⒕ ⒖ ⒗ ⒘ ⒙ ⒚ ⒛ ⒜ ⒝ ⒞ ⒟ ⒠ ⒡ ⒢ ⒣ ⒤ ⒥ ⒦ ⒧ ⒨ ⒩ ⒪ ⒫ ⒬ ⒭ ⒮ ⒯ ⒰ ⒱ ⒲ ⒳ ⒴ ⒵ Ⓐ Ⓑ Ⓒ Ⓓ Ⓔ Ⓕ Ⓖ Ⓗ Ⓘ Ⓙ Ⓚ Ⓛ Ⓜ Ⓝ Ⓞ Ⓟ Ⓠ Ⓡ Ⓢ Ⓣ Ⓤ Ⓥ Ⓦ Ⓧ Ⓨ Ⓩ ⓐ ⓑ ⓒ ⓓ ⓔ ⓕ ⓖ ⓗ ⓘ ⓙ ⓚ ⓛ ⓜ ⓝ ⓞ ⓟ ...' + tdLog.info( + "insert Enclosed Alphanumerics %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(43) + tdSql.checkData(42, 1, data) + + # Box Drawing + data = '─ ━ │ ┃ ┄ ┅ ┆ ┇ ┈ ┉ ┊ ┋ ┌ ┍ ┎ ┏ ┐ ┑ ┒ ┓ └ ┕ ┖ ┗ ┘ ┙ ┚ ┛ ├ ┝ ┞ ┟ ┠ ┡ ┢ ┣ ┤ ┥ ┦ ┧ ┨ ┩ ┪ ┫ ┬ ┭ ┮ ┯ ┰ ┱ ┲ ┳ ┴ ┵ ┶ ┷ ┸ ┹ ┺ ┻ ┼ ┽ ┾ ┿ ╀ ╁ ╂ ╃ ╄ ╅ ╆ ╇ ╈ ╉ ╊ ╋ ╌ ╍ ╎ ╏ ═ ║ ╒ ╓ ╔ ╕ ╖ ╗ ╘ ╙ ╚ ╛ ╜ ╝ ╞ ╟ ╠ ╡ ╢ ╣ ╤ ╥ ╦ ╧ ╨ ╩ ╪ ╫ ╬ ╭ ╮ ╯ ╰ ╱ ╲ ╳ ╴ ╵ ╶ ╷ ╸ ╹ ╺ ╻ ╼ ╽ ╾ ╿' + tdLog.info("insert Box Drawing %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(44) + tdSql.checkData(43, 1, data) + + # Block Elements + data = '▀ ▁ ▂ ▃ ▄ ▅ ▆ ▇ █ ▉ ▊ ▋ ▌ ▍ ▎ ▏ ▐ ░ ▒ ▓ ▔ ▕' + tdLog.info( + "insert Block Elements %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(45) + tdSql.checkData(44, 1, data) + + # Geometric Shapes + data = '■ □ ▢ ▣ ▤ ▥ ▦ ▧ ▨ ▩ ▪ ▫ ▬ ▭ ▮ ▯ ▰ ▱ ▲ △ ▴ ▵ ▶ ▷ ▸ ▹ ► ▻ ▼ ▽ ▾ ▿ ◀ ◁ ◂ ◃ ◄ ◅ ◆ ◇ ◈ ◉ ◊ ○ ◌ ◍ ◎ ● ◐ ◑ ◒ ◓ ◔ ◕ ◖ ◗ ◘ ◙ ◚ ◛ ◜ ◝ ◞ ◟ ◠ ◡ ◢ ◣ ◤ ◥ ◦ ◧ ◨ ◩ ◪ ◫ ◬ ◭ ◮ ◯' + tdLog.info( + "insert Geometric Shapes %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(46) + tdSql.checkData(45, 1, data) + + # Miscellaneous Symbols + data = '☀ ☁ ☂ ☃ ☄ ★ ☆ ☇ ☈ ☉ ☊ ☋ ☌ ☍ ☎ ☏ ☐ ☑ ☒ ☓ ☚ ☛ ☜ ☝ ☞ ☟ ☠ ☡ ☢ ☣ ☤ ☥ ☦ ☧ ☨ ☩ ☪ ☫ ☬ ☭ ☮ ☯ ☰ ☱ ☲ ☳ ☴ ☵ ☶ ☷ ☸ ☹ ☺ ☻ ☼ ☽ ☾ ☿ ♀ ♁ ♂ ♃ ♄ ♅ ♆ ♇ ♈ ♉ ♊ ♋ ♌ ♍ ♎ ♏ ♐ ♑ ♒ ♓ ♔ ♕ ♖ ♗ ♘ ♙ ♚ ♛ ♜ ♝ ♞ ♟ ♠ ♡ ♢ ♣ ♤ ♥ ♦ ♧ ♨ ♩ ♪ ♫ ♬ ♭ ♮ ♯' + tdLog.info( + "insert Miscellaneous Symbols %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into 
tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(47) + tdSql.checkData(46, 1, data) + + # Dingbats + data = '✁ ✂ ✃ ✄ ✆ ✇ ✈ ✉ ✌ ✍ ✎ ✏ ✐ ✑ ✒ ✓ ✔ ✕ ✖ ✗ ✘ ✙ ✚ ✛ ✜ ✝ ✞ ✟ ✠ ✡ ✢ ✣ ✤ ✥ ✦ ✧ ✩ ✪ ✫ ✬ ✭ ✮ ✯ ✰ ✱ ✲ ✳ ✴ ✵ ✶ ✷ ✸ ✹ ✺ ✻ ✼ ✽ ✾ ✿ ❀ ❁ ❂ ❃ ❄ ❅ ❆ ❇ ❈ ❉ ❊ ❋ ❍ ❏ ❐ ❑ ❒ ❖ ❘ ❙ ❚ ❛ ❜ ❝ ❞ ❡ ❢ ❣ ❤ ❥ ❦ ❧ ❶ ❷ ❸ ❹ ❺ ❻ ❼ ❽ ❾ ❿ ➀ ➁ ➂ ➃ ➄ ➅ ➆ ➇ ➈ ➉ ➊ ➋ ➌ ➍ ➎ ➏ ➐ ➑ ➒ ➓ ➔ ➘ ➙ ➚ ➛ ➜ ➝ ...' + tdLog.info("insert Dingbats %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(48) + tdSql.checkData(47, 1, data) + + # CJK Symbols and Punctuation + data = '、 。 〃 〄 々 〆 〇 〈 〉 《 》 「 」 『 』 【 】 〒 〓 〔 〕 〖 〗 〘 〙 〚 〛 〜 〝 〞 〟 〠 〡 〢 〣 〤 〥 〦 〧 〨 〩 〪 〫 〬 〭 〮 〯 〰 〱 〲 〳 〴 〵 〶 〷 〿' + tdLog.info( + "insert CJK Symbols and Punctuation %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(49) + tdSql.checkData(48, 1, data) + + # Hiragana + data = 'ぁ あ ぃ い ぅ う ぇ え ぉ お か が き ぎ く ぐ け げ こ ご さ ざ し じ す ず せ ぜ そ ぞ た だ ち ぢ っ つ づ て で と ど な に ぬ ね の は ば ぱ ひ び ぴ ふ ぶ ぷ へ べ ぺ ほ ぼ ぽ ま み む め も ゃ や ゅ ゆ ょ よ ら り る れ ろ ゎ わ ゐ ゑ を ん ゔ ゙ ゚ ゛ ゜ ゝ ゞ' + tdLog.info("insert Hiragana %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(50) + tdSql.checkData(49, 1, data) + + # Katakana + data = 'ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ ゲ コ ゴ サ ザ シ ジ ス ズ セ ゼ ソ ゾ タ ダ チ ヂ ッ ツ ヅ テ デ ト ド ナ ニ ヌ ネ ノ ハ バ パ ヒ ビ ピ フ ブ プ ヘ ベ ペ ホ ボ ポ マ ミ ム メ モ ャ ヤ ュ ユ ョ ヨ ラ リ ル レ ロ ヮ ワ ヰ ヱ ヲ ン ヴ ヵ ヶ ヷ ヸ ヹ ヺ ・ ー ヽ ヾ' + tdLog.info("insert Katakana %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(51) + tdSql.checkData(50, 1, data) + + # Bopomofo + data = 'ㄅ ㄆ ㄇ ㄈ ㄉ ㄊ ㄋ ㄌ ㄍ ㄎ ㄏ ㄐ ㄑ ㄒ ㄓ ㄔ ㄕ ㄖ ㄗ ㄘ ㄙ ㄚ ㄛ ㄜ ㄝ ㄞ ㄟ ㄠ ㄡ ㄢ ㄣ ㄤ ㄥ ㄦ ㄧ ㄨ ㄩ ㄪ ㄫ ㄬ' + tdLog.info("insert Bopomofo %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(52) + tdSql.checkData(51, 1, data) + + # Hangul Compatibility Jamo + data = 'ㄱ ㄲ ㄳ ㄴ ㄵ ㄶ ㄷ ㄸ ㄹ ㄺ ㄻ ㄼ ㄽ ㄾ ㄿ ㅀ ㅁ ㅂ ㅃ ㅄ ㅅ ㅆ ㅇ ㅈ ㅉ ㅊ ㅋ ㅌ ㅍ ㅎ ㅏ ㅐ ㅑ ㅒ ㅓ ㅔ ㅕ ㅖ ㅗ ㅘ ㅙ ㅚ ㅛ ㅜ ㅝ ㅞ ㅟ ㅠ ㅡ ㅢ ㅣ ㅤ ㅥ ㅦ ㅧ ㅨ ㅩ ㅪ ㅫ ㅬ ㅭ ㅮ ㅯ ㅰ ㅱ ㅲ ㅳ ㅴ ㅵ ㅶ ㅷ ㅸ ㅹ ㅺ ㅻ ㅼ ㅽ ㅾ ㅿ ㆀ ㆁ ㆂ ㆃ ㆄ ㆅ ㆆ ㆇ ㆈ ㆉ ㆊ ㆋ ㆌ ㆍ ㆎ' + tdLog.info( + "insert Hangul Compatibility Jamo %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(53) + tdSql.checkData(52, 1, data) + + # Kanbun + data = '㆐ ㆑ ㆒ ㆓ ㆔ ㆕ ㆖ ㆗ ㆘ ㆙ ㆚ ㆛ ㆜ ㆝ ㆞ ㆟' + tdLog.info("insert Kanbun %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(54) + tdSql.checkData(53, 1, data) + + # Enclosed CJK Letters and Months + data = '㈀ ㈁ ㈂ ㈃ ㈄ ㈅ ㈆ ㈇ ㈈ ㈉ ㈊ ㈋ ㈌ ㈍ ㈎ ㈏ ㈐ ㈑ ㈒ ㈓ ㈔ ㈕ ㈖ ㈗ ㈘ ㈙ ㈚ ㈛ ㈜ ㈠ ㈡ ㈢ ㈣ ㈤ ㈥ ㈦ ㈧ ㈨ ㈩ ㈪ ㈫ ㈬ ㈭ ㈮ ㈯ ㈰ ㈱ ㈲ ㈳ ㈴ ㈵ ㈶ ㈷ ㈸ ㈹ ㈺ ㈻ ㈼ ㈽ ㈾ ㈿ ㉀ ㉁ ㉂ ㉃ ㉠ ㉡ ㉢ ㉣ ㉤ ㉥ ㉦ ㉧ ㉨ ㉩ ㉪ ㉫ ㉬ ㉭ ㉮ ㉯ ㉰ ㉱ ㉲ ㉳ ㉴ ㉵ ㉶ ㉷ ㉸ ㉹ ㉺ ㉻ ㉿ ㊀ ㊁ ㊂ ㊃ ㊄ ㊅ ㊆ ㊇ ㊈ ㊉ ㊊ ㊋ ㊌ ㊍ ㊎ ㊏ ㊐ ㊑ ㊒ ㊓ ㊔ ㊕ ㊖ ㊗ ㊘ ㊙ ㊚ ㊛ ㊜ ㊝ ㊞ ㊟ ㊠ ㊡ ...' 
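+ # For the longer blocks the sample is cut short and ends with a literal ' ...'; that marker is part of the inserted string, so checkData() compares it back as-is.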
+ tdLog.info( + "insert Enclosed CJK Letters and Months %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(55) + tdSql.checkData(54, 1, data) + + # CJK Compatibility + data = '㌀ ㌁ ㌂ ㌃ ㌄ ㌅ ㌆ ㌇ ㌈ ㌉ ㌊ ㌋ ㌌ ㌍ ㌎ ㌏ ㌐ ㌑ ㌒ ㌓ ㌔ ㌕ ㌖ ㌗ ㌘ ㌙ ㌚ ㌛ ㌜ ㌝ ㌞ ㌟ ㌠ ㌡ ㌢ ㌣ ㌤ ㌥ ㌦ ㌧ ㌨ ㌩ ㌪ ㌫ ㌬ ㌭ ㌮ ㌯ ㌰ ㌱ ㌲ ㌳ ㌴ ㌵ ㌶ ㌷ ㌸ ㌹ ㌺ ㌻ ㌼ ㌽ ㌾ ㌿ ㍀ ㍁ ㍂ ㍃ ㍄ ㍅ ㍆ ㍇ ㍈ ㍉ ㍊ ㍋ ㍌ ㍍ ㍎ ㍏ ㍐ ㍑ ㍒ ㍓ ㍔ ㍕ ㍖ ㍗ ㍘ ㍙ ㍚ ㍛ ㍜ ㍝ ㍞ ㍟ ㍠ ㍡ ㍢ ㍣ ㍤ ㍥ ㍦ ㍧ ㍨ ㍩ ㍪ ㍫ ㍬ ㍭ ㍮ ㍯ ㍰ ㍱ ㍲ ㍳ ㍴ ㍵ ㍶ ㍻ ㍼ ㍽ ㍾ ㍿ ㎀ ㎁ ㎂ ㎃ ...' + tdLog.info( + "insert CJK Compatibility %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(56) + tdSql.checkData(55, 1, data) + + # CJK Unified Ideographs + data = '一 丁 丂 七 丄 丅 丆 万 丈 三 上 下 丌 不 与 丏 丐 丑 丒 专 且 丕 世 丗 丘 丙 业 丛 东 丝 丞 丟 丠 両 丢 丣 两 严 並 丧 丨 丩 个 丫 丬 中 丮 丯 丰 丱 串 丳 临 丵 丶 丷 丸 丹 为 主 丼 丽 举 丿 乀 乁 乂 乃 乄 久 乆 乇 么 义 乊 之 乌 乍 乎 乏 乐 乑 乒 乓 乔 乕 乖 乗 乘 乙 乚 乛 乜 九 乞 也 习 乡 乢 乣 乤 乥 书 乧 乨 乩 乪 乫 乬 乭 乮 乯 买 乱 乲 乳 乴 乵 乶 乷 乸 乹 乺 乻 乼 乽 乾 乿 ...' + tdLog.info( + "insert CJK Unified Ideographs %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(57) + tdSql.checkData(56, 1, data) + + # Hangul Syllables + data = '一 丁 丂 七 丄 丅 丆 万 丈 三 上 下 丌 不 与 丏 丐 丑 丒 专 且 丕 世 丗 丘 丙 业 丛 东 丝 丞 丟 丠 両 丢 丣 两 严 並 丧 丨 丩 个 丫 丬 中 丮 丯 丰 丱 串 丳 临 丵 丶 丷 丸 丹 为 主 丼 丽 举 丿 乀 乁 乂 乃 乄 久 乆 乇 么 义 乊 之 乌 乍 乎 乏 乐 乑 乒 乓 乔 乕 乖 乗 乘 乙 乚 乛 乜 九 乞 也 习 乡 乢 乣 乤 乥 书 乧 乨 乩 乪 乫 乬 乭 乮 乯 买 乱 乲 乳 乴 乵 乶 乷 乸 乹 乺 乻 乼 乽 乾 乿 ...' + tdLog.info( + "insert Hangul Syllables %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(58) + tdSql.checkData(57, 1, data) + + # Private Use + data = '                                                                                                                                ...' + tdLog.info("insert Private Use %d length data: %s" % (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(59) + tdSql.checkData(58, 1, data) + + # CJK Compatibility Ideographs + data = '豈 更 車 賈 滑 串 句 龜 龜 契 金 喇 奈 懶 癩 羅 蘿 螺 裸 邏 樂 洛 烙 珞 落 酪 駱 亂 卵 欄 爛 蘭 鸞 嵐 濫 藍 襤 拉 臘 蠟 廊 朗 浪 狼 郎 來 冷 勞 擄 櫓 爐 盧 老 蘆 虜 路 露 魯 鷺 碌 祿 綠 菉 錄 鹿 論 壟 弄 籠 聾 牢 磊 賂 雷 壘 屢 樓 淚 漏 累 縷 陋 勒 肋 凜 凌 稜 綾 菱 陵 讀 拏 樂 諾 丹 寧 怒 率 異 北 磻 便 復 不 泌 數 索 參 塞 省 葉 說 殺 辰 沈 拾 若 掠 略 亮 兩 凉 梁 糧 良 諒 量 勵 ...' 
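+ # Note: the "Hangul Syllables" sample above reuses the CJK Unified Ideographs characters rather than actual Hangul syllables; the insert/readback check still passes because the same string is compared.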
+ tdLog.info( + "insert CJK Compatibility Ideographs %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(60) + tdSql.checkData(59, 1, data) + + # Alphabetic Presentation Forms + data = 'ff fi fl ffi ffl ſt st ﬓ ﬔ ﬕ ﬖ ﬗ ﬞ ײַ ﬠ ﬡ ﬢ ﬣ ﬤ ﬥ ﬦ ﬧ ﬨ ﬩ שׁ שׂ שּׁ שּׂ אַ אָ אּ בּ גּ דּ הּ וּ זּ טּ יּ ךּ כּ לּ מּ נּ סּ ףּ פּ צּ קּ רּ שּ תּ וֹ בֿ כֿ פֿ ﭏ' + tdLog.info( + "insert Alphabetic Presentation Forms %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(61) + tdSql.checkData(60, 1, data) + + # Arabic Presentation Forms-A + data = 'ﭐ ﭑ ﭒ ﭓ ﭔ ﭕ ﭖ ﭗ ﭘ ﭙ ﭚ ﭛ ﭜ ﭝ ﭞ ﭟ ﭠ ﭡ ﭢ ﭣ ﭤ ﭥ ﭦ ﭧ ﭨ ﭩ ﭪ ﭫ ﭬ ﭭ ﭮ ﭯ ﭰ ﭱ ﭲ ﭳ ﭴ ﭵ ﭶ ﭷ ﭸ ﭹ ﭺ ﭻ ﭼ ﭽ ﭾ ﭿ ﮀ ﮁ ﮂ ﮃ ﮄ ﮅ ﮆ ﮇ ﮈ ﮉ ﮊ ﮋ ﮌ ﮍ ﮎ ﮏ ﮐ ﮑ ﮒ ﮓ ﮔ ﮕ ﮖ ﮗ ﮘ ﮙ ﮚ ﮛ ﮜ ﮝ ﮞ ﮟ ﮠ ﮡ ﮢ ﮣ ﮤ ﮥ ﮦ ﮧ ﮨ ﮩ ﮪ ﮫ ﮬ ﮭ ﮮ ﮯ ﮰ ﮱ ﯓ ﯔ ﯕ ﯖ ﯗ ﯘ ﯙ ﯚ ﯛ ﯜ ﯝ ﯞ ﯟ ﯠ ﯡ ﯢ ﯣ ﯤ ﯥ ﯦ ﯧ ﯨ ﯩ ﯪ ﯫ ﯬ ﯭ ﯮ ﯯ ﯰ ...' + tdLog.info( + "insert Arabic Presentation Forms-A %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(62) + tdSql.checkData(61, 1, data) + + # Combining Half Marks + data = '︠ ︡ ︢ ︣' + tdLog.info( + "insert Combining Half Marks %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(63) + tdSql.checkData(62, 1, data) + + # CJK Compatibility Forms + data = '︰ ︱ ︲ ︳ ︴ ︵ ︶ ︷ ︸ ︹ ︺ ︻ ︼ ︽ ︾ ︿ ﹀ ﹁ ﹂ ﹃ ﹄ ﹉ ﹊ ﹋ ﹌ ﹍ ﹎ ﹏' + tdLog.info( + "insert CJK Compatibility Forms %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(64) + tdSql.checkData(63, 1, data) + + # Small Form Variants + data = '﹐ ﹑ ﹒ ﹔ ﹕ ﹖ ﹗ ﹘ ﹙ ﹚ ﹛ ﹜ ﹝ ﹞ ﹟ ﹠ ﹡ ﹢ ﹣ ﹤ ﹥ ﹦ ﹨ ﹩ ﹪ ﹫' + tdLog.info( + "insert Small Form Variants %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(65) + tdSql.checkData(64, 1, data) + + # Arabic Presentation Forms-B + data = 'ﹰ ﹱ ﹲ ﹴ ﹶ ﹷ ﹸ ﹹ ﹺ ﹻ ﹼ ﹽ ﹾ ﹿ ﺀ ﺁ ﺂ ﺃ ﺄ ﺅ ﺆ ﺇ ﺈ ﺉ ﺊ ﺋ ﺌ ﺍ ﺎ ﺏ ﺐ ﺑ ﺒ ﺓ ﺔ ﺕ ﺖ ﺗ ﺘ ﺙ ﺚ ﺛ ﺜ ﺝ ﺞ ﺟ ﺠ ﺡ ﺢ ﺣ ﺤ ﺥ ﺦ ﺧ ﺨ ﺩ ﺪ ﺫ ﺬ ﺭ ﺮ ﺯ ﺰ ﺱ ﺲ ﺳ ﺴ ﺵ ﺶ ﺷ ﺸ ﺹ ﺺ ﺻ ﺼ ﺽ ﺾ ﺿ ﻀ ﻁ ﻂ ﻃ ﻄ ﻅ ﻆ ﻇ ﻈ ﻉ ﻊ ﻋ ﻌ ﻍ ﻎ ﻏ ﻐ ﻑ ﻒ ﻓ ﻔ ﻕ ﻖ ﻗ ﻘ ﻙ ﻚ ﻛ ﻜ ﻝ ﻞ ﻟ ﻠ ﻡ ﻢ ﻣ ﻤ ﻥ ﻦ ﻧ ﻨ ﻩ ﻪ ﻫ ﻬ ﻭ ﻮ ﻯ ﻰ ﻱ ...' + tdLog.info( + "insert Arabic Presentation Forms-B %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(66) + tdSql.checkData(65, 1, data) + + # Halfwidth and Fullwidth Forms + data = '! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ 。 「 」 、 ・ ヲ ァ ィ ゥ ェ ォ ャ ュ ョ ッ ー ア イ ウ エ オ カ キ ク ケ コ サ シ ス セ ソ タ チ ツ ...' 
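+ # Halfwidth and Fullwidth Forms is the last block; after this insert the table holds 67 rows, one per sample.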
+ tdLog.info( + "insert Halfwidth and Fullwidth Forms %d length data: %s" % + (len(data), data)) + tdSql.execute("insert into tb values (now, '%s')" % data) + tdSql.query("select * from tb") + tdSql.checkRows(67) + tdSql.checkData(66, 1, data) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/query.py b/tests/pytest/query/query.py new file mode 100644 index 0000000000000000000000000000000000000000..68b8baab64afce7c48eb51a2173e6d1187a1b7b8 --- /dev/null +++ b/tests/pytest/query/query.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))") + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags("dev_01")') + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags("dev_02")') + + print("==============step2") + + tdSql.execute( + """INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1), + ('2020-05-13 10:00:00.001', 1) + dev_002 VALUES('2020-05-13 10:00:00.001', 1)""") + + tdSql.query("select * from db.st where ts='2020-05-13 10:00:00.000'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/random-test/random-test.py b/tests/pytest/random-test/random-test.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f4c00ea5cc68eb187b24bf311074df88c671b6 --- /dev/null +++ b/tests/pytest/random-test/random-test.py @@ -0,0 +1,145 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class Test: + def __init__(self): + self.current_tb = "" + self.last_tb = "" + self.written = 0 + + def create_table(self): + tdLog.info("create a table") + self.current_tb = "tb%d" % int(round(time.time() * 1000)) + tdLog.info("current table %s" % self.current_tb) + + if (self.current_tb == self.last_tb): + return + else: + tdSql.execute( + 'create table %s (ts timestamp, speed int)' % + self.current_tb) + self.last_tb = self.current_tb + self.written = 0 + + def insert_data(self): + tdLog.info("will insert data to table") + if (self.current_tb == ""): + tdLog.info("no table, create first") + self.create_table() + + tdLog.info("insert data to table") + insertRows = 10 + tdLog.info("insert %d rows to %s" % (insertRows, self.last_tb)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into %s values (now + %dm, %d)' % + (self.last_tb, i, i)) + self.written = self.written + 1 + + tdLog.info("insert earlier data") + tdSql.execute('insert into %s values (now - 5m , 10)' % self.last_tb) + self.written = self.written + 1 + tdSql.execute('insert into %s values (now - 6m , 10)' % self.last_tb) + self.written = self.written + 1 + tdSql.execute('insert into %s values (now - 7m , 10)' % self.last_tb) + self.written = self.written + 1 + tdSql.execute('insert into %s values (now - 8m , 10)' % self.last_tb) + self.written = self.written + 1 + + def query_data(self): + if (self.written > 0): + tdLog.info("query data from table") + tdSql.query("select * from %s" % self.last_tb) + tdSql.checkRows(self.written) + + def create_stable(self): + tdLog.info("create a super table") + + def restart_database(self): + tdLog.info("restart databae") + tdDnodes.stop(1) + tdDnodes.start(1) + tdLog.sleep(5) + + def force_restart(self): + tdLog.info("force restart database") + tdDnodes.forcestop(1) + tdDnodes.start(1) + tdLog.sleep(5) + + def drop_table(self): + if (self.current_tb != ""): + tdLog.info("drop current tb %s" % self.current_tb) + tdSql.execute("drop table %s" % self.current_tb) + self.current_tb = "" + self.last_tb = "" + self.written = 0 + + def reset_query_cache(self): + tdLog.info("reset query cache") + tdSql.execute("reset query cache") + tdLog.sleep(1) + + def reset_database(self): + tdLog.info("reset database") + tdDnodes.forcestop(1) + tdDnodes.deploy(1) + self.current_tb = "" + self.last_tb = "" + self.written = 0 + tdDnodes.start(1) + tdSql.prepare() + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + test = Test() + + switch = { + 1: test.create_table, + 2: test.insert_data, + 3: test.query_data, + 4: test.create_stable, + 5: test.restart_database, + 6: test.force_restart, + 7: test.drop_table, + 8: test.reset_query_cache, + 9: test.reset_database, + } + + for x in range(1, 100): + r = random.randint(1, 9) + tdLog.notice("iteration %d run func %d" % (x, r)) + switch.get(r, lambda: "ERROR")() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, 
TDTestCase()) diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh index 853ebe1d76f4c3dfcc764dfff83397f247748929..5f48789d45d6a3a4fd49eecb14be7dac58ec325e 100755 --- a/tests/pytest/smoketest.sh +++ b/tests/pytest/smoketest.sh @@ -1,4 +1,5 @@ #!/bin/bash +# insert python3 ./test.py $1 -f insert/basic.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f insert/int.py @@ -21,7 +22,10 @@ python3 ./test.py $1 -f insert/date.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f insert/nchar.py python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/multi.py +python3 ./test.py $1 -s && sleep 1 +# table python3 ./test.py $1 -f table/column_name.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f table/column_num.py @@ -29,8 +33,7 @@ python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f table/db_table.py python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importCacheFileT.py -python3 ./test.py $1 -s && sleep 1 +# import python3 ./test.py $1 -f import_merge/importDataLastSub.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importHead.py @@ -43,3 +46,7 @@ python3 ./test.py $1 -f import_merge/importTail.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importTRestart.py python3 ./test.py $1 -s && sleep 1 + +#tag +python3 ./test.py $1 -f tag_lite/filter.py +python3 ./test.py $1 -s && sleep 1 diff --git a/tests/pytest/stable/__init__.py b/tests/pytest/stable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/pytest/stable/insert.py b/tests/pytest/stable/insert.py new file mode 100644 index 0000000000000000000000000000000000000000..9f9e7c6e066db628f66cd2d9e897f471ee5cff56 --- /dev/null +++ b/tests/pytest/stable/insert.py @@ -0,0 +1,56 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))") + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags("dev_01")') + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags("dev_02")') + + print("==============step2") + + tdSql.execute( + """INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1), + ('2020-05-13 10:00:00.001', 1) + dev_002 VALUES('2020-05-13 10:00:00.001', 1)""") + + tdSql.query("select * from db.st where dev='dev_01'") + tdSql.checkRows(2) + + tdSql.query("select * from db.st where dev='dev_02'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/boundary.py b/tests/pytest/table/boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..faa222231bda5234dd624ffde5bf805b13524928 --- /dev/null +++ b/tests/pytest/table/boundary.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- + +import random +import string +import subprocess +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init( self, conn ): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + + def getLimitFromSourceCode( self, name ): + cmd = "grep -w '#define %s' ../../src/inc/taosdef.h|awk '{print $3}'" % name + return int(subprocess.check_output(cmd, shell=True)) + + + def generateString( self, length ): + chars = string.ascii_uppercase + string.ascii_lowercase + v = "" + for i in range( length ): + v += random.choice( chars ) + return v + + + def checkTagBoundaries( self ): + tdLog.debug( "checking tag boundaries" ) + tdSql.prepare() + + maxTags = self.getLimitFromSourceCode( 'TSDB_MAX_TAGS' ) + totalTagsLen = self.getLimitFromSourceCode( 'TSDB_MAX_TAGS_LEN' ) + tdLog.notice( "max tags is %d" % maxTags ) + tdLog.notice( "max total tag length is %d" % totalTagsLen ) + + # for binary tags, 2 bytes are used for length + tagLen = (totalTagsLen - maxTags * 2) // maxTags + firstTagLen = totalTagsLen - 2 * maxTags - tagLen * (maxTags - 1) + + sql = "create table cars(ts timestamp, f int) tags(t0 binary(%d)" % firstTagLen + for i in range( 1, maxTags ): + sql += ", t%d binary(%d)" % (i, tagLen) + sql += ");" + + tdLog.debug( "creating super table: " + sql ) + tdSql.execute( sql ) + tdSql.query( 'show stables' ) + tdSql.checkRows( 1 ) + + for i in range( 10 ): + sql = "create table car%d using cars tags('%d'" % (i, i) + sql += ", '0'" * (maxTags - 1) + ");" + tdLog.debug( "creating table: " + sql ) + tdSql.execute( sql ) + + sql = "insert into car%d values(now, 0);" % i + tdLog.debug( "inserting data: " + sql ) + tdSql.execute( sql ) + + tdSql.query( 'show tables' ) + tdLog.info( 'tdSql.checkRow(10)' ) + tdSql.checkRows( 10 ) + + tdSql.query( 'select * from 
cars;' ) + tdSql.checkRows( 10 ) + + + def checkColumnBoundaries( self ): + tdLog.debug( "checking column boundaries" ) + tdSql.prepare() + + # one column is for timestamp + maxCols = self.getLimitFromSourceCode( 'TSDB_MAX_COLUMNS' ) - 1 + + sql = "create table cars (ts timestamp" + for i in range( maxCols ): + sql += ", c%d int" % i + sql += ");" + tdSql.execute( sql ) + tdSql.query( 'show tables' ) + tdSql.checkRows( 1 ) + + sql = "insert into cars values (now" + for i in range( maxCols ): + sql += ", %d" % i + sql += ");" + tdSql.execute( sql ) + tdSql.query( 'select * from cars' ) + tdSql.checkRows( 1 ) + + + def checkTableNameBoundaries( self ): + tdLog.debug( "checking table name boundaries" ) + tdSql.prepare() + + maxTableNameLen = self.getLimitFromSourceCode( 'TSDB_TABLE_NAME_LEN' ) + tdLog.notice( "table name max length is %d" % maxTableNameLen ) + + name = self.generateString( maxTableNameLen - 1) + tdLog.info( "table name is '%s'" % name ) + + tdSql.execute( "create table %s (ts timestamp, value int)" % name ) + tdSql.execute( "insert into %s values(now, 0)" % name ) + + tdSql.query( 'show tables' ) + tdSql.checkRows( 1 ) + + tdSql.query( 'select * from %s' % name ) + tdSql.checkRows( 1 ) + + + def checkRowBoundaries( self ): + tdLog.debug( "checking row boundaries" ) + tdSql.prepare() + + # 8 bytes for timestamp + maxRowSize = 65536 - 8 + maxCols = self.getLimitFromSourceCode( 'TSDB_MAX_COLUMNS' ) - 1 + + # for binary cols, 2 bytes are used for length + colLen = (maxRowSize - maxCols * 2) // maxCols + firstColLen = maxRowSize - 2 * maxCols - colLen * (maxCols - 1) + + sql = "create table cars (ts timestamp, c0 binary(%d)" % firstColLen + for i in range( 1, maxCols ): + sql += ", c%d binary(%d)" % (i, colLen) + sql += ");" + tdSql.execute( sql ) + tdSql.query( 'show tables' ) + tdSql.checkRows( 1 ) + + col = self.generateString( firstColLen ) + sql = "insert into cars values (now, '%s'" % col + col = self.generateString( colLen ) + for i in range( 1, maxCols ): + sql += ", '%s'" % col + sql += ");" + tdLog.info( sql ); + tdSql.execute( sql ) + tdSql.query( "select * from cars" ) + tdSql.checkRows( 1 ) + + + def run(self): + self.checkTagBoundaries() + self.checkColumnBoundaries() + self.checkTableNameBoundaries() + self.checkRowBoundaries() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/column_name.py b/tests/pytest/table/column_name.py index bb1f587a65b42ab447817e8bb2760e582ac8c986..aa958fd60c98dc86f031f6980ca476802e5ff805 100644 --- a/tests/pytest/table/column_name.py +++ b/tests/pytest/table/column_name.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- import sys +import string +import random +import subprocess from util.log import * from util.cases import * from util.sql import * @@ -14,34 +17,9 @@ class TDTestCase: def run(self): tdSql.prepare() - # TSIM: system sh/stop_dnodes.sh - # TSIM: - # TSIM: system sh/ip.sh -i 1 -s up - # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1 - # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 - # TSIM: system sh/exec.sh -n dnode1 -s start - # TSIM: - # TSIM: sleep 3000 - # TSIM: sql connect - # TSIM: - # TSIM: $i = 0 - # TSIM: $dbPrefix = lm_cm_db - # TSIM: $tbPrefix = lm_cm_tb - # TSIM: $db = $dbPrefix . $i - # TSIM: $tb = $tbPrefix . 
$i - # TSIM: - # TSIM: print =============== step1 tdLog.info('=============== step1') - # TSIM: sql create database $db - # TSIM: sql use $db - # TSIM: - # TSIM: sql drop table dd -x step0 tdLog.info('drop table dd -x step0') tdSql.error('drop table dd') - # TSIM: return -1 - # TSIM: step0: - # TSIM: - # TSIM: sql create table $tb(ts timestamp, int) -x step1 tdLog.info('create table tb(ts timestamp, int) -x step1') tdSql.error('create table tb(ts timestamp, int)') # TSIM: return -1 @@ -112,37 +90,24 @@ class TDTestCase: tdLog.info('=============== step4') # TSIM: sql create table $tb (ts timestamp, # a0123456789012345678901234567890123456789 int) + getMaxColNum = "grep -w '#define TSDB_COL_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'" + boundary = int(subprocess.check_output(getMaxColNum, shell=True)) + tdLog.info("get max column name length is %d" % boundary) + chars = string.ascii_uppercase + string.ascii_lowercase + +# col_name = ''.join(random.choices(chars, k=boundary+1)) +# tdLog.info( +# 'create table tb (ts timestamp, %s int), col_name length is %d' % (col_name, len(col_name))) +# tdSql.error( +# 'create table tb (ts timestamp, %s int)' % col_name) + + col_name = ''.join(random.choices(chars, k=boundary)) tdLog.info( - 'create table tb (ts timestamp, a0123456789012345678901234567890123456789 int)') + 'create table tb (ts timestamp, %s int), col_name length is %d' % + (col_name, len(col_name))) tdSql.execute( - 'create table tb (ts timestamp, a0123456789012345678901234567890123456789 int)') - # TSIM: sql drop table $tb - tdLog.info('drop table tb') - tdSql.execute('drop table tb') - # TSIM: - # TSIM: sql show tables - tdLog.info('show tables') - tdSql.query('show tables') - # TSIM: if $rows != 0 then - tdLog.info('tdSql.checkRow(0)') - tdSql.checkRows(0) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: print =============== step5 - tdLog.info('=============== step5') - # TSIM: sql create table $tb (ts timestamp, a0123456789 int) - tdLog.info('create table tb (ts timestamp, a0123456789 int)') - tdSql.execute('create table tb (ts timestamp, a0123456789 int)') - # TSIM: sql show tables - tdLog.info('show tables') - tdSql.query('show tables') - # TSIM: if $rows != 1 then - tdLog.info('tdSql.checkRow(1)') - tdSql.checkRows(1) - # TSIM: return -1 - # TSIM: endi - # TSIM: + 'create table tb (ts timestamp, %s int)' % col_name) + # TSIM: sql insert into $tb values (now , 1) tdLog.info("insert into tb values (now , 1)") tdSql.execute("insert into tb values (now , 1)") @@ -152,24 +117,6 @@ class TDTestCase: # TSIM: if $rows != 1 then tdLog.info('tdSql.checkRow(1)') tdSql.checkRows(1) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: sql drop database $db - tdLog.info('drop database db') - tdSql.execute('drop database db') - # TSIM: sql show databases - tdLog.info('show databases') - tdSql.query('show databases') - # TSIM: if $rows != 0 then - tdLog.info('tdSql.checkRow(0)') - tdSql.checkRows(0) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: - # TSIM: - # TSIM: # convert end def stop(self): diff --git a/tests/pytest/table/column_num.py b/tests/pytest/table/column_num.py index 87299827cccc90150157a2b10f3ea90e563d9b8a..877f0409dc4fa3d723606b62fc939c76809ef054 100644 --- a/tests/pytest/table/column_num.py +++ b/tests/pytest/table/column_num.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import sys +import subprocess from util.log import * from util.cases import * from util.sql import * @@ -14,127 +15,71 @@ class TDTestCase: def run(self): tdSql.prepare() - # TSIM: system 
sh/stop_dnodes.sh - # TSIM: - # TSIM: system sh/ip.sh -i 1 -s up - # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1 - # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0 - # TSIM: system sh/exec.sh -n dnode1 -s start - # TSIM: - # TSIM: sleep 3000 - # TSIM: sql connect - # TSIM: - # TSIM: $i = 0 - # TSIM: $dbPrefix = lm_cn_db - # TSIM: $tbPrefix = lm_cn_tb - # TSIM: $db = $dbPrefix . $i - # TSIM: $tb = $tbPrefix . $i - # TSIM: - # TSIM: print =============== step1 tdLog.info('=============== step1') - # TSIM: sql create database $db - # TSIM: sql use $db - # TSIM: - # TSIM: sql create table $tb() -x step1 tdLog.info('create table tb() -x step1') tdSql.error('create table tb()') - # TSIM: return -1 - # TSIM: step1: - # TSIM: - # TSIM: sql show tables tdLog.info('show tables') tdSql.query('show tables') # TSIM: if $rows != 0 then tdLog.info('tdSql.checkRow(0)') tdSql.checkRows(0) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: print =============== step2 tdLog.info('=============== step2') # TSIM: sql create table $tb (ts timestamp) -x step2 tdLog.info('create table tb (ts timestamp) -x step2') tdSql.error('create table tb (ts timestamp) ') - # TSIM: return -1 - # TSIM: step2: - # TSIM: - # TSIM: sql show tables tdLog.info('show tables') tdSql.query('show tables') # TSIM: if $rows != 0 then tdLog.info('tdSql.checkRow(0)') tdSql.checkRows(0) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: print =============== step3 tdLog.info('=============== step3') - # TSIM: sql create table $tb (ts int) -x step3 tdLog.info('create table tb (ts int) -x step3') tdSql.error('create table tb (ts int) ') - # TSIM: return -1 - # TSIM: step3: - # TSIM: - # TSIM: sql show tables tdLog.info('show tables') tdSql.query('show tables') - # TSIM: if $rows != 0 then tdLog.info('tdSql.checkRow(0)') tdSql.checkRows(0) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: print =============== step4 tdLog.info('=============== step4') - # TSIM: sql create table $tb (ts timestamp, a1 int, a2 int, a3 int, a4 - # int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 - # int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, - # a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 - # int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, - # b4 int, b5 int, b6 int, b7 int, b8 int, b9 int, b10 int, b11 int, b12 - # int, b13 int, b14 int, b15 int, b16 int, b17 int, b18 int, b19 int, - # b20 int, b21 int, b22 int, b23 int, b24 int, b25 int, b26 int, b27 - # int, b28 int,b29 int,b30 int,b31 int,b32 int) tdLog.info('create table tb (ts timestamp, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, b4 int, b5 int, b6 int, b7 int, b8 int, b9 int, b10 int, b11 int, b12 int, b13 int, b14 int, b15 int, b16 int, b17 int, b18 int, b19 int, b20 int, b21 int, b22 int, b23 int, b24 int, b25 int, b26 int, b27 int, b28 int,b29 int,b30 int,b31 int,b32 int)') tdSql.execute('create table tb (ts timestamp, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, b4 int, b5 int, 
b6 int, b7 int, b8 int, b9 int, b10 int, b11 int, b12 int, b13 int, b14 int, b15 int, b16 int, b17 int, b18 int, b19 int, b20 int, b21 int, b22 int, b23 int, b24 int, b25 int, b26 int, b27 int, b28 int,b29 int,b30 int,b31 int,b32 int)') # TSIM: # TSIM: sql show tables tdLog.info('show tables') tdSql.query('show tables') - # TSIM: if $rows != 1 then tdLog.info('tdSql.checkRow(1)') tdSql.checkRows(1) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: print =============== step5 + tdLog.info('=============== step5') - # TSIM: $i = 1 - # TSIM: $tb = $tbPrefix . $i - # TSIM: - # TSIM: sql create table $tb (ts timestamp, a1 int, a2 int, a3 int, a4 - # int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 - # int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, - # a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 - # int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, - # b4 int, b5 int) - tdLog.info('create table tb1 (ts timestamp, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, b4 int, b5 int)') - tdSql.execute('create table tb1 (ts timestamp, a1 int, a2 int, a3 int, a4 int, a5 int, a6 int, a7 int, a8 int, a9 int, a10 int, a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int,a29 int,a30 int,a31 int,a32 int, b1 int, b2 int, b3 int, b4 int, b5 int)') - # TSIM: - # TSIM: sql show tables + + getMaxColumnNum = "grep -w '#define TSDB_MAX_COLUMNS' ../../src/inc/taosdef.h|awk '{print $3}'" + boundary = int(subprocess.check_output(getMaxColumnNum, shell=True)) + tdLog.info("get max column number is %d" % boundary) + + columnSeq = "ts timestamp" + for x in range(0, boundary): + columnSeq = columnSeq + ", col%d int" % x + + tdLog.info("create table tb1 (%s)" % columnSeq) + tdSql.error('create table tb1 (%s)' % columnSeq) + + columnSeq = "ts timestamp" + for x in range(0, boundary - 1): + columnSeq = columnSeq + ", col%d int" % x + + tdLog.info("create table tb1 (%s)" % columnSeq) + tdSql.execute('create table tb1 (%s)' % columnSeq) + tdLog.info('show tables') tdSql.query('show tables') # TSIM: if $rows != 2 then tdLog.info('tdSql.checkRow(2)') tdSql.checkRows(2) - # TSIM: return -1 - # TSIM: endi - # TSIM: - # TSIM: sql insert into $tb values (now, 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 - # , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 - # , 23 , 24 , 25 ,26 , 27 ,28 ,29,30,31, 32, 33, 34, 35, 36, 37) - tdLog.info("insert into tb1 values (now, 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 ,26 , 27 ,28 ,29,30,31, 32, 33, 34, 35, 36, 37)") - tdSql.execute("insert into tb1 values (now, 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 ,26 , 27 ,28 ,29,30,31, 32, 33, 34, 35, 36, 37)") + + data = "now" + for x in range(0, boundary - 1): + data = data + ", %d" % x + tdLog.info("insert into tb1 values (%s)" % data) + tdSql.execute("insert into tb1 values (%s)" % data) # TSIM: sql select * from $tb tdLog.info('select * from tb1') tdSql.query('select * from tb1') diff --git a/tests/pytest/table/create.py b/tests/pytest/table/create.py new file mode 
100644 index 0000000000000000000000000000000000000000..b456b444f4b361784b2bb157e453bf5ae1e57c85 --- /dev/null +++ b/tests/pytest/table/create.py @@ -0,0 +1,48 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + print("==============step1") + print("prepare data") + tdSql.execute("create table db.st (ts timestamp, i int) tags(j int)") + tdSql.execute("create table db.tb using st tags(1)") + tdSql.execute("insert into db.tb values(now, 1)") + + print("==============step2") + print("create table as select") + try: + tdSql.execute("create table db.test as select * from db.st") + except Exception as e: + tdLog.exit(e) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/del_stable.py b/tests/pytest/table/del_stable.py index de30e6bdfe841376d9e6f61037a7fdebe4309fcd..3932f325362457dbd98838863eafddd94716f2f3 100644 --- a/tests/pytest/table/del_stable.py +++ b/tests/pytest/table/del_stable.py @@ -26,9 +26,7 @@ class TDTestCase: def run(self): tdSql.prepare() - tdSql.execute("drop database if exists db") print("==============step1") - tdSql.execute("create database db") tdSql.execute("create table db.st (ts timestamp, i int) tags(j int)") tdSql.execute("create table db.tb using st tags(1)") tdSql.execute("insert into db.tb values(now, 1)") diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..335073065c5a5b6af82469c967bac1df623d5e5a --- /dev/null +++ b/tests/pytest/table/tablename-boundary.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +import sys +import string +import random +import subprocess +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + getTableNameLen = "grep -w '#define TSDB_TABLE_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'" + tableNameMaxLen = int( + subprocess.check_output( + getTableNameLen, shell=True)) + tdLog.info("table name max length is %d" % tableNameMaxLen) + chars = string.ascii_uppercase + string.ascii_lowercase + tb_name = ''.join(random.choices(chars, k=tableNameMaxLen)) + tdLog.info('tb_name length %d' % len(tb_name)) + tdLog.info('create table %s (ts timestamp, value int)' % tb_name) + tdSql.error( + 'create table %s (ts timestamp, speed binary(4089))' % + tb_name) + + tb_name = ''.join(random.choices(chars, k=191)) + tdLog.info('tb_name length %d' % len(tb_name)) + tdLog.info('create table %s (ts timestamp, value int)' % tb_name) + tdSql.execute( + 'create table %s (ts timestamp, speed 
binary(4089))' % + tb_name) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/__init__.py b/tests/pytest/tag_lite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/pytest/tag_lite/create-tags-boundary.py b/tests/pytest/tag_lite/create-tags-boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..e80f458f0ccdd7d564143058bfd9b059a93877e9 --- /dev/null +++ b/tests/pytest/tag_lite/create-tags-boundary.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + getMaxTagNum = "grep -w TSDB_MAX_TAGS ../../src/inc/taosdef.h|awk '{print $3}'" + boundary = int(subprocess.check_output(getMaxTagNum, shell=True)) + tdLog.info("get max tags number is %d" % boundary) + for x in range(0, boundary): + stb_name = "stb%d" % x + + tagSeq = "tag0 int" + for y in range(1, x + 1): + tagSeq = tagSeq + ", tag%d int" % y + + tdLog.info( + "create table %s (ts timestamp, value int) tags (%s)" % + (stb_name, tagSeq)) + tdSql.execute( + "create table %s (ts timestamp, value int) tags (%s)" % + (stb_name, tagSeq)) + + tdSql.query("show stables") + tdSql.checkRows(boundary) + + stb_name = "stb%d" % (boundary + 1) + tagSeq = tagSeq + ", tag%d int" % (boundary) + tdLog.info( + "create table %s (ts timestamp, value int) tags (%s)" % + (stb_name, tagSeq)) + tdSql.error( + "create table %s (ts timestamp, value int) tags (%s)" % + (stb_name, tagSeq)) + tdSql.query("show stables") + tdSql.checkRows(boundary) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/datatype-without-alter.py b/tests/pytest/tag_lite/datatype-without-alter.py new file mode 100644 index 0000000000000000000000000000000000000000..1a8d05d648a4c0fe7bb28458f15bd4e996ad946c --- /dev/null +++ b/tests/pytest/tag_lite/datatype-without-alter.py @@ -0,0 +1,98 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + self.ntables = 10 + self.rowsPerTable = 10 + self.startTime = 1520000010000 + + tdDnodes.stop(1) + tdDnodes.deploy(1) + tdDnodes.start(1) + + tdLog.info("================= step0") + tdSql.execute('reset query cache') + tdLog.info("drop database db if exits") + tdSql.execute('drop database if exists db') + tdLog.info("================= step1") + tdSql.execute('create database db maxtables 4') + tdLog.sleep(5) + tdSql.execute('use db') + + tdLog.info("================= step1") + tdLog.info("create 1 super table") + tdSql.execute('create table stb (ts timestamp, i int) \ + tags (tin int, tfl float, tbg bigint, tdo double, tbi binary(10), tbl bool)') + + tdLog.info("================= step2") + tdLog.info("create %d tables" % self.ntables) + for tid in range(1, self.ntables + 1): + tdSql.execute( + 'create table tb%d using stb tags(%d,%f,%ld,%f,\'%s\',%d)' % + (tid, + tid % + 3, + 1.2 * + tid, + self.startTime + + tid, + 1.22 * + tid, + 't' + + str(tid), + tid % + 2)) + tdLog.sleep(5) + + tdLog.info("================= step3") + tdLog.info( + "insert %d data in to each %d tables" % + (self.rowsPerTable, self.ntables)) + for rid in range(1, self.rowsPerTable + 1): + sqlcmd = ['insert into'] + for tid in range(1, self.ntables + 1): + sqlcmd.append( + 'tb%d values(%ld,%d)' % + (tid, self.startTime + rid, rid)) + tdSql.execute(" ".join(sqlcmd)) + tdSql.query('select count(*) from stb') + tdSql.checkData(0, 0, self.rowsPerTable * self.ntables) + + tdLog.info("================= step6") + tdLog.info("group and filter by tag1 int") + tdSql.query('select max(i) from stb where tbl=0 group by tin') + tdSql.checkRows(3) + tdSql.execute('reset query cache') + tdSql.query('select max(i) from stb where tbl=true group by tin') + tdSql.checkData(2, 0, self.rowsPerTable) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/datatype.py b/tests/pytest/tag_lite/datatype.py new file mode 100644 index 0000000000000000000000000000000000000000..1fcf7ce19eab0409fc28b0eed0222c8d5e3a7f5b --- /dev/null +++ b/tests/pytest/tag_lite/datatype.py @@ -0,0 +1,135 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + self.ntables = 10 + self.rowsPerTable = 10 + self.startTime = 1520000010000 + + tdDnodes.stop(1) + tdDnodes.deploy(1) + tdDnodes.start(1) + + tdLog.info("================= step0") + tdSql.execute('reset query cache') + tdLog.info("drop database db if exits") + tdSql.execute('drop database if exists db') + tdLog.info("================= step1") + tdSql.execute('create database db maxtables 4') + tdLog.sleep(5) + tdSql.execute('use db') + + tdLog.info("================= step1") + tdLog.info("create 1 super table") + tdSql.execute('create table stb (ts timestamp, i int) \ + tags (tin int, tfl float, tbg bigint, tdo double, tbi binary(10), tbl bool)') + + tdLog.info("================= step2") + tdLog.info("create %d tables" % self.ntables) + for tid in range(1, self.ntables + 1): + tdSql.execute( + 'create table tb%d using stb tags(%d,%f,%ld,%f,\'%s\',%d)' % + (tid, + tid % + 3, + 1.2 * + tid, + self.startTime + + tid, + 1.22 * + tid, + 't' + + str(tid), + tid % + 2)) + tdLog.sleep(5) + + tdLog.info("================= step3") + tdLog.info( + "insert %d data in to each %d tables" % + (self.rowsPerTable, self.ntables)) + for rid in range(1, self.rowsPerTable + 1): + sqlcmd = ['insert into'] + for tid in range(1, self.ntables + 1): + sqlcmd.append( + 'tb%d values(%ld,%d)' % + (tid, self.startTime + rid, rid)) + tdSql.execute(" ".join(sqlcmd)) + tdSql.query('select count(*) from stb') + tdSql.checkData(0, 0, self.rowsPerTable * self.ntables) + + tdLog.info("================= step4") + tdLog.info("drop one tag") + tdSql.execute('alter table stb drop tag tbi') + tdLog.info("insert %d data in to each %d tables" % (2, self.ntables)) + for rid in range(self.rowsPerTable + 1, self.rowsPerTable + 3): + sqlcmd = ['insert into'] + for tid in range(1, self.ntables + 1): + sqlcmd.append( + 'tb%d values(%ld,%d)' % + (tid, self.startTime + rid, rid)) + tdSql.execute(" ".join(sqlcmd)) + self.rowsPerTable += 2 + tdSql.query('select count(*) from stb') + tdSql.checkData(0, 0, self.rowsPerTable * self.ntables) + tdSql.query('describe tb1') + tdSql.checkRows(2 + 5) + + tdLog.info("================= step5") + tdLog.info("add one tag") + tdSql.execute('alter table stb add tag tnc nchar(10)') + for tid in range(1, self.ntables + 1): + tdSql.execute('alter table tb%d set tag tnc=\"%s\"' % + (tid, str(tid * 1.2))) + tdLog.info("insert %d data in to each %d tables" % (2, self.ntables)) + for rid in range(self.rowsPerTable + 1, self.rowsPerTable + 3): + sqlcmd = ['insert into'] + for tid in range(1, self.ntables + 1): + sqlcmd.append( + 'tb%d values(%ld,%d)' % + (tid, self.startTime + rid, rid)) + tdSql.execute(" ".join(sqlcmd)) + self.rowsPerTable += 2 + tdSql.query('select count(*) from stb') + tdSql.checkData(0, 0, self.rowsPerTable * self.ntables) + tdSql.query('describe tb1') + tdSql.checkRows(2 + 6) + + tdLog.info("================= step6") + tdLog.info("group and filter by tag1 int") + tdSql.query('select max(i) from stb where tbl=0 group by tin') + 
tdSql.checkRows(3) + tdSql.execute('reset query cache') + tdSql.query('select max(i) from stb where tbl=true group by tin') + tdSql.checkData(2, 0, self.rowsPerTable) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/filter.py b/tests/pytest/tag_lite/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..7d160a1b6115b53a1fb0ce53716b6195e9a8fccb --- /dev/null +++ b/tests/pytest/tag_lite/filter.py @@ -0,0 +1,270 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + #TSIM: system sh/stop_dnodes.sh + #TSIM: system sh/deploy.sh -n dnode1 -i 1 + #TSIM: system sh/exec.sh -n dnode1 -s start + #TSIM: + #TSIM: sleep 3000 + #TSIM: sql connect + #TSIM: + #TSIM: print ======================== dnode1 start + tdLog.info('======================== dnode1 start') + #TSIM: + dbPrefix = "ta_fi_db" + tbPrefix = "ta_fi_tb" + mtPrefix = "ta_fi_mt" + #TSIM: $tbNum = 10 + rowNum = 20 + #TSIM: $totalNum = 200 + #TSIM: + #TSIM: print =============== step1 + tdLog.info('=============== step1') + i = 0 + #TSIM: $db = $dbPrefix . 
$i + mt = "%s%d" % (mtPrefix, i) + #TSIM: + #TSIM: sql create database $db + #TSIM: sql use $db + #TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10)) + tdLog.info("create table %s (ts timestamp, tbcol int) TAGS(tgcol binary(10))" % mt) + tdSql.execute('create table %s (ts timestamp, tbcol int) TAGS(tgcol binary(10))' % mt) + #TSIM: + i = 0 + while (i < 5): + tb = "tbPrefix%d" % i + tdLog.info("create table %s using %s tags( '0' )" % (tb, mt)) + tdSql.execute("create table %s using %s tags( '0' )" % (tb, mt)) + + x = 0 + while (x < rowNum): + ms = "%dm" % x + tdLog.info("insert into %s values (now + %s , %d)" % (tb, ms, x)) + tdSql.execute("insert into %s values (now + %s , %d)" % (tb, ms, x)) + x = x + 1 + i = i + 1 + + while (i < 10): + tb = "%s%d" % (tbPrefix , i) + #TSIM: sql create table $tb using $mt tags( '1' ) + tdLog.info("create table %s using %s tags( '1' )" % (tb, mt)) + tdSql.execute("create table %s using %s tags( '1' )" % (tb, mt)) + x = 0 + while (x < rowNum): + ms = "%dm" % x + #TSIM: sql insert into $tb values (now + $ms , $x ) + tdLog.info("insert into %s values (now + %s, %d )" % (tb, ms, x)) + tdSql.execute("insert into %s values (now + %s, %d )" % (tb, ms, x)) + x = x + 1 + i = i + 1 + #TSIM: + #TSIM: print =============== step2 + tdLog.info('=============== step2') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = '1'" % mt) + tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = '1'" % mt) + #TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6))) + #TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tg = '1' -x step2 + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tg = '1' -x step2" % mt) + tdSql.error("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tg = '1'" % mt) + #TSIM: return -1 + #TSIM: step2: + #TSIM: + #TSIM: print =============== step3 + tdLog.info('=============== step3') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where noexist = '1' -x step3 + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where noexist = '1' -x step3" % mt) + tdSql.error("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where noexist = '1'" % mt) + #TSIM: return -1 + #TSIM: step3: + #TSIM: + #TSIM: print =============== step4 + tdLog.info('=============== step4') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tbcol = '1'" % mt) + 
tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tbcol = '1'" % mt) + #TSIM: if $rows != 1 then + tdLog.info('tdSql.checkRow(1)') + tdSql.checkRows(1) + #TSIM: return -1 + #TSIM: endi + #TSIM: if $data00 != 10 then + tdLog.info('tdSql.checkData(0, 0, 10)') + tdSql.checkData(0, 0, 10) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: print =============== step5 + tdLog.info('=============== step5') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt) + tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt) + #TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0,0), tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0,5 ), tdSql.getData(0, 6))) + #TSIM: if $data00 != 200 then + tdLog.info('tdSql.checkData(0, 0, 200)') + tdSql.checkData(0, 0, 200) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: print =============== step6 + tdLog.info('=============== step6') + #TSIM: sql select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -x step6 + tdLog.info("select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s -x step6" % mt) + tdSql.error("select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt) + #TSIM: return -1 + #TSIM: step6: + #TSIM: + #TSIM: print =============== step7 + tdLog.info('=============== step7') + #TSIM: sql select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from $mt -x step7 + tdLog.info("select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from %s -x step7" % mt) + tdSql.error("select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from %s" % mt) + #TSIM: return -1 + #TSIM: step7: + #TSIM: + #TSIM: print =============== step8 + tdLog.info('=============== step8') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tbcol + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s by tbcol" % mt) + tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tbcol" % mt) + #TSIM: + #TSIM: print =============== step9 + tdLog.info('=============== step9') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by noexist -x step9 + tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by noexist -x step9" % mt) + tdSql.error('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by noexist ' % mt) + #TSIM: return -1 + #TSIM: step9: + #TSIM: + #TSIM: print =============== step10 + tdLog.info('=============== step10') + #TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol + tdLog.info('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), 
max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % mt) + tdSql.query('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % mt) + #TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 + tdLog.info('$data00 $data01 $data02 $data03 $data04 $data05 $data06') + #TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: print =============== step11 + tdLog.info('=============== step11') + #TSIM: sql select count(tbcol) as c from $mt group by tbcol + tdLog.info('select count(tbcol) as c from %s group by tbcol' % mt) + tdSql.query('select count(tbcol) as c from %s group by tbcol' % mt) + #TSIM: + #TSIM: print =============== step12 + tdLog.info('=============== step12') + #TSIM: sql select count(tbcol) as c from $mt group by noexist -x step12 + tdLog.info('select count(tbcol) as c from %s group by noexist -x step12' % mt) + tdSql.error('select count(tbcol) as c from %s group by noexist2' % mt) + #TSIM: return -1 + #TSIM: step12: + #TSIM: + #TSIM: print =============== step13 + tdLog.info('=============== step13') + #TSIM: sql select count(tbcol) as c from $mt group by tgcol + tdLog.info('select count(tbcol) as c from %s group by tgcol' % mt) + tdSql.query('select count(tbcol) as c from %s group by tgcol' % mt) + #TSIM: print $data00 + tdLog.info('$data00') + #TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: print =============== step14 + tdLog.info('=============== step14') + #TSIM: sql select count(tbcol) as c from $mt where ts > 1000 group by tgcol + tdLog.info('select count(tbcol) as c from %s where ts > 1000 group by tgcol' % mt) + tdSql.query('select count(tbcol) as c from %s where ts > 1000 group by tgcol' % mt) + #TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +# tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6))) + #TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + #TSIM: print expect 100, actual $data00 + tdLog.info('expect 100, actual $data00') + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: print =============== step15 + tdLog.info('=============== step15') + #TSIM: sql select count(tbcol) as c from $mt where noexist < 1 group by tgcol -x step15 + tdLog.info('select count(tbcol) as c from %s where noexist < 1 group by tgcol -x step15' % mt) + tdSql.error('select count(tbcol) as c from %s where noexist < 1 group by tgcol5' % mt) + #TSIM: return -1 + #TSIM: step15: + #TSIM: + #TSIM: print =============== step16 + tdLog.info('=============== step16') + #TSIM: sql select count(tbcol) as c from $mt where tgcol = '1' group by tgcol + tdLog.info("select count(tbcol) as c from %s where tgcol = '1' group by tgcol" % mt) + tdSql.query("select count(tbcol) as c from %s where tgcol = '1' group by tgcol" % mt) + #TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +# tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6))) + #TSIM: if $data00 != 100 then + tdLog.info('tdSql.checkData(0, 0, 100)') + tdSql.checkData(0, 0, 100) + #TSIM: return -1 + #TSIM: endi + 
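+        # Grounded note on steps 10-16: the fixture built in step1 uses two tag values ('0' and
+        # '1') with five child tables per tag and 20 rows per table, so every per-tag group in the
+        # group-by-tgcol queries is expected to report count(tbcol) = 100; that is why each of the
+        # checkData(0, 0, 100) assertions above expects 100.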
#TSIM: + #TSIM: print =============== clear + tdLog.info('=============== clear') + #TSIM: sql drop database $db + tdLog.info('drop database db') + tdSql.execute('drop database db') + #TSIM: sql show databases + tdLog.info('show databases') + tdSql.query('show databases') + #TSIM: if $rows != 0 then + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(0) + #TSIM: return -1 + #TSIM: endi + #TSIM: + #TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT +# convert end + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index a4129be34cdcadf5836359d5100cd0c0603b0c41..9d76b0a70e56d9a749b8ba5de7ca57c896809aa6 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -70,8 +70,6 @@ if __name__ == "__main__": toBeKilled = "valgrind.bin" killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled -# os.system(killCmd) -# time.sleep(1) psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) @@ -81,7 +79,8 @@ if __name__ == "__main__": time.sleep(1) processID = subprocess.check_output(psCmd, shell=True) - tdLog.exit('stop All dnodes') + tdLog.info('stop All dnodes') + sys.exit(0) tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py index 9a4f0fc98a88dc87ff3521d666052e765669b0ae..f65b0dfde3715221b26227f9224656347dc52709 100644 --- a/tests/pytest/util/cases.py +++ b/tests/pytest/util/cases.py @@ -57,7 +57,7 @@ class TDCases: runNum += 1 continue - tdLog.notice("total %d Linux test case(s) executed" % (runNum)) + tdLog.info("total %d Linux test case(s) executed" % (runNum)) def runOneLinux(self, conn, fileName): testModule = self.__dynamicLoadModule(fileName) @@ -71,13 +71,11 @@ class TDCases: case.run() except Exception as e: tdLog.notice(repr(e)) - tdLog.exit("%s failed: %s" % (__file__, fileName)) + tdLog.exit("%s failed" % (fileName)) case.stop() runNum += 1 continue - tdLog.success("total %d Linux test case(s) executed" % (runNum)) - def runAllWindows(self, conn): # TODO: load all Windows cases here runNum = 0 diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 23adab2c474962af95107006c8bb2c342cc688b1..727016adb320e917c4d68b3ed1486577e423b1f9 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -38,9 +38,9 @@ class TDSimClient: tdLog.exit(cmd) def deploy(self): - self.logDir = "%s/sim/psim/log" % (self.path,) - self.cfgDir = "%s/sim/psim/cfg" % (self.path) - self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path) + self.logDir = "%s/pysim/psim/log" % (self.path,) + self.cfgDir = "%s/pysim/psim/cfg" % (self.path) + self.cfgPath = "%s/pysim/psim/cfg/taos.cfg" % (self.path) cmd = "rm -rf " + self.logDir if os.system(cmd) != 0: @@ -100,10 +100,11 @@ class TDDnode: self.valgrind = value def deploy(self): - self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) - self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) - self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index) - self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (self.path, self.index) + self.logDir = "%s/pysim/dnode%d/log" % (self.path, self.index) + self.dataDir = "%s/pysim/dnode%d/data" % (self.path, self.index) + self.cfgDir = "%s/pysim/dnode%d/cfg" % (self.path, self.index) + self.cfgPath = 
"%s/pysim/dnode%d/cfg/taos.cfg" % ( + self.path, self.index) cmd = "rm -rf " + self.dataDir if os.system(cmd) != 0: @@ -177,21 +178,42 @@ class TDDnode: (self.index, self.cfgPath)) def start(self): - binPath = os.path.dirname(os.path.realpath(__file__)) - binPath = binPath + "/../../../debug/" - binPath = os.path.realpath(binPath) - binPath += "/build/bin/" + selfPath = os.path.dirname(os.path.realpath(__file__)) + binPath = "" + + if ("TDinternal" in selfPath): + projPath = selfPath + "/../../../../" + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("community" not in rootRealPath): + binPath = os.path.join(root, "taosd") + break + else: + projPath = selfPath + "/../../../" + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + binPath = os.path.join(root, "taosd") + break + + if (binPath == ""): + tdLog.exit("taosd not found!s") + else: + tdLog.info("taosd found in %s" % rootRealPath) if self.deployed == 0: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % ( + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" - cmd = "nohup %s %staosd -c %s 2>&1 & " % ( + cmd = "nohup %s %s -c %s 2>&1 & " % ( valgrindCmdline, binPath, self.cfgDir) print(cmd) @@ -201,8 +223,8 @@ class TDDnode: self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - tdLog.debug("wait 4 seconds for the dnode:%d to start." % (self.index)) - time.sleep(4) + tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index)) + time.sleep(5) def stop(self): if self.valgrind == 0: @@ -211,22 +233,17 @@ class TDDnode: toBeKilled = "valgrind.bin" if self.running != 0: - killCmd = "ps -ef|grep -w %s| grep '%s' | grep -v grep | awk '{print $2}' | xargs kill -INT" % ( - toBeKilled, self.cfgDir) - psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -INT %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + self.running = 0 tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) - tdLog.debug( - "wait 2 seconds for the dnode:%d to stop." 
% - (self.index)) - time.sleep(2) def forcestop(self): if self.valgrind == 0: @@ -235,21 +252,17 @@ class TDDnode: toBeKilled = "valgrind.bin" if self.running != 0: - killCmd = "ps -ef|grep -w %s| grep '%s' | grep -v grep | awk '{print $2}' | xargs kill -KILL" % ( - toBeKilled, self.cfgDir) psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -KILL %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + self.running = 0 tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) - tdLog.debug( - "wait 2 seconds for the dnode:%d to stop." % - (self.index)) - time.sleep(2) def startIP(self): cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) @@ -268,11 +281,11 @@ class TDDnode: tdLog.exit(cmd) def getDnodeRootDir(self, index): - dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index) + dnodeRootDir = "%s/pysim/psim/dnode%d" % (self.path, index) return dnodeRootDir def getDnodesRootDir(self): - dnodesRootDir = "%s/sim/psim" % (self.path) + dnodesRootDir = "%s/pysim/psim" % (self.path) return dnodesRootDir @@ -291,21 +304,21 @@ class TDDnodes: self.dnodes.append(TDDnode(10)) def init(self, path): - killCmd = "ps -ef|grep -w taosd | grep -v grep | awk '{print $2}' | xargs kill -KILL" psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -KILL %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - killCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs kill -KILL" psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -KILL %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") binPath = os.path.dirname(os.path.realpath(__file__)) binPath = binPath + "/../../../debug/" @@ -386,38 +399,38 @@ class TDDnodes: tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) def stopAll(self): - tdLog.debug("stop all dnodes") + tdLog.info("stop all dnodes") for i in range(len(self.dnodes)): self.dnodes[i].stop() psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") if processID: cmd = "sudo systemctl stop taosd" os.system(cmd) # if os.system(cmd) != 0 : # tdLog.exit(cmd) - killCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}' | xargs kill -KILL" psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -KILL %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + 
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - killCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs kill -KILL" psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): + killCmd = "kill -KILL %s" % processID os.system(killCmd) time.sleep(1) - processID = subprocess.check_output(psCmd, shell=True) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") # if os.system(cmd) != 0 : # tdLog.exit(cmd) def getDnodesRootDir(self): - dnodesRootDir = "%s/sim" % (self.path) + dnodesRootDir = "%s/pysim" % (self.path) return dnodesRootDir def getSimCfgPath(self): diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 0e7e186206b85f38babe3a9c0aceb42c996c0820..1cc0eddbfc72dd5b266bbf6ac6e975092dc6788e 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -15,6 +15,7 @@ import sys import os import time import datetime +import inspect from util.log import * @@ -44,9 +45,14 @@ class TDSql: except BaseException: expectErrNotOccured = False if expectErrNotOccured: - tdLog.exit("failed: sql:%.40s, expect error not occured" % (sql)) + frame = inspect.stack()[1] + callerModule = inspect.getmodule(frame[0]) + callerFilename = callerModule.__file__ + tdLog.exit( + "%s failed: sql:%s, expect error not occured" % + (callerFilename, sql)) else: - tdLog.info("sql:%.40s, expect error occured" % (sql)) + tdLog.info("sql:%s, expect error occured" % (sql)) def query(self, sql): self.sql = sql @@ -62,64 +68,74 @@ class TDSql: def checkRows(self, expectRows): if self.queryRows != expectRows: + frame = inspect.stack()[1] + callerModule = inspect.getmodule(frame[0]) + callerFilename = callerModule.__file__ tdLog.exit( - "failed: sql:%.40s, queryRows:%d != expect:%d" % - (self.sql, self.queryRows, expectRows)) - tdLog.info("sql:%.40s, queryRows:%d == expect:%d" % + "%s failed: sql:%s, queryRows:%d != expect:%d" % + (callerFilename, self.sql, self.queryRows, expectRows)) + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) def checkData(self, row, col, data): + frame = inspect.stack()[1] + callerModule = inspect.getmodule(frame[0]) + callerFilename = callerModule.__file__ + if row < 0: tdLog.exit( - "failed: sql:%.40s, row:%d is smaller than zero" % - (self.sql, row)) + "%s failed: sql:%s, row:%d is smaller than zero" % + (callerFilename, self.sql, row)) if col < 0: tdLog.exit( - "failed: sql:%.40s, col:%d is smaller than zero" % - (self.sql, col)) - if row >= self.queryRows: + "%s failed: sql:%s, col:%d is smaller than zero" % + (callerFilename, self.sql, col)) + if row > self.queryRows: tdLog.exit( - "failed: sql:%.40s, row:%d is larger than queryRows:%d" % - (self.sql, row, self.queryRows)) - if col >= self.queryCols: + "%s failed: sql:%s, row:%d is larger than queryRows:%d" % + (callerFilename, self.sql, row, self.queryRows)) + if col > self.queryCols: tdLog.exit( - "failed: sql:%.40s, col:%d is larger than queryRows:%d" % - (self.sql, col, self.queryCols)) + "%s failed: sql:%s, col:%d is larger than queryCols:%d" % + (callerFilename, self.sql, col, self.queryCols)) if self.queryResult[row][col] != data: - tdLog.exit( - "failed: sql:%.40s row:%d col:%d data:%s != expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.exit("%s failed: sql:%s row:%d col:%d data:%s != expect:%s" % ( + callerFilename, 
self.sql, row, col, self.queryResult[row][col], data)) if data is None: - tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) elif isinstance(data, str): - tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) elif isinstance(data, datetime.date): - tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) else: - tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%d" % + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % (self.sql, row, col, self.queryResult[row][col], data)) def getData(self, row, col): + frame = inspect.stack()[1] + callerModule = inspect.getmodule(frame[0]) + callerFilename = callerModule.__file__ + if row < 0: tdLog.exit( - "failed: sql:%.40s, row:%d is smaller than zero" % - (self.sql, row)) + "%s failed: sql:%s, row:%d is smaller than zero" % + (callerFilename, self.sql, row)) if col < 0: tdLog.exit( - "failed: sql:%.40s, col:%d is smaller than zero" % - (self.sql, col)) - if row >= self.queryRows: + "%s failed: sql:%s, col:%d is smaller than zero" % + (callerFilename, self.sql, col)) + if row > self.queryRows: tdLog.exit( - "failed: sql:%.40s, row:%d is larger than queryRows:%d" % - (self.sql, row, self.queryRows)) - if col >= self.queryCols: + "%s failed: sql:%s, row:%d is larger than queryRows:%d" % + (callerFilename, self.sql, row, self.queryRows)) + if col > self.queryCols: tdLog.exit( - "failed: sql:%.40s, col:%d is larger than queryRows:%d" % - (self.sql, col, self.queryCols)) + "%s failed: sql:%s, col:%d is larger than queryCols:%d" % + (callerFilename, self.sql, col, self.queryCols)) return self.queryResult[row][col] def executeTimes(self, sql, times): @@ -137,9 +153,13 @@ class TDSql: def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: - tdLog.exit("failed: sql:%.40s, affectedRows:%d != expect:%d" % - (self.sql, self.affectedRows, expectAffectedRows)) - tdLog.info("sql:%.40s, affectedRows:%d == expect:%d" % + frame = inspect.stack()[1] + callerModule = inspect.getmodule(frame[0]) + callerFilename = callerModule.__file__ + + tdLog.exit("%s failed: sql:%s, affectedRows:%d != expect:%d" % ( + callerFilename, self.sql, self.affectedRows, expectAffectedRows)) + tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) diff --git a/tests/pytest/valgrind-test.sh b/tests/pytest/valgrind-test.sh index 1d65830fa30015a853483d5aab75075fd101139f..55bb03c9664a0a3be0d37ce0d392fc8171386d26 100755 --- a/tests/pytest/valgrind-test.sh +++ b/tests/pytest/valgrind-test.sh @@ -1,35 +1,42 @@ #!/bin/bash -python3 ./test.py $1 -f insert/basic.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/int.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/float.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/bigint.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/bool.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/double.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/smallint.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/tinyint.py -python3 ./test.py $1 -s && sleep 1 -python3 
./test.py $1 -f insert/binary.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/date.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f insert/nchar.py -python3 ./test.py $1 -s && sleep 1 +# insert +python3 ./test.py -g -f insert/basic.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/int.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/float.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/bigint.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/bool.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/double.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/smallint.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/tinyint.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/binary.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/date.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/nchar.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f insert/multi.py +python3 ./test.py -g -s && sleep 1 -python3 ./test.py $1 -f table/column_name.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f table/column_num.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f table/db_table.py -python3 ./test.py $1 -s && sleep 1 +# table +python3 ./test.py -g -f table/column_name.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f table/column_num.py +python3 ./test.py -g -s && sleep 1 +python3 ./test.py -g -f table/db_table.py +python3 ./test.py -g -s && sleep 1 -python3 ./test.py $1 -f import_merge/importCacheFileT.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importDataLastSub.py +# import +python3 ./test.py -g -f import_merge/importDataLastSub.py +python3 ./test.py -g -s && sleep 1 + +#tag +python3 ./test.py $1 -f tag_lite/filter.py python3 ./test.py $1 -s && sleep 1 diff --git a/tests/script/general/db/basic4.sim b/tests/script/general/db/basic4.sim index 8494b0358a723589525684785d70c8039d43be03..a0a9aaa627fd643b2863b14a4cacb93f092104b0 100644 --- a/tests/script/general/db/basic4.sim +++ b/tests/script/general/db/basic4.sim @@ -43,9 +43,6 @@ endi if $data01 != 4 then return -1 endi -if $data02 != ready then - return -1 -endi print =============== drop table sql drop table d1.t1 @@ -82,9 +79,6 @@ endi if $data01 != 3 then return -1 endi -if $data02 != ready then - return -1 -endi print =============== drop all table sql drop table d1.t2 diff --git a/tests/script/general/db/basic5.sim b/tests/script/general/db/basic5.sim index 3c59144387c32d46183ab9ef555bff6610193f24..82b9bf9bf4dadf2c0215626d8905a64cc0962fe3 100644 --- a/tests/script/general/db/basic5.sim +++ b/tests/script/general/db/basic5.sim @@ -43,9 +43,6 @@ endi if $data01 != 4 then return -1 endi -if $data02 != ready then - return -1 -endi print =============== drop database sql drop database d1 diff --git a/tests/script/general/db/delete.sim b/tests/script/general/db/delete.sim new file mode 100644 index 0000000000000000000000000000000000000000..f95676088bf8a6b9fa67aad863ddf2054f327f86 --- /dev/null +++ b/tests/script/general/db/delete.sim @@ -0,0 +1,65 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10 + +print ========= start dnodes +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ======== step1 +sql 
create database db blocks 2 maxtables 1000 +sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int) + +$tbPrefix = db.t +$i = 0 +while $i < 2000 + $tb = $tbPrefix . $i + sql create table $tb using db.mt tags( $i ) + $i = $i + 1 +endw + +sql show db.vgroups +if $rows != 2 then + return -1 +endi + +return +print ======== step2 +sleep 1000 +sql drop database db +sql show databases +if $rows != 0 then + return -1 +endi + +sleep 1000 +sql show dnodes +print dnode1 openVnodes $data2_1 +if $data2_1 != 0 then + return -1 +endi + +print ======= step3 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 1000 +system sh/exec.sh -n dnode1 -s start + +$x = 0 +step3: + $x = $x + 1 + sleep 2000 + if $x == 30 then + return -1 + endi + +sql show mnodes +print dnode1 role $data2_1 +if $data2_1 != master then + goto step3 +endi + +sleep 1000 + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/stable/dnode3.sim b/tests/script/general/stable/dnode3.sim new file mode 100644 index 0000000000000000000000000000000000000000..38d05a924b819acb5551924c613f8b827a322b6e --- /dev/null +++ b/tests/script/general/stable/dnode3.sim @@ -0,0 +1,223 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode2 -c walLevel -v 0 +system sh/cfg.sh -n dnode3 -c walLevel -v 0 +system sh/cfg.sh -n dnode4 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 +system sh/exec.sh -n dnode1 -s start + +sql connect + +sql create dnode $hostname2 +sql create dnode $hostname3 +sql create dnode $hostname4 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +$x = 0 +createDnode: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show dnodes; +if $data4_2 == offline then + goto createDnode +endi +if $data4_3 == offline then + goto createDnode +endi +if $data4_4 == offline then + goto createDnode +endi + +print ======================== dnode1 start + +$dbPrefix = r3v3_db +$tbPrefix = r3v3_tb +$mtPrefix = r3v3_mt +$tbNum = 10 +$rowNum = 20 +$totalNum = 200 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql create database $db maxTables 4 +sql use $db +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $val = $x * 60000 + $ms = 1519833600000 + $val + sql insert into $tb values ($ms , $x ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql show vgroups +print vgroups ==> $rows +if $rows != 3 then + return -1 +endi + +sleep 100 + +print =============== step2 +$i = 1 +$tb = $tbPrefix . 
$i + +sql select count(*) from $tb +print ===> $data00 +if $data00 != $rowNum then + return -1 +endi + +sql select count(tbcol) from $tb +print ===> $data00 +if $data00 != $rowNum then + return -1 +endi + +print =============== step3 +sql select count(tbcol) from $tb where ts <= 1519833840000 +print ===> $data00 +if $data00 != 5 then + return -1 +endi + +print =============== step4 +sql select count(tbcol) as b from $tb +print ===> $data00 +if $data00 != $rowNum then + return -1 +endi + +print =============== step5 +sql select count(tbcol) as b from $tb interval(1m) +print ===> $data01 +if $data01 != 1 then + return -1 +endi + +sql select count(tbcol) as b from $tb interval(1d) +print ===> $data01 +if $data01 != $rowNum then + return -1 +endi + +print =============== step6 +sql select count(tbcol) as b from $tb where ts <= 1519833840000 interval(1m) +print ===> $data01 +if $data01 != 1 then + return -1 +endi +if $rows != 5 then + return -1 +endi + +print =============== step7 +print select count(*) from $mt +sql select count(*) from $mt +print ===> $data00 +if $data00 != $totalNum then + return -1 +endi + +sql select count(tbcol) from $mt +print ===> $data00 +if $data00 != $totalNum then + return -1 +endi + +print =============== step8 +sql select count(tbcol) as c from $mt where ts <= 1519833840000 +print ===> $data00 +if $data00 != 50 then + return -1 +endi + +sql select count(tbcol) as c from $mt where tgcol < 5 +print ===> $data00 +if $data00 != 100 then + return -1 +endi + +sql select count(tbcol) as c from $mt where tgcol < 5 and ts <= 1519833840000 +print ===> $data00 +if $data00 != 25 then + return -1 +endi + +print =============== step9 +sql select count(tbcol) as b from $mt interval(1m) +print ===> $data01 +if $data01 != 10 then + return -1 +endi + +sql select count(tbcol) as b from $mt interval(1d) +print ===> $data01 +if $data01 != 200 then + return -1 +endi + +print =============== step10 +sql select count(tbcol) as b from $mt group by tgcol +print ===> $data00 +if $data00 != $rowNum then + return -1 +endi + +if $rows != $tbNum then + return -1 +endi + +print =============== step11 +sql select count(tbcol) as b from $mt where ts <= 1519833840000 interval(1m) group by tgcol +print ===> $data01 +if $data01 != 1 then + return -1 +endi +if $rows != 50 then + return -1 +endi + +print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT +system sh/exec.sh -n dnode6 -s stop -x SIGINT +system sh/exec.sh -n dnode7 -s stop -x SIGINT +system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/stable/testSuite.sim b/tests/script/general/stable/testSuite.sim index a3696c8706e6c1913d06df31866f9ea0a37620b0..e786ac9ca4d1b3e8aea7cec9625fe2fd1481377b 100644 --- a/tests/script/general/stable/testSuite.sim +++ b/tests/script/general/stable/testSuite.sim @@ -1,4 +1,5 @@ run general/stable/disk.sim +run general/stable/dnode3.sim run general/stable/metrics.sim run general/stable/values.sim run general/stable/vnode3.sim diff --git a/tests/script/general/table/column_value.sim b/tests/script/general/table/column_value.sim index bd98d3b2909eec26ceadbebdf35702f622622c50..72f5faee63f7f3c30505f15c66d2b7296db8c83f 100644 --- a/tests/script/general/table/column_value.sim +++ 
b/tests/script/general/table/column_value.sim @@ -34,13 +34,12 @@ if $rows != 0 then endi print =============== step2 -sql create table $tb (ts timestamp, speed bigint, v1 binary(1500), v2 binary(1500), v3 binary(1500), v4 binary(500), v5 binary(500)) -x step2 - return -1 -step2: +sql create table $tb (ts timestamp, speed bigint, v1 binary(1500), v2 binary(1500), v3 binary(1500), v4 binary(500), v5 binary(500)) sql show tables -if $rows != 0 then +if $rows != 1 then return -1 endi +sql drop table $tb print =============== step3 sql create table $tb (ts timestamp, speed float, v1 binary(100), v2 binary(100), v3 binary(100), v4 binary(100), v5 binary(100)) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index db7a3d6757cf5eebbfcabd62a4f25da9e282d3ee..272183b26d4c432d80002e454c7809601af75820 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -15,7 +15,7 @@ cd ../../../debug; make ./test.sh -f general/cache/restart_metrics.sim ./test.sh -f general/cache/restart_table.sim -./test.sh -f general/connection/connections.sim +./test.sh -f general/connection/connection.sim ./test.sh -f general/column/commit.sim ./test.sh -f general/column/metrics.sim @@ -49,6 +49,7 @@ cd ../../../debug; make ./test.sh -f general/db/basic3.sim ./test.sh -f general/db/basic4.sim ./test.sh -f general/db/basic5.sim +./test.sh -f general/db/delete.sim ./test.sh -f general/db/delete_reuse1.sim ./test.sh -f general/db/delete_reuse2.sim ./test.sh -f general/db/delete_reusevnode.sim @@ -84,10 +85,10 @@ cd ../../../debug; make ./test.sh -f general/import/basic.sim ./test.sh -f general/import/commit.sim ./test.sh -f general/import/large.sim -#hongze ./test.sh -f general/import/replica1.sim +#liao ./test.sh -f general/import/replica1.sim ./test.sh -f general/insert/basic.sim -#hongze ./test.sh -f general/insert/insert_drop.sim +#liao ./test.sh -f general/insert/insert_drop.sim ./test.sh -f general/insert/query_block1_memory.sim ./test.sh -f general/insert/query_block2_memory.sim ./test.sh -f general/insert/query_block1_file.sim @@ -149,6 +150,7 @@ cd ../../../debug; make #./test.sh -f general/parser/bug.sim ./test.sh -f general/stable/disk.sim +#liao ./test.sh -f general/stable/dnode3.sim ./test.sh -f general/stable/metrics.sim ./test.sh -f general/stable/values.sim ./test.sh -f general/stable/vnode3.sim @@ -239,52 +241,52 @@ cd ../../../debug; make ./test.sh -u -f unique/account/user_create.sim ./test.sh -u -f unique/account/user_len.sim -#./test.sh -u -f unique/big/balance.sim -#slguan ./test.sh -u -f unique/big/maxvnodes.sim +#liao wait ./test.sh -u -f unique/big/balance.sim +#liao wait ./test.sh -u -f unique/big/maxvnodes.sim ./test.sh -u -f unique/big/tcp.sim -##./test.sh -u -f unique/cluster/balance1.sim -##./test.sh -u -f unique/cluster/balance2.sim -##./test.sh -u -f unique/cluster/balance3.sim -#./test.sh -u -f unique/cluster/cache.sim +./test.sh -u -f unique/cluster/balance1.sim +./test.sh -u -f unique/cluster/balance2.sim +./test.sh -u -f unique/cluster/balance3.sim +./test.sh -u -f unique/cluster/cache.sim ./test.sh -u -f unique/column/replica3.sim -#./test.sh -u -f unique/db/commit.sim -#./test.sh -u -f unique/db/delete.sim -#./test.sh -u -f unique/db/delete_part.sim -##./test.sh -u -f unique/db/replica_add12.sim -##./test.sh -u -f unique/db/replica_add13.sim -##./test.sh -u -f unique/db/replica_add23.sim -##./test.sh -u -f unique/db/replica_reduce21.sim -##./test.sh -u -f unique/db/replica_reduce32.sim -##./test.sh -u -f unique/db/replica_reduce31.sim 
-##./test.sh -u -f unique/db/replica_part.sim +#liao wait ./test.sh -u -f unique/db/commit.sim +./test.sh -u -f unique/db/delete.sim +./test.sh -u -f unique/db/delete_part.sim +./test.sh -u -f unique/db/replica_add12.sim +#hongze ./test.sh -u -f unique/db/replica_add13.sim +#hongze wait ./test.sh -u -f unique/db/replica_add23.sim +#hongze wait ./test.sh -u -f unique/db/replica_reduce21.sim +./test.sh -u -f unique/db/replica_reduce32.sim +#hongze wait /test.sh -u -f unique/db/replica_reduce31.sim +./test.sh -u -f unique/db/replica_part.sim -##./test.sh -u -f unique/dnode/balance1.sim -##./test.sh -u -f unique/dnode/balance2.sim -##./test.sh -u -f unique/dnode/balance3.sim -##./test.sh -u -f unique/dnode/balancex.sim -##./test.sh -u -f unique/dnode/offline1.sim -##./test.sh -u -f unique/dnode/offline2.sim -#./test.sh -u -f unique/dnode/remove1.sim -#./test.sh -u -f unique/dnode/remove2.sim -#./test.sh -u -f unique/dnode/vnode_clean.sim +./test.sh -u -f unique/dnode/balance1.sim +./test.sh -u -f unique/dnode/balance2.sim +./test.sh -u -f unique/dnode/balance3.sim +./test.sh -u -f unique/dnode/balancex.sim +./test.sh -u -f unique/dnode/offline1.sim +#hongze wait ./test.sh -u -f unique/dnode/offline2.sim +./test.sh -u -f unique/dnode/remove1.sim +#hongze ./test.sh -u -f unique/dnode/remove2.sim +./test.sh -u -f unique/dnode/vnode_clean.sim -##./test.sh -u -f unique/http/admin.sim -##./test.sh -u -f unique/http/opentsdb.sim +./test.sh -u -f unique/http/admin.sim +./test.sh -u -f unique/http/opentsdb.sim -#./test.sh -u -f unique/import/replica2.sim -#./test.sh -u -f unique/import/replica3.sim +#liao wait ./test.sh -u -f unique/import/replica2.sim +#liao wait ./test.sh -u -f unique/import/replica3.sim -#./test.sh -u -f unique/metrics/balance_replica1.sim -#./test.sh -u -f unique/metrics/dnode2_stop.sim -#./test.sh -u -f unique/metrics/dnode2.sim -#./test.sh -u -f unique/metrics/dnode3.sim -#./test.sh -u -f unique/metrics/replica2_dnode4.sim -#./test.sh -u -f unique/metrics/replica2_vnode3.sim -#./test.sh -u -f unique/metrics/replica3_dnode6.sim -#./test.sh -u -f unique/metrics/replica3_vnode3.sim +#liao wait ./test.sh -u -f unique/stable/balance_replica1.sim +#liao wait ./test.sh -u -f unique/stable/dnode2_stop.sim +#liao wait ./test.sh -u -f unique/stable/dnode2.sim +#liao wait ./test.sh -u -f unique/stable/dnode3.sim +#liao wait ./test.sh -u -f unique/stable/replica2_dnode4.sim +#liao wait ./test.sh -u -f unique/stable/replica2_vnode3.sim +#liao wait ./test.sh -u -f unique/stable/replica3_dnode6.sim +#liao wait ./test.sh -u -f unique/stable/replica3_vnode3.sim ./test.sh -u -f unique/mnode/mgmt22.sim ./test.sh -u -f unique/mnode/mgmt23.sim @@ -293,16 +295,11 @@ cd ../../../debug; make ./test.sh -u -f unique/mnode/mgmt26.sim ./test.sh -u -f unique/mnode/mgmt33.sim ./test.sh -u -f unique/mnode/mgmt34.sim -#./test.sh -u -f unique/mnode/mgmtr2.sim -#./test.sh -u -f unique/mnode/secondIp.sim +./test.sh -u -f unique/mnode/mgmtr2.sim -##./test.sh -u -f unique/table/delete_part.sim - -#./test.sh -u -f unique/vnode/commit.sim -#./test.sh -u -f unique/vnode/many.sim -#./test.sh -u -f unique/vnode/replica2_basic.sim +./test.sh -u -f unique/vnode/many.sim ./test.sh -u -f unique/vnode/replica2_basic2.sim -#./test.sh -u -f unique/vnode/replica2_repeat.sim -##./test.sh -u -f unique/vnode/replica3_basic.sim -#./test.sh -u -f unique/vnode/replica3_repeat.sim -#./test.sh -u -f unique/vnode/replica3_vgroup.sim +./test.sh -u -f unique/vnode/replica2_repeat.sim +./test.sh -u -f unique/vnode/replica3_basic.sim 
+./test.sh -u -f unique/vnode/replica3_repeat.sim +./test.sh -u -f unique/vnode/replica3_vgroup.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index bc18479896993fa4a74febfdcd36387a011d95ba..bd85ce9704945c26ea75ba663a66a735038bb971 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -96,10 +96,10 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG -echo "dDebugFlag 199" >> $TAOS_CFG -echo "mDebugFlag 199" >> $TAOS_CFG -echo "sdbDebugFlag 199" >> $TAOS_CFG -echo "rpcDebugFlag 151" >> $TAOS_CFG +echo "dDebugFlag 135" >> $TAOS_CFG +echo "mDebugFlag 135" >> $TAOS_CFG +echo "sdbDebugFlag 135" >> $TAOS_CFG +echo "rpcDebugFlag 135" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 135" >> $TAOS_CFG diff --git a/tests/script/unique/big/maxvnodes.sim b/tests/script/unique/big/maxvnodes.sim index 3015d07b6dfdca7cc67e1621c096d8238b063129..2b56f51e599f72e9c2bf62251e82aff675722af1 100644 --- a/tests/script/unique/big/maxvnodes.sim +++ b/tests/script/unique/big/maxvnodes.sim @@ -1,19 +1,18 @@ system sh/stop_dnodes.sh $totalVnodes = 100 -$minVnodes = 48 -$maxVnodes = 52 +$minVnodes = 50 +$maxVnodes = 50 $maxTables = 4 $totalRows = $totalVnodes * $maxTables system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables +system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes -system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 100000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 100000 +system sh/deploy.sh -n dnode2 -i 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v $totalVnodes + print ========== prepare data system sh/exec_up.sh -n dnode1 -s start @@ -44,16 +43,7 @@ if $data00 != $totalRows then return -1 endi -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c walLevel -v 0 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 100 -system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256 -system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 100000 -system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 100000 -system sh/cfg.sh -n dnode2 -c maxShellConns -v 100000 -system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 100000 - -print ========== step2 +print ========== step3 sql create dnode $hostname2 system sh/exec_up.sh -n dnode2 -s start @@ -86,6 +76,8 @@ if $data00 != $totalRows then return -1 endi +return + system sh/exec_up.sh -n dnode1 -s stop -x SIGINT system sh/exec_up.sh -n dnode2 -s stop -x SIGINT system sh/exec_up.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/unique/cluster/balance1.sim b/tests/script/unique/cluster/balance1.sim index 10e05971d7f0f01a1641f615cef2097417793e9a..424a80d25aa397837fd4598ccd0a48c77e27f140 100644 --- a/tests/script/unique/cluster/balance1.sim +++ b/tests/script/unique/cluster/balance1.sim @@ -77,7 +77,7 @@ print dnode2 $dnode2Vnodes if $dnode1Vnodes != 2 then return -1 endi -if $dnode2Vnodes != NULL then +if $dnode2Vnodes != null then return -1 endi @@ -146,7 +146,7 @@ print dnode2 $dnode2Vnodes if $dnode1Vnodes != 3 then goto show4 endi -if $dnode2Vnodes != NULL then +if $dnode2Vnodes != null then goto show4 endi @@ -229,7 +229,7 @@ print 
dnode3 $dnode3Vnodes if $dnode1Vnodes != 3 then goto show8 endi -if $dnode3Vnodes != NULL then +if $dnode3Vnodes != null then goto show8 endi @@ -245,7 +245,7 @@ if $dnode1Role != master then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim index d741e03eec9723a7e191c35138836b784a8658bc..08fdd233e012f89a29bcaca938fb34f73aeb224a 100644 --- a/tests/script/unique/cluster/balance2.sim +++ b/tests/script/unique/cluster/balance2.sim @@ -131,7 +131,7 @@ print dnode3 $dnode3Vnodes if $dnode1Vnodes != 3 then goto show2 endi -if $dnode2Vnodes != NULL then +if $dnode2Vnodes != null then goto show2 endi if $dnode3Vnodes != 3 then @@ -194,7 +194,7 @@ print dnode4 ==> $dnode4Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -231,7 +231,7 @@ endi if $dnode4Vnodes != 3 then goto show4 endi -if $dnode3Vnodes != NULL then +if $dnode3Vnodes != null then goto show4 endi @@ -248,10 +248,10 @@ print dnode4 ==> $dnode4Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi @@ -339,7 +339,7 @@ print dnode4 $dnode4Vnodes $dnode5Vnodes = $data2_5 print dnode5 $dnode5Vnodes -if $dnode1Vnodes != NULL then +if $dnode1Vnodes != null then goto show6 endi if $dnode4Vnodes != 3 then diff --git a/tests/script/unique/cluster/balance3.sim b/tests/script/unique/cluster/balance3.sim index e9847e21dac7af4deeb8e92b8ff59b2b5bc8a821..407adc7f3b61301d6a5d08047160ae0b4d3fac27 100644 --- a/tests/script/unique/cluster/balance3.sim +++ b/tests/script/unique/cluster/balance3.sim @@ -110,7 +110,7 @@ endi if $dnode3Vnodes != 3 then goto show1 endi -if $dnode4Vnodes != NULL then +if $dnode4Vnodes != null then goto show1 endi @@ -166,7 +166,7 @@ print dnode4 $dnode4Vnodes if $dnode1Vnodes != 3 then goto show3 endi -if $dnode2Vnodes != NULL then +if $dnode2Vnodes != null then goto show3 endi if $dnode3Vnodes != 3 then @@ -232,7 +232,7 @@ endi if $dnode5Vnodes != 3 then goto show5 endi -if $dnode3Vnodes != NULL then +if $dnode3Vnodes != null then goto show5 endi if $dnode4Vnodes != 3 then @@ -298,7 +298,7 @@ endi if $dnode6Vnodes != 3 then goto show7 endi -if $dnode4Vnodes != NULL then +if $dnode4Vnodes != null then goto show7 endi diff --git a/tests/script/unique/cluster/cache.sim b/tests/script/unique/cluster/cache.sim index 41f9db69f37c6d2578c27cdbdde8015b5503b385..e23b40782870be00eb3f010cbcdba693e18b19da 100644 --- a/tests/script/unique/cluster/cache.sim +++ b/tests/script/unique/cluster/cache.sim @@ -4,26 +4,18 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode2 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode2 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c httpMaxThreads -v 2 system sh/cfg.sh -n dnode2 -c httpMaxThreads -v 2 system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode2 -c http -v 1 +system sh/cfg.sh -n dnode1 -c monitor -v 2 +system sh/cfg.sh -n dnode2 -c http -v 1 system sh/cfg.sh -n dnode1 -c enableHttp -v 1 system sh/cfg.sh -n dnode2 -c monitor -v 1 system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 system sh/cfg.sh -n dnode2 -c monitorInterval -v 1 -system sh/cfg.sh -n dnode1 
-c maxVnodeConnections -v 30000 -system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 30000 -system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 30000 -system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 30000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 30000 -system sh/cfg.sh -n dnode2 -c maxShellConns -v 30000 - system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect @@ -45,13 +37,20 @@ sleep 3000 system sh/exec.sh -n dnode2 -s start sql create dnode $hostname2 -sleep 20000 -sql select * from log.dn_192_168_0_1 + +sleep 10000 + +sql show log.tables; +if $rows != 5 then + return -1 +endi + +sql select * from log.dn1 print ===>rows $rows print $data00 $data01 $data02 print $data10 $data11 $data12 print $data20 $data21 $data22 -if $rows < 20 then +if $rows < 10 then return -1 endi diff --git a/tests/script/unique/db/commit.sim b/tests/script/unique/db/commit.sim index 648cd8db2fe592b2405017d9d850c76ffeefb117..5bf6ea6f10bf7e52581785459aa03de321f91c1c 100644 --- a/tests/script/unique/db/commit.sim +++ b/tests/script/unique/db/commit.sim @@ -72,8 +72,8 @@ sql import into tb values (now - 10d , -10 ) sql import into tb values (now - 11d , -11 ) sql select * from tb order by ts desc -print ===> rows $rows -print ===> last $data01 +print ===> rows $rows expect $num +print ===> last $data01 expect $data01 if $rows != $num then return -1 diff --git a/tests/script/unique/db/delete.sim b/tests/script/unique/db/delete.sim index e222db8d70fff0dfb17d2731a346557b5ddb0b8e..5688333d20fc8867de16dbe5cc4705183d12d8df 100644 --- a/tests/script/unique/db/delete.sim +++ b/tests/script/unique/db/delete.sim @@ -1,7 +1,4 @@ system sh/stop_dnodes.sh - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 @@ -18,33 +15,81 @@ system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 10 print ========= start dnodes system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect sql create dnode $hostname2 system sh/exec_up.sh -n dnode2 -s start sql create dnode $hostname3 system sh/exec_up.sh -n dnode3 -s start -sleep 3000 print ======== step1 -sql create database db replica 3 ablocks 2 tblocks 5 maxtables 10000 +sql create database db replica 3 blocks 2 maxtables 1000 sql create table db.mt (ts timestamp, tbcol int) TAGS(tgcol int) $tbPrefix = db.t $i = 0 -while $i < 100000 +while $i < 2000 $tb = $tbPrefix . 
$i sql create table $tb using db.mt tags( $i ) $i = $i + 1 endw +sleep 2500 + +sql show db.vgroups +if $rows != 2 then + return -1 +endi + print ======== step2 -sleep 1000 sql drop database db sql show databases if $rows != 0 then return -1 endi +sleep 3000 +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +if $data2_1 != 0 then + return -1 +endi +if $data2_2 != 0 then + return -1 +endi +if $data2_3 != 0 then + return -1 +endi + +print ======== step3 + +system sh/exec_up.sh -n dnode1 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT + +sleep 1000 +system sh/exec_up.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start + +$x = 0 +step3: + $x = $x + 1 + sleep 2000 + if $x == 10 then + return -1 + endi + +sql show mnodes +print dnode1 role $data2_1 +if $data2_1 != master then + goto step3 +endi + +sleep 1000 + system sh/exec_up.sh -n dnode1 -s stop -x SIGINT system sh/exec_up.sh -n dnode2 -s stop -x SIGINT system sh/exec_up.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/unique/db/delete_part.sim b/tests/script/unique/db/delete_part.sim index 3d1cc5dc633dd143d5cef3397a8e7b234cadaf5f..179d729d8dae2b7b0a8bf890fdfb43e5be76a05a 100644 --- a/tests/script/unique/db/delete_part.sim +++ b/tests/script/unique/db/delete_part.sim @@ -31,44 +31,147 @@ system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 print ========= start dnodes system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect sql create dnode $hostname2 system sh/exec_up.sh -n dnode2 -s start -sleep 3000 $loop = 0 begin: $db = db . $loop - print ======== step1 + print ======== step1 $loop sql create database $db sql use $db - $x = 0 - while $x < 32 - $tb = tb . 
$x - sql create table $tb (ts timestamp, i int) - sql insert into $tb values(now, $x ) - $x = $x + 1 - endw - - print ======== step2 + sql create table t11 (ts timestamp, i int) + sql insert into t11 values(now, 1 ) + sql create table t12 (ts timestamp, i int) + sql insert into t12 values(now, 1 ) + sql create table t13 (ts timestamp, i int) + sql insert into t13 values(now, 1 ) + sql create table t14 (ts timestamp, i int) + sql insert into t14 values(now, 1 ) + sleep 1200 + + sql create table t21 (ts timestamp, i int) + sql insert into t21 values(now, 1 ) + sql create table t22 (ts timestamp, i int) + sql insert into t22 values(now, 1 ) + sql create table t23 (ts timestamp, i int) + sql insert into t23 values(now, 1 ) + sql create table t24 (ts timestamp, i int) + sql insert into t24 values(now, 1 ) + sleep 1200 + + sql create table t31 (ts timestamp, i int) + sql insert into t31 values(now, 1 ) + sql create table t32 (ts timestamp, i int) + sql insert into t32 values(now, 1 ) + sql create table t33 (ts timestamp, i int) + sql insert into t33 values(now, 1 ) + sql create table t34 (ts timestamp, i int) + sql insert into t34 values(now, 1 ) + sleep 1200 + + sql create table t41 (ts timestamp, i int) + sql insert into t41 values(now, 1 ) + sql create table t42 (ts timestamp, i int) + sql insert into t42 values(now, 1 ) + sql create table t43 (ts timestamp, i int) + sql insert into t43 values(now, 1 ) + sql create table t44 (ts timestamp, i int) + sql insert into t44 values(now, 1 ) + sleep 1200 + + sql create table t51 (ts timestamp, i int) + sql insert into t51 values(now, 1 ) + sql create table t52 (ts timestamp, i int) + sql insert into t52 values(now, 1 ) + sql create table t53 (ts timestamp, i int) + sql insert into t53 values(now, 1 ) + sql create table t54 (ts timestamp, i int) + sql insert into t54 values(now, 1 ) + sleep 1200 + + sql create table t61 (ts timestamp, i int) + sql insert into t61 values(now, 1 ) + sql create table t62 (ts timestamp, i int) + sql insert into t62 values(now, 1 ) + sql create table t63 (ts timestamp, i int) + sql insert into t63 values(now, 1 ) + sql create table t64 (ts timestamp, i int) + sql insert into t64 values(now, 1 ) + sleep 1200 + + sql create table t71 (ts timestamp, i int) + sql insert into t71 values(now, 1 ) + sql create table t72 (ts timestamp, i int) + sql insert into t72 values(now, 1 ) + sql create table t73 (ts timestamp, i int) + sql insert into t73 values(now, 1 ) + sql create table t74 (ts timestamp, i int) + sql insert into t74 values(now, 1 ) + sleep 1200 + + sql create table t81 (ts timestamp, i int) + sql insert into t81 values(now, 1 ) + sql create table t82 (ts timestamp, i int) + sql insert into t82 values(now, 1 ) + sql create table t83 (ts timestamp, i int) + sql insert into t83 values(now, 1 ) + sql create table t84 (ts timestamp, i int) + sql insert into t84 values(now, 1 ) + sleep 1200 + + sql show dnodes + print dnode1 openVnodes $data2_1 + print dnode2 openVnodes $data2_2 + if $data2_1 != 4 then + return -1 + endi + if $data2_2 != 4 then + return -1 + endi + + print ======== step2 $loop + system sh/exec_up.sh -n dnode2 -s stop + sleep 1000 + print ==> drop database $db sql drop database $db - - print ======== step3 - sleep 3000 + print ======== step3 $loop + sleep 2000 system sh/exec_up.sh -n dnode2 -s start - sleep 20000 + sleep 15000 + + sql show dnodes + print dnode1 openVnodes $data2_1 $data4_1 + print dnode2 openVnodes $data2_2 $data4_2 + if $data2_1 != 0 then + return -1 + endi + if $data2_2 != 0 then + return -1 
+ endi + if $data4_1 != ready then + return -1 + endi + if $data4_2 != ready then + return -1 + endi print ===> test times : $loop - if $loop > 5 then + if $loop > 3 then return 0 endi $loop = $loop + 1 + + sql reset query cache + sleep 1000 goto begin diff --git a/tests/script/unique/db/replica_add13.sim b/tests/script/unique/db/replica_add13.sim index ac7e3f5c5c13dcd4f126456e15124ce3778a85c8..9f66faab0aa60e8493234d5ff2fb933f130a672e 100644 --- a/tests/script/unique/db/replica_add13.sim +++ b/tests/script/unique/db/replica_add13.sim @@ -47,10 +47,10 @@ sql create table d2.t2 (ts timestamp, i int) sql create table d3.t3 (ts timestamp, i int) sql create table d4.t4 (ts timestamp, i int) -sql insert into d1.t1 values(now, 1) -sql insert into d2.t2 values(now, 1) -sql insert into d3.t3 values(now, 1) -sql insert into d4.t4 values(now, 1) +sql insert into d1.t1 values(1589529000011, 1) +sql insert into d2.t2 values(1589529000021, 1) +sql insert into d3.t3 values(1589529000031, 1) +sql insert into d4.t4 values(1589529000041, 1) sql select * from d1.t1 if $rows != 1 then @@ -111,10 +111,10 @@ if $data2_3 != 4 then endi print ======== step4 -sql insert into d1.t1 values(now, 2) -sql insert into d2.t2 values(now, 2) -sql insert into d3.t3 values(now, 2) -sql insert into d4.t4 values(now, 2) +sql insert into d1.t1 values(1589529000012, 2) +sql insert into d2.t2 values(1589529000022, 2) +sql insert into d3.t3 values(1589529000032, 2) +sql insert into d4.t4 values(1589529000042, 2) sql select * from d1.t1 if $rows != 2 then @@ -142,10 +142,10 @@ sleep 1000 system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sleep 5000 -sql insert into d1.t1 values(now, 3) -sql insert into d2.t2 values(now, 3) -sql insert into d3.t3 values(now, 3) -sql insert into d4.t4 values(now, 3) +sql insert into d1.t1 values(1589529000013, 3) +sql insert into d2.t2 values(1589529000023, 3) +sql insert into d3.t3 values(1589529000033, 3) +sql insert into d4.t4 values(1589529000043, 3) sql select * from d1.t1 if $rows != 3 then @@ -173,27 +173,31 @@ sleep 5000 system sh/exec_up.sh -n dnode3 -s stop -x SIGINT sleep 3000 -sql insert into d1.t1 values(now, 4) -sql insert into d2.t2 values(now, 4) -sql insert into d3.t3 values(now, 4) -sql insert into d4.t4 values(now, 4) +sql insert into d1.t1 values(1589529000014, 4) +sql insert into d2.t2 values(1589529000024, 4) +sql insert into d3.t3 values(1589529000034, 4) +sql insert into d4.t4 values(1589529000044, 4) sql select * from d1.t1 +print select * from d1.t1 $rows if $rows != 4 then return -1 endi sql select * from d2.t2 +print select * from d2.t2 $rows if $rows != 4 then return -1 endi sql select * from d3.t3 +print select * from d3.t3 $rows if $rows != 4 then return -1 endi sql select * from d4.t4 +print select * from d4.t4 $rows if $rows != 4 then return -1 endi @@ -204,10 +208,10 @@ sleep 5000 system sh/exec_up.sh -n dnode4 -s stop -x SIGINT sleep 3000 -sql insert into d1.t1 values(now, 5) -sql insert into d2.t2 values(now, 5) -sql insert into d3.t3 values(now, 5) -sql insert into d4.t4 values(now, 5) +sql insert into d1.t1 values(1589529000015, 5) +sql insert into d2.t2 values(1589529000025, 5) +sql insert into d3.t3 values(1589529000035, 5) +sql insert into d4.t4 values(1589529000045, 5) sql select * from d1.t1 if $rows != 5 then @@ -235,10 +239,10 @@ sleep 5000 system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sleep 3000 -sql insert into d1.t1 values(now, 6) -sql insert into d2.t2 values(now, 6) -sql insert into d3.t3 values(now, 6) -sql insert into d4.t4 values(now, 6) +sql 
insert into d1.t1 values(1589529000016, 6) +sql insert into d2.t2 values(1589529000026, 6) +sql insert into d3.t3 values(1589529000036, 6) +sql insert into d4.t4 values(1589529000046, 6) sql select * from d1.t1 if $rows != 6 then diff --git a/tests/script/unique/db/replica_part.sim b/tests/script/unique/db/replica_part.sim index f0ffb8901526f59ae30f5bd43e89010b2bdf454f..76e3eaabbe1ad7cfa94177186df6e66327f248a7 100644 --- a/tests/script/unique/db/replica_part.sim +++ b/tests/script/unique/db/replica_part.sim @@ -7,9 +7,9 @@ system sh/deploy.sh -n dnode3 -i 3 system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode2 -c wallevel -v 2 system sh/cfg.sh -n dnode3 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numOfMPeers -v 2 -system sh/cfg.sh -n dnode2 -c numOfMPeers -v 2 -system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2 +system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1 +system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1 +system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1 system sh/cfg.sh -n dnode1 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4 diff --git a/tests/script/unique/dnode/balance1.sim b/tests/script/unique/dnode/balance1.sim index 34d3310394ea6d071965878a9450fb75c53254c0..9a598e1704815f35929dd123aad7b53594e4a5ee 100644 --- a/tests/script/unique/dnode/balance1.sim +++ b/tests/script/unique/dnode/balance1.sim @@ -53,7 +53,7 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -93,7 +93,7 @@ $x = 0 show4: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -103,7 +103,7 @@ print dnode2 openVnodes $data2_2 if $data2_1 != 2 then goto show4 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show4 endi if $rows != 1 then @@ -131,7 +131,7 @@ print dnode3 openVnodes $data2_3 if $data2_1 != 0 then goto show5 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show5 endi if $data2_3 != 2 then @@ -155,7 +155,7 @@ print dnode3 openVnodes $data2_3 if $data2_1 != 0 then return -1 endi -if $data2_2 != NULL then +if $data2_2 != null then return -1 endi if $data2_3 != 3 then @@ -170,7 +170,7 @@ $x = 0 show7: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -182,7 +182,7 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 0 then goto show7 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show7 endi if $data2_3 != 2 then @@ -210,7 +210,7 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 0 then return -1 endi -if $data2_2 != NULL then +if $data2_2 != null then return -1 endi if $data2_3 != 2 then @@ -227,7 +227,7 @@ $x = 0 show9: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -240,10 +240,10 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 0 then goto show9 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show9 endi -if $data2_3 != NULL then +if $data2_3 != null then goto show9 endi if $data2_4 != 4 then diff --git a/tests/script/unique/dnode/balance2.sim b/tests/script/unique/dnode/balance2.sim index 9786a854a5845ba4981f49265b98394893f4ba8c..f039579012798fb338e1b742f1c76d1a8ccfa6bd 100644 --- a/tests/script/unique/dnode/balance2.sim +++ b/tests/script/unique/dnode/balance2.sim @@ -65,7 +65,7 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -76,7 +76,7 @@ print dnode3 openVnodes $data2_3 if $data2_1 != 2 then goto show2 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show2 endi if 
$data2_3 != 2 then @@ -105,7 +105,7 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 0 then goto show3 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show3 endi if $data2_3 != 2 then @@ -132,7 +132,7 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 0 then return -1 endi -if $data2_2 != NULL then +if $data2_2 != null then return -1 endi if $data2_3 != 3 then @@ -150,7 +150,7 @@ $x = 0 show5: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -163,7 +163,7 @@ print dnode5 openVnodes $data2_5 if $data2_1 != 0 then goto show5 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show5 endi if $data2_3 != 2 then @@ -183,7 +183,7 @@ $x = 0 show6: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -196,10 +196,10 @@ print dnode5 openVnodes $data2_5 if $data2_1 != 0 then goto show6 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show6 endi -if $data2_3 != NULL then +if $data2_3 != null then goto show6 endi if $data2_4 != 3 then diff --git a/tests/script/unique/dnode/balance3.sim b/tests/script/unique/dnode/balance3.sim index 6d5bbc77a53cbd72a58d97506bdcd7ee0efcbaf2..acb0d033d4a21ed7bb262b9437cdb4d404fe437f 100644 --- a/tests/script/unique/dnode/balance3.sim +++ b/tests/script/unique/dnode/balance3.sim @@ -88,7 +88,7 @@ print dnode4 openVnodes $data2_4 if $data2_1 != 2 then goto show2 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show2 endi if $data2_3 != 2 then @@ -122,7 +122,7 @@ print dnode5 openVnodes $data2_5 if $data2_1 != 0 then goto show3 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show3 endi if $data2_3 != 2 then @@ -162,7 +162,7 @@ print dnode5 openVnodes $data2_5 if $data2_1 != 0 then goto show4 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show4 endi if $data2_3 != 3 then @@ -228,7 +228,7 @@ endi if $data2_6 != 3 then goto show6 endi -if $data2_3 != NULL then +if $data2_3 != null then goto show6 endi if $data2_4 != 3 then diff --git a/tests/script/unique/dnode/balancex.sim b/tests/script/unique/dnode/balancex.sim index 4b46db4b4900a3da083eb8c9e2eee25516363cb2..202c9b5396206815e8bf4480505542fff6ea78eb 100644 --- a/tests/script/unique/dnode/balancex.sim +++ b/tests/script/unique/dnode/balancex.sim @@ -50,7 +50,7 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -77,7 +77,7 @@ $x = 0 show3: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes @@ -122,7 +122,7 @@ $x = 0 show5: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes @@ -132,7 +132,7 @@ print dnode3 openVnodes $data2_3 if $data2_1 != 1 then goto show5 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show5 endi if $data2_3 != 3 then diff --git a/tests/script/unique/dnode/offline1.sim b/tests/script/unique/dnode/offline1.sim index 4d67b5f55ca9ef1d8bd27e8eab1688ed600cf598..5e4ab65be37e90a4cf6737d6fa7b485c16dd61ae 100644 --- a/tests/script/unique/dnode/offline1.sim +++ b/tests/script/unique/dnode/offline1.sim @@ -63,7 +63,7 @@ print dnode1 $data4_2 if $data4_1 != ready then return -1 endi -if $data4_2 != NULL then +if $data4_2 != null then return -1 endi diff --git a/tests/script/unique/dnode/offline2.sim b/tests/script/unique/dnode/offline2.sim index 6aa85465dded7acbe45d2071e8970eab1060b38e..c526e45b6e44e3a7fc387e0b2430a52e85806829 100644 --- a/tests/script/unique/dnode/offline2.sim +++ b/tests/script/unique/dnode/offline2.sim @@ -82,15 +82,18 @@ $x = 0 
show4: $x = $x + 1 sleep 5000 - if $x == 50 then + if $x == 10 then return -1 endi sql show dnodes +print dnode1 $data4_1 +print dnode2 $data4_2 +print dnode3 $data4_3 if $data4_1 != ready then goto show4 endi -if $data4_2 != NULL then +if $data4_2 != null then goto show4 endi if $data4_3 != ready then diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim index 545c28a4ea14cd3c1d29a4d19a3011158f522850..6b23014b0348c9149bd301cf97dd1b7e7210c6c3 100644 --- a/tests/script/unique/dnode/remove1.sim +++ b/tests/script/unique/dnode/remove1.sim @@ -17,8 +17,8 @@ system sh/cfg.sh -n dnode4 -c wallevel -v 1 print ========== step1 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect sql create database d1 maxTables 4 sql create table d1.t1 (t timestamp, i int) @@ -59,17 +59,17 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 -if $data2_1 != 3 then +if $data2_1 != 1 then goto show2 endi -if $data2_2 != 1 then +if $data2_2 != 3 then goto show2 endi @@ -81,7 +81,7 @@ $x = 0 show3: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -97,7 +97,7 @@ $x = 0 show4: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -105,7 +105,7 @@ sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 -if $data2_2 != NULL then +if $data2_2 != null then goto show4 endi @@ -118,8 +118,8 @@ system sh/exec_up.sh -n dnode4 -s start $x = 0 show5: $x = $x + 1 - sleep 3000 - if $x == 20 then + sleep 2000 + if $x == 10 then return -1 endi sql show dnodes @@ -127,10 +127,10 @@ print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 print dnode4 openVnodes $data2_4 -if $data2_1 != 4 then +if $data2_1 != 0 then goto show5 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show5 endi if $data2_3 != 2 then diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim index 972b77a35d240cd3f491c156daa310e8279780c2..77ec1fa630d409b391f72b7733959c1eee29059d 100644 --- a/tests/script/unique/dnode/remove2.sim +++ b/tests/script/unique/dnode/remove2.sim @@ -10,15 +10,15 @@ system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode1 -c wallevel -v 1 -system sh/cfg.sh -n dnode2 -c wallevel -v 1 -system sh/cfg.sh -n dnode3 -c wallevel -v 1 -system sh/cfg.sh -n dnode4 -c wallevel -v 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/cfg.sh -n dnode2 -c wallevel -v 2 +system sh/cfg.sh -n dnode3 -c wallevel -v 2 +system sh/cfg.sh -n dnode4 -c wallevel -v 2 print ========== step1 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect sql create database d1 maxTables 4 sql create table d1.t1 (t timestamp, i int) @@ -59,46 +59,51 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 -if $data2_1 != 3 then +if $data2_1 != 1 then goto show2 endi -if $data2_2 != 1 then +if $data2_2 != 3 then goto show2 endi print ========== step3 system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sql drop dnode $hostname2 -sleep 7001 +sleep 4000 -$x = 0 -show3: - $x = $x + 1 - sleep 2000 - if $x == 30 then - return -1 - endi 
- sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 $data5_192.168.0.2 +print dnode2 openVnodes $data2_2 print ========== step4 sql create dnode $hostname3 system sh/exec_up.sh -n dnode3 -s start +sleep 5000 + +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +if $data2_3 != 0 then + return -1 +endi + +print ============ step 4.1 +system sh/exec_up.sh -n dnode2 -s start + $x = 0 show4: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi @@ -106,13 +111,13 @@ sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 -if $data2_2 != NULL then +if $data2_2 != null then goto show4 endi -if $data2_1 != 3 then +if $data2_1 != 1 then goto show4 endi -if $data2_3 != 1 then +if $data2_3 != 3 then goto show4 endi diff --git a/tests/script/unique/dnode/vnode_clean.sim b/tests/script/unique/dnode/vnode_clean.sim index da34c7bc9b3b26f0a6bad497ae02ea253b97bb7a..d46e1a751ed788249f158e519ef901cdf46b886c 100644 --- a/tests/script/unique/dnode/vnode_clean.sim +++ b/tests/script/unique/dnode/vnode_clean.sim @@ -29,7 +29,7 @@ sql insert into d1.t1 values(now+5s, 11) sql show dnodes print dnode1 openVnodes $data2_1 -if $data2_1 != 3 then +if $data2_1 != 1 then return -1 endi @@ -41,16 +41,16 @@ $x = 0 show2: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 -if $data2_1 != 4 then +if $data2_1 != 0 then goto show2 endi -if $data2_2 != 3 then +if $data2_2 != 1 then goto show2 endi @@ -68,7 +68,7 @@ $x = 0 sql show dnodes print dnode1 openVnodes $data2_1 print dnode2 openVnodes $data2_2 -if $data2_1 != 4 then +if $data2_1 != 0 then return -1 endi if $data2_2 != 2 then @@ -82,7 +82,7 @@ $x = 0 show4: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes @@ -91,7 +91,7 @@ print dnode2 openVnodes $data2_2 if $data2_1 != 2 then goto show4 endi -if $data2_2 != NULL then +if $data2_2 != null then goto show4 endi if $rows != 1 then @@ -102,13 +102,8 @@ system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step5 sleep 2000 -sql create dnode $hostname2 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode2 -c wallevel -v 1 -system sh/exec_up.sh -n dnode2 -s start +sql create dnode $hostname3 +system sh/exec_up.sh -n dnode3 -s start $x = 0 show5: @@ -119,11 +114,11 @@ show5: endi sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 -if $data2_1 != 4 then +print dnode3 openVnodes $data2_3 +if $data2_1 != 0 then goto show5 endi -if $data2_2 != 2 then +if $data2_3 != 2 then goto show5 endi @@ -138,17 +133,17 @@ sql insert into d3.t3 values(now+5s, 31) sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 -if $data2_1 != 4 then +print dnode2 openVnodes $data2_3 +if $data2_1 != 0 then return -1 endi -if $data2_2 != 1 then +if $data2_3 != 3 then return -1 endi print ========== step7 -sql create dnode $hostname3 -system sh/exec_up.sh -n dnode3 -s start +sql create dnode $hostname4 +system sh/exec_up.sh -n dnode4 -s start $x = 0 show7: @@ -160,15 +155,15 @@ show7: sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 
-if $data2_1 != 4 then +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then goto show7 endi -if $data2_2 != 2 then +if $data2_3 != 2 then goto show7 endi -if $data2_3 != 3 then +if $data2_4 != 1 then goto show7 endi @@ -185,49 +180,49 @@ $x = 0 show8: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 -if $data2_1 != 4 then +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then goto show8 endi -if $data2_2 != 2 then +if $data2_3 != 2 then goto show8 endi -if $data2_3 != 2 then +if $data2_4 != 2 then goto show8 endi print ========== step9 -sql drop dnode $hostname2 +sql drop dnode $hostname3 $x = 0 show9: $x = $x + 1 sleep 2000 - if $x == 30 then + if $x == 10 then return -1 endi sql show dnodes print dnode1 openVnodes $data2_1 -print dnode2 openVnodes $data2_2 print dnode3 openVnodes $data2_3 -if $data2_1 != 4 then +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then goto show9 endi -if $data2_2 != NULL then +if $data2_3 != null then goto show9 endi -if $data2_3 != 0 then +if $data2_4 != 4 then goto show9 endi -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT print ========== step10 sql select * from d1.t1 order by t desc diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim index 914838361ae8dddcddf5d5e2cf21fcd31fb020cb..4e126c4e6098bcc1fda0a47929197569e4a7b5c7 100644 --- a/tests/script/unique/http/opentsdb.sim +++ b/tests/script/unique/http/opentsdb.sim @@ -1,7 +1,9 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c http -v 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 +system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135 system sh/exec_up.sh -n dnode1 -s start sleep 3000 @@ -12,7 +14,7 @@ print ============================ dnode1 start print =============== step1 - parse system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/ print $system_content -if $system_content != @{"status":"error","code":1057,"desc":"database name can not be NULL"}@ then +if $system_content != @{"status":"error","code":1057,"desc":"database name can not be null"}@ then return -1 endi @@ -24,7 +26,7 @@ endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/ print $system_content -if $system_content != @{"status":"error","code":1057,"desc":"database name can not be NULL"}@ then +if $system_content != @{"status":"error","code":1057,"desc":"database name can not be null"}@ then return -1 endi @@ -73,7 +75,7 @@ endi system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put print $system_content -if $system_content != @{"status":"error","code":1065,"desc":"metric name length can not more than 22"}@ then +if $system_content != 
@{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":-2147483389}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi @@ -123,13 +125,13 @@ endi system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put print $system_content -if $system_content != @{"status":"error","code":1071,"desc":"tags size too long"}@ then +if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147483445}}],"failed":1,"success":0,"affected_rows":0}@ then return -1 endi system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:6020/opentsdb/db/put print $system_content -if $system_content != @{"status":"error","code":1073,"desc":"tag name is NULL"}@ then +if $system_content != @{"status":"error","code":1073,"desc":"tag name is null"}@ then return -1 endi @@ -147,7 +149,7 @@ endi system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:6020/opentsdb/db/put print $system_content -if $system_content != @{"status":"error","code":1076,"desc":"tag value is NULL"}@ then +if $system_content != @{"status":"error","code":1076,"desc":"tag value is null"}@ then return -1 endi @@ -162,7 +164,7 @@ endi system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":0,"status":"succ"}}],"failed":0,"success":1,"affected_rows":0}@ then +if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then return -1 endi diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim index 56a4b305737cf0fb22a0aafbb7ae5357f425bd28..625e42a334dc9de7cb85111e17257878e8d2552c 100644 --- a/tests/script/unique/mnode/mgmtr2.sim +++ b/tests/script/unique/mnode/mgmtr2.sim @@ -9,8 +9,8 @@ system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2 print ============== step1 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect sql show mnodes $dnode1Role = $data2_1 @@ -23,16 +23,15 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 
endi print ============== step2 sql create dnode $hostname2 -sleep 1700 sql create dnode $hostname3 print ============== step3 @@ -68,10 +67,10 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $rows != 2 then +if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi diff --git a/tests/script/unique/mnode/secondIp.sim b/tests/script/unique/mnode/secondIp.sim deleted file mode 100644 index cfe75ffc842859d325ee589055513466f02ceb5c..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/secondIp.sim +++ /dev/null @@ -1,44 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -print ========== step1 dnode2 start -system sh/exec_up.sh -n dnode2 -s start -sql connect - -print ========== step2 connect to dnode2 -sql create dnode $hostname1 -system sh/exec_up.sh -n dnode1 -s start -sleep 3000 - -print ========== step3 -sql show dnodes -print dnode1 openvnodes $data3_1 -print dnode2 openvnodes $data3_2 -print dnode1 totalvnodes $data4_1 -print dnode2 totalvnodes $data4_2 - -if $rows != 2 then - return -1 -endi -if $data3_1 != 0 then - return -1 -endi -if $data3_2 != 0 then - return -1 -endi -if $data4_1 != 4 then - return -1 -endi -if $data4_2 != 4 then - return -1 -endi - -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT -system sh/exec_up.sh -n dnode3 -s stop -x SIGINT -system sh/exec_up.sh -n dnode4 -s stop -x SIGINT -system sh/exec_up.sh -n dnode5 -s stop -x SIGINT -system sh/exec_up.sh -n dnode6 -s stop -x SIGINT -system sh/exec_up.sh -n dnode7 -s stop -x SIGINT -system sh/exec_up.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim index 34d3ce7e53ddbda0ab328324a1a1a622a0941e9b..33df24b860d90ffd9d168a6788998b0955b2f347 100644 --- a/tests/script/unique/mnode/testSuite.sim +++ b/tests/script/unique/mnode/testSuite.sim @@ -5,5 +5,4 @@ run unique/mnode/mgmt25.sim run unique/mnode/mgmt26.sim run unique/mnode/mgmt33.sim run unique/mnode/mgmt34.sim -#run unique/mnode/mgmtr2.sim -#run unique/mnode/secondIp.sim +run unique/mnode/mgmtr2.sim diff --git a/tests/script/unique/metrics/balance_replica1.sim b/tests/script/unique/stable/balance_replica1.sim similarity index 95% rename from tests/script/unique/metrics/balance_replica1.sim rename to tests/script/unique/stable/balance_replica1.sim index 52bf558faa46395682e4bad34073f9e3f8fce286..8cf41319a020d201af62d4a775737aac679f9bba 100644 --- a/tests/script/unique/metrics/balance_replica1.sim +++ b/tests/script/unique/stable/balance_replica1.sim @@ -1,7 +1,4 @@ system sh/stop_dnodes.sh - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 @@ -26,7 +23,7 @@ $totalNum = 200 print ============== step1 print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect $i = 0 @@ -81,7 +78,7 @@ if $dnode2Vnodes != NULL then endi print =============== step3 start dnode2 sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 8000 $x = 0 diff --git a/tests/script/unique/metrics/dnode2.sim b/tests/script/unique/stable/dnode2.sim similarity index 96% rename from tests/script/unique/metrics/dnode2.sim rename to tests/script/unique/stable/dnode2.sim index 
cd76b39ba0dfaa49495c513bfcc2e7a748fd1ab1..2d894c6542f9ff710680641b82a8a5464285d745 100644 --- a/tests/script/unique/metrics/dnode2.sim +++ b/tests/script/unique/stable/dnode2.sim @@ -1,7 +1,4 @@ system sh/stop_dnodes.sh - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode1 -c walLevel -v 0 @@ -10,12 +7,12 @@ system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 createDnode: @@ -25,7 +22,7 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi diff --git a/tests/script/unique/metrics/dnode2_stop.sim b/tests/script/unique/stable/dnode2_stop.sim similarity index 92% rename from tests/script/unique/metrics/dnode2_stop.sim rename to tests/script/unique/stable/dnode2_stop.sim index 996482d11ebcfda55a1734bf233bd6e21ae8a44c..55ad5fcf094bb35cd399762bbce23e3b0b390d4b 100644 --- a/tests/script/unique/metrics/dnode2_stop.sim +++ b/tests/script/unique/stable/dnode2_stop.sim @@ -1,7 +1,4 @@ system sh/stop_dnodes.sh - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode1 -c walLevel -v 0 @@ -10,11 +7,11 @@ system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 createDnode: @@ -24,7 +21,7 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi @@ -76,7 +73,7 @@ endi sleep 100 -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print =============== step2 sql select count(*) from $mt -x step2 @@ -87,7 +84,7 @@ sql select count(tbcol) from $mt -x step21 return -1 step21: -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 10000 print =============== step3 diff --git a/tests/script/unique/metrics/dnode3.sim b/tests/script/unique/stable/dnode3.sim similarity index 92% rename from tests/script/unique/metrics/dnode3.sim rename to tests/script/unique/stable/dnode3.sim index 3a5419dff4e579507d9b7eebb72d5bc6921a5c2d..c9dd31f6f80421791f4b2fecf34d58e2205383d8 100644 --- a/tests/script/unique/metrics/dnode3.sim +++ b/tests/script/unique/stable/dnode3.sim @@ -1,8 +1,4 @@ system sh/stop_dnodes.sh - - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 @@ -15,14 +11,14 @@ system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sql create dnode $hostname3 -system sh/exec.sh -n 
dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 createDnode: @@ -32,10 +28,10 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi -if $data4_192.168.0.3 == offline then +if $data4_3 == offline then goto createDnode endi @@ -81,11 +77,11 @@ if $rows != 3 then endi sql show dnodes -$dnode1Vnodes = $data3_192.168.0.1 +$dnode1Vnodes = $data2_1 print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 +$dnode2Vnodes = $data2_2 print dnode2 $dnode2Vnodes -$dnode3Vnodes = $data3_192.168.0.3 +$dnode3Vnodes = $data2_3 print dnode3 $dnode3Vnodes if $dnode1Vnodes != 3 then diff --git a/tests/script/unique/metrics/replica2_dnode4.sim b/tests/script/unique/stable/replica2_dnode4.sim similarity index 93% rename from tests/script/unique/metrics/replica2_dnode4.sim rename to tests/script/unique/stable/replica2_dnode4.sim index bedc895dd180dcb14a0adf34a91361ad01b0c723..f204e059491b63e79008a9d54b2e1934b9f28072 100644 --- a/tests/script/unique/metrics/replica2_dnode4.sim +++ b/tests/script/unique/stable/replica2_dnode4.sim @@ -1,9 +1,4 @@ system sh/stop_dnodes.sh - - - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 @@ -20,16 +15,16 @@ system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 sql create dnode $hostname3 sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 createDnode: @@ -39,13 +34,13 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi -if $data4_192.168.0.3 == offline then +if $data4_3 == offline then goto createDnode endi -if $data4_192.168.0.4 == offline then +if $data4_4 == offline then goto createDnode endi diff --git a/tests/script/unique/metrics/replica2_vnode3.sim b/tests/script/unique/stable/replica2_vnode3.sim similarity index 96% rename from tests/script/unique/metrics/replica2_vnode3.sim rename to tests/script/unique/stable/replica2_vnode3.sim index 9a1d3477be9344d8451eec158e32209ca1b7b40d..238e2e4aee36c3311cb57a6c16e270db1a9e8195 100644 --- a/tests/script/unique/metrics/replica2_vnode3.sim +++ b/tests/script/unique/stable/replica2_vnode3.sim @@ -1,7 +1,4 @@ system sh/stop_dnodes.sh - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode1 -c walLevel -v 0 @@ -10,11 +7,11 @@ system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 createDnode: @@ -24,7 +21,7 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi diff --git a/tests/script/unique/metrics/replica3_dnode6.sim 
b/tests/script/unique/stable/replica3_dnode6.sim similarity index 91% rename from tests/script/unique/metrics/replica3_dnode6.sim rename to tests/script/unique/stable/replica3_dnode6.sim index 135e594cdcfdc5f9e85cab1aeb531f6349eafa81..1c8a8ae10ecb817fef4a1f72518caa30c4da197d 100644 --- a/tests/script/unique/metrics/replica3_dnode6.sim +++ b/tests/script/unique/stable/replica3_dnode6.sim @@ -1,12 +1,4 @@ system sh/stop_dnodes.sh - - - - - - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 @@ -35,7 +27,7 @@ system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode6 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 @@ -43,11 +35,11 @@ sql create dnode $hostname3 sql create dnode $hostname4 sql create dnode $hostname5 sql create dnode $hostname6 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start -system sh/exec.sh -n dnode6 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode5 -s start +system sh/exec_up.sh -n dnode6 -s start $x = 0 createDnode: @@ -57,19 +49,19 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi -if $data4_192.168.0.3 == offline then +if $data4_3 == offline then goto createDnode endi -if $data4_192.168.0.4 == offline then +if $data4_4 == offline then goto createDnode endi -if $data4_192.168.0.5 == offline then +if $data4_5 == offline then goto createDnode endi -if $data4_192.168.0.6 == offline then +if $data4_6 == offline then goto createDnode endi diff --git a/tests/script/unique/metrics/replica3_vnode3.sim b/tests/script/unique/stable/replica3_vnode3.sim similarity index 93% rename from tests/script/unique/metrics/replica3_vnode3.sim rename to tests/script/unique/stable/replica3_vnode3.sim index ca147c1ef5af4b74462267a6554589e8da01f24c..75870af4c43de82dc99ba375a0035040bf249d9e 100644 --- a/tests/script/unique/metrics/replica3_vnode3.sim +++ b/tests/script/unique/stable/replica3_vnode3.sim @@ -1,9 +1,5 @@ system sh/stop_dnodes.sh - - - - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 @@ -20,16 +16,16 @@ system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode $hostname2 sql create dnode $hostname3 sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 createDnode: $x = $x + 1 @@ -38,13 +34,13 @@ createDnode: return -1 endi sql show dnodes; -if $data4_192.168.0.2 == offline then +if $data4_2 == offline then goto createDnode endi -if $data4_192.168.0.3 == offline then +if $data4_3 == offline then goto createDnode endi -if $data4_192.168.0.4 == offline then +if $data4_4 == offline then goto createDnode endi @@ -144,6 +140,7 @@ if 
$rows != 5 then endi print =============== step7 +print select count(*) from $mt sql select count(*) from $mt print ===> $data00 if $data00 != $totalNum then @@ -208,7 +205,7 @@ endi if $rows != 50 then return -1 endi -return + print =============== clear sql drop database $db sql show databases diff --git a/tests/script/unique/metrics/testSuite.sim b/tests/script/unique/stable/testSuite.sim similarity index 100% rename from tests/script/unique/metrics/testSuite.sim rename to tests/script/unique/stable/testSuite.sim diff --git a/tests/script/unique/table/back_insert.sim b/tests/script/unique/table/back_insert.sim deleted file mode 100644 index 43831cca95b49218719c5172c3d2d96ad3500896..0000000000000000000000000000000000000000 --- a/tests/script/unique/table/back_insert.sim +++ /dev/null @@ -1,7 +0,0 @@ -sql connect -$x = 1 -begin: - sql insert into db.tb values(now, $x ) -x begin - #print ===> insert successed $x - $x = $x + 1 -goto begin \ No newline at end of file diff --git a/tests/script/unique/table/delete_part.sim b/tests/script/unique/table/delete_part.sim deleted file mode 100644 index 04cb03598ccd207b8790d3262f8271ecff743812..0000000000000000000000000000000000000000 --- a/tests/script/unique/table/delete_part.sim +++ /dev/null @@ -1,85 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode2 -c walLevel -v 0 -system sh/cfg.sh -n dnode3 -c walLevel -v 0 -system sh/cfg.sh -n dnode4 -c walLevel -v 0 - -system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1 -system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1 -system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1 -system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1 - -system sh/cfg.sh -n dnode1 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4 - -system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4 -system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4 -system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 -system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 - -print ========= start dnodes -system sh/exec.sh -n dnode1 -s start -sql connect -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 3000 - -sql create database dpdb -sql use dpdb - -$loop = 0 -begin: - - print ======== step1 - - $x = 0 - while $x < 16 - $tb = tb . $x - sql create table $tb (ts timestamp, i int) - sql insert into $tb values(now, $x ) - $x = $x + 1 - endw - - print ======== step2 - system sh/exec.sh -n dnode2 -s stop - $x = 0 - while $x < 16 - $tb = tb . 
$x - sql drop table $tb - $x = $x + 1 - endw - - print ======== step3 - sleep 2000 - system sh/exec.sh -n dnode2 -s start - sleep 3000 - - print ===> test times : $loop - if $loop > 20 then - return 0 - endi - - $loop = $loop + 1 - -goto begin - -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT -system sh/exec_up.sh -n dnode3 -s stop -x SIGINT -system sh/exec_up.sh -n dnode4 -s stop -x SIGINT -system sh/exec_up.sh -n dnode5 -s stop -x SIGINT -system sh/exec_up.sh -n dnode6 -s stop -x SIGINT -system sh/exec_up.sh -n dnode7 -s stop -x SIGINT -system sh/exec_up.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/table/testSuite.sim b/tests/script/unique/table/testSuite.sim deleted file mode 100644 index e0c0a1f2fabdaa079deb8b8db08db2095d1148fd..0000000000000000000000000000000000000000 --- a/tests/script/unique/table/testSuite.sim +++ /dev/null @@ -1 +0,0 @@ -run unique/table/delete_part.sim diff --git a/tests/script/unique/testSuite.sim b/tests/script/unique/testSuite.sim deleted file mode 100644 index 374f9b7965ae134bd1acf08449d53a3e918a9e7e..0000000000000000000000000000000000000000 --- a/tests/script/unique/testSuite.sim +++ /dev/null @@ -1,3 +0,0 @@ -################################# -run unique/mnode/testSuite.sim -################################## diff --git a/tests/script/unique/vnode/commit.sim b/tests/script/unique/vnode/commit.sim deleted file mode 100644 index 29c9f72335fc01f156045396452e57f03ca3b590..0000000000000000000000000000000000000000 --- a/tests/script/unique/vnode/commit.sim +++ /dev/null @@ -1,158 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode2 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numofMpeers -v 3 -system sh/cfg.sh -n dnode2 -c numofMpeers -v 3 -system sh/exec_up.sh -n dnode1 -s start - -sql connect -sql create dnode $hostname2 -system sh/exec_up.sh -n dnode2 -s start -sleep 3000 - -print =================== step 1 create db -sql create database c2db replica 2 days 10 keep 50 -sql use c2db -sql create table tb (ts timestamp, speed int) -sql insert into tb values(now, 0) - -print =================== step2 sleep 2000 and kill dnode2(SIGINT) -sleep 2000 -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT -sleep 1000 - -print =================== step3 insert into dnode1 - -$x = 1 -while $x < 100 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 240 -while $x < 400 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 480 -while $x < 700 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 720 -while $x < 809 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 960 -while $x < 1043 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 1200 -while $x < 1244 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 1440 -while $x < 1677 - $time = $x . m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -$x = 1680 -while $x < 1683 - $time = $x . 
m - sql insert into tb values (now + $time , $x ) - $x = $x + 1 -endw - -print =================== step4 -sql select count(*) from tb -print select count(*) from tb ==> $data00 (expect 936) -if $data00 != 936 then - return -1 -endi -sql select * from tb order by ts desc -print select * from tb ==> $data00 $data01 $rows -if $data01 != 1682 then - return -1 -endi -if $rows != 936 then - return -1 -endi - -print =================== step5 sleep kill dnode1(SIGINT) then start dnode1 -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -sleep 5000 -system sh/exec_up.sh -n dnode1 -s start - -sleep 3000 -print =================== step6 start dnode2 and sleep 10000 (wait sync complete) -system sh/exec_up.sh -n dnode2 -s start -sleep 12000 - -print =================== step7 -sql_error insert into tb values(now + 1000h, 100) -sql select count(*) from tb order by ts desc -print select count(*) from tb ==> $data00 (expect <= 936) -if $data00 != 936 then - return -1 -endi -$remainRows = $data00 - -sql select * from tb order by ts desc -print select * from tb ==> $data00 $data01 $data10 $data11 $rows - -if $data11 != 1681 then - return -1 -endi -if $rows != $remainRows then - return -1 -endi - -print =================== step8 kill dnode1(SIGINT) and query -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -sleep 2000 - -print =================== step9 -sql select count(*) from tb order by ts desc -print select count(*) from tb ==> $data00 (expect == $remainRows ) -if $data00 > $remainRows then - return -1 -endi -if $data00 <= 0 then - return -1 -endi - -$remainRows = $data00 -sql select * from tb order by ts desc -print select * from tb ==> $data00 $data01 $rows - -if $rows != $remainRows then - return -1 -endi - -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT -system sh/exec_up.sh -n dnode3 -s stop -x SIGINT -system sh/exec_up.sh -n dnode4 -s stop -x SIGINT -system sh/exec_up.sh -n dnode5 -s stop -x SIGINT -system sh/exec_up.sh -n dnode6 -s stop -x SIGINT -system sh/exec_up.sh -n dnode7 -s stop -x SIGINT -system sh/exec_up.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/vnode/many.sim b/tests/script/unique/vnode/many.sim index 0504207c2ed57ca6735a582c35740f68bc3df898..bb3e8813bd7666fa9fda2bb8495cf28a3648fe22 100644 --- a/tests/script/unique/vnode/many.sim +++ b/tests/script/unique/vnode/many.sim @@ -53,27 +53,27 @@ sql select count(*) from db4.tb4 $lastRows4 = $rows print ======== step2 -run_back cluster/vnode/back_insert_many.sim +run_back unique/vnode/back_insert_many.sim sleep 5000 print ======== step3 system sh/exec_up.sh -n dnode2 -s stop -sleep 10000 +sleep 5000 $x = 0 loop: print ======== step4 system sh/exec_up.sh -n dnode2 -s start -sleep 10000 +sleep 5000 system sh/exec_up.sh -n dnode3 -s stop -sleep 10000 +sleep 5000 print ======== step5 system sh/exec_up.sh -n dnode3 -s start -sleep 10000 +sleep 5000 system sh/exec_up.sh -n dnode2 -s stop -sleep 10000 +sleep 5000 print ======== step6 sql select count(*) from db1.tb1 @@ -108,7 +108,7 @@ print ======== step7 print ======== loop Times $x -if $x < 5 then +if $x < 2 then $x = $x + 1 goto loop endi diff --git a/tests/script/unique/vnode/replica2_basic.sim b/tests/script/unique/vnode/replica2_basic.sim deleted file mode 100644 index a0ea7085cb08922303dda014e3fd4322f6a71773..0000000000000000000000000000000000000000 --- a/tests/script/unique/vnode/replica2_basic.sim +++ /dev/null @@ -1,199 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n 
dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode2 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numofMpeers -v 3 -system sh/cfg.sh -n dnode2 -c numofMpeers -v 3 -system sh/exec_up.sh -n dnode1 -s start - -sql connect -sql create dnode $hostname2 -system sh/exec_up.sh -n dnode2 -s start -sleep 3000 - -$N = 10 -$db = d1 -$table = table_r2 - -print =================== step 1 -sql create database $db replica 3 -sql use $db -sql create table $table (ts timestamp, speed int) -x error_create -return -1 -error_create: -sql drop database $db - -print =================== step 2 - -sql create database $db replica 2 -sql use $db - -sql create table $table (ts timestamp, speed int) -sleep 1000 - -print =================== step 3 -$x = 1 -$y = $x + $N -$expect = $N -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 4 -system sh/exec_up.sh -n dnode2 -s stop -sleep 2000 -$y = $x + $N -$expect = $N * 2 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 5 -system sh/exec_up.sh -n dnode2 -s start -sleep 2000 -$y = $x + $N -$expect = $N * 3 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 6 -system sh/exec_up.sh -n dnode1 -s stop -sleep 2000 -$y = $x + $N -$expect = $N * 4 -while $x < $y -$ms = $x . m -sql insert into $table values (now + $ms , $x ) -$x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then -return -1 -endi - -print =================== step 7 -system sh/exec_up.sh -n dnode1 -s start -sleep 2000 -$y = $x + $N -$expect = $N * 5 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 8 -system sh/ip.sh -i 1 -s down - -sleep 2000 -$y = $x + $N -$expect = $N * 6 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 9 - -sleep 2000 -$y = $x + $N -$expect = $N * 7 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 10 -system sh/ip.sh -i 2 -s down - -sleep 2000 -$y = $x + $N -$expect = $N * 8 -while $x < $y - $ms = $x . m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 11 - -sleep 2000 -$y = $x + $N -$expect = $N * 9 -while $x < $y - $ms = $x . 
m - sql insert into $table values (now + $ms , $x ) - $x = $x + 1 -endw - -sql select * from $table -print sql select * from $table -> $rows points -if $rows != $expect then - return -1 -endi - -print =================== step 12 - -system sh/exec_up.sh -n dnode1 -s stop -x SIGINT -system sh/exec_up.sh -n dnode2 -s stop -x SIGINT -system sh/exec_up.sh -n dnode3 -s stop -x SIGINT -system sh/exec_up.sh -n dnode4 -s stop -x SIGINT -system sh/exec_up.sh -n dnode5 -s stop -x SIGINT -system sh/exec_up.sh -n dnode6 -s stop -x SIGINT -system sh/exec_up.sh -n dnode7 -s stop -x SIGINT -system sh/exec_up.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/vnode/replica2_repeat.sim b/tests/script/unique/vnode/replica2_repeat.sim index f7da1babb73914d58661a180b5cc7b3ddc8681ba..a6bd226484cf10dcbc2bcd8d4687e69b0fed63a7 100644 --- a/tests/script/unique/vnode/replica2_repeat.sim +++ b/tests/script/unique/vnode/replica2_repeat.sim @@ -33,7 +33,7 @@ sql select count(*) from db.tb $lastRows = $rows print ======== step2 -run_back cluster/vnode/back_insert.sim +run_back unique/vnode/back_insert.sim sleep 3000 print ======== step3 @@ -66,7 +66,7 @@ print ======== step7 $lastRows = $data00 print ======== loop Times $x -if $x < 5 then +if $x < 2 then $x = $x + 1 goto loop endi diff --git a/tests/script/unique/vnode/replica3_repeat.sim b/tests/script/unique/vnode/replica3_repeat.sim index 8c3ed902fb1508f4ce0d714f1455ad5546826957..2f311a5d7a5fbba7eb754ac65b281947038fdaa0 100644 --- a/tests/script/unique/vnode/replica3_repeat.sim +++ b/tests/script/unique/vnode/replica3_repeat.sim @@ -36,7 +36,7 @@ sql select count(*) from db.tb $lastRows = $rows print ======== step2 -run_back cluster/vnode/back_insert.sim +run_back unique/vnode/back_insert.sim sleep 3000 print ======== step3 @@ -75,7 +75,7 @@ print ======== step8 $lastRows = $data00 print ======== loop Times $x -if $x < 5 then +if $x < 2 then $x = $x + 1 goto loop endi diff --git a/tests/script/unique/vnode/replica3_vgroup.sim b/tests/script/unique/vnode/replica3_vgroup.sim index f63bf1783d60f67c81a51a104f737033ccceb237..6315a4335cf47abe6f3b42ca7db4a2406fc90c5f 100644 --- a/tests/script/unique/vnode/replica3_vgroup.sim +++ b/tests/script/unique/vnode/replica3_vgroup.sim @@ -33,7 +33,7 @@ sleep 3001 $tbPre = m -$N = 280 +$N = 300 $x = 0 $y = $x + $N while $x < $y @@ -46,20 +46,20 @@ endw #print =================== step 2 -#$x = 1 -#$y = $x + $N -#$expect = $N -#while $x < $y -# $ms = $x . m -# sql insert into $table values (now + $ms , $x ) -# $x = $x + 1 -#endw +$x = -500 +$y = $x + $N +while $x < $y + $ms = $x . 
m
+ sql insert into $table values (now $ms , $x )
+ $x = $x + 1
+endw
-#sql select * from $table
-#print sql select * from $table -> $rows points
-#if $rows != $expect then
-# return -1
-#endi
+$expect = $N + 1
+sql select * from $table
+print sql select * from $table -> $rows points expect $expect
+if $rows != $expect then
+ return -1
+endi
 system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
 system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/unique/vnode/testSuite.sim b/tests/script/unique/vnode/testSuite.sim
index 46b01aaa45f2eef6fc37e523c724d6bab1244662..3a9db66beb77d29b7d6bcb68d070edd91c97e199 100644
--- a/tests/script/unique/vnode/testSuite.sim
+++ b/tests/script/unique/vnode/testSuite.sim
@@ -1,6 +1,4 @@
-run unique/vnode/commit.sim
 run unique/vnode/many.sim
-run unique/vnode/replica2_basic.sim
 run unique/vnode/replica2_basic2.sim
 run unique/vnode/replica2_repeat.sim
 run unique/vnode/replica3_basic.sim